Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  230
1 file changed, 102 insertions(+), 128 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 269c3bc7fced..ba25b884169e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -34,8 +34,8 @@
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include <linux/pid_namespace.h>
-#include <linux/fdtable.h>
#include <linux/file.h>
+#include <linux/random.h>
#include "gfs2.h"
#include "incore.h"
@@ -563,11 +563,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl->gl_tchange = jiffies;
}
-static void gfs2_set_demote(struct gfs2_glock *gl)
+static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- set_bit(GLF_DEMOTE, &gl->gl_flags);
+ set_bit(nr, &gl->gl_flags);
smp_mb();
wake_up(&sdp->sd_async_glock_wait);
}
@@ -607,14 +607,19 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
if (gh && (ret & LM_OUT_CANCELED))
gfs2_holder_wake(gh);
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
- /* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
- list_move_tail(&gh->gh_list, &gl->gl_holders);
+ list_del_init(&gh->gh_list);
+ trace_gfs2_glock_queue(gh, 0);
+ gl->gl_target = gl->gl_state;
gh = find_first_waiter(gl);
- gl->gl_target = gh->gh_state;
- if (do_promote(gl))
- goto out;
- goto retry;
+ if (gh) {
+ gl->gl_target = gh->gh_state;
+ if (do_promote(gl))
+ goto out;
+ do_xmote(gl, gh, gl->gl_target);
+ return;
+ }
+ goto out;
}
/* Some error or failed "try lock" - report it */
if ((ret & LM_OUT_ERROR) ||
@@ -627,7 +632,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
switch(state) {
/* Unlocked due to conversion deadlock, try again */
case LM_ST_UNLOCKED:
-retry:
do_xmote(gl, gh, gl->gl_target);
break;
/* Conversion fails, unlock and try again */
@@ -661,7 +665,8 @@ retry:
do_promote(gl);
}
out:
- clear_bit(GLF_LOCK, &gl->gl_flags);
+ if (!test_bit(GLF_CANCELING, &gl->gl_flags))
+ clear_bit(GLF_LOCK, &gl->gl_flags);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -807,6 +812,7 @@ skip_inval:
}
if (ls->ls_ops->lm_lock) {
+ set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
spin_lock(&gl->gl_lockref.lock);
@@ -825,6 +831,7 @@ skip_inval:
/* The operation will be completed asynchronously. */
return;
}
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
}
/* Complete the operation now. */
@@ -843,12 +850,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
- struct gfs2_holder *gh = NULL;
+ struct gfs2_holder *gh;
if (test_bit(GLF_LOCK, &gl->gl_flags))
return;
set_bit(GLF_LOCK, &gl->gl_flags);
+ /* While a demote is in progress, the GLF_LOCK flag must be set. */
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
@@ -860,18 +868,22 @@ __acquires(&gl->gl_lockref.lock)
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
+ do_xmote(gl, NULL, gl->gl_target);
+ return;
} else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl);
if (do_promote(gl))
goto out_unlock;
gh = find_first_waiter(gl);
+ if (!gh)
+ goto out_unlock;
gl->gl_target = gh->gh_state;
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
do_error(gl, 0); /* Fail queued try locks */
+ do_xmote(gl, gh, gl->gl_target);
+ return;
}
- do_xmote(gl, gh, gl->gl_target);
- return;
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -898,12 +910,8 @@ void glock_set_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = object;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
- pr_warn("glock=%u/%llx\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
gfs2_dump_glock(NULL, gl, true);
- }
}
/**
@@ -919,12 +927,8 @@ void glock_clear_object(struct gfs2_glock *gl, void *object)
prev_object = gl->gl_object;
gl->gl_object = NULL;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
- pr_warn("glock=%u/%llx\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
+ if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
gfs2_dump_glock(NULL, gl, true);
- }
}
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
@@ -959,48 +963,56 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
gfs2_holder_uninit(&gh);
}
-static bool gfs2_try_evict(struct gfs2_glock *gl)
+static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip && !igrab(&ip->i_inode))
+ ip = NULL;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (ip) {
+ wait_on_inode(&ip->i_inode);
+ if (is_bad_inode(&ip->i_inode)) {
+ iput(&ip->i_inode);
+ ip = NULL;
+ }
+ }
+ return ip;
+}
+
+static void gfs2_try_evict(struct gfs2_glock *gl)
{
struct gfs2_inode *ip;
- bool evicted = false;
/*
* If there is contention on the iopen glock and we have an inode, try
- * to grab and release the inode so that it can be evicted. This will
- * allow the remote node to go ahead and delete the inode without us
- * having to do it, which will avoid rgrp glock thrashing.
+ * to grab and release the inode so that it can be evicted. The
+ * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
+ * should not be deleted locally. This will allow the remote node to
+ * go ahead and delete the inode without us having to do it, which will
+ * avoid rgrp glock thrashing.
*
* The remote node is likely still holding the corresponding inode
* glock, so it will run before we get to verify that the delete has
- * happened below.
+ * happened below. (Verification is triggered by the call to
+ * gfs2_queue_verify_delete() in gfs2_evict_inode().)
*/
- spin_lock(&gl->gl_lockref.lock);
- ip = gl->gl_object;
- if (ip && !igrab(&ip->i_inode))
- ip = NULL;
- spin_unlock(&gl->gl_lockref.lock);
+ ip = gfs2_grab_existing_inode(gl);
if (ip) {
- gl->gl_no_formal_ino = ip->i_no_formal_ino;
- set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ set_bit(GLF_DEFER_DELETE, &gl->gl_flags);
d_prune_aliases(&ip->i_inode);
iput(&ip->i_inode);
+ clear_bit(GLF_DEFER_DELETE, &gl->gl_flags);
/* If the inode was evicted, gl->gl_object will now be NULL. */
- spin_lock(&gl->gl_lockref.lock);
- ip = gl->gl_object;
- if (ip) {
- clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
- if (!igrab(&ip->i_inode))
- ip = NULL;
- }
- spin_unlock(&gl->gl_lockref.lock);
+ ip = gfs2_grab_existing_inode(gl);
if (ip) {
gfs2_glock_poke(ip->i_gl);
iput(&ip->i_inode);
}
- evicted = !ip;
}
- return evicted;
}
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
@@ -1009,18 +1021,18 @@ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
return false;
- return queue_delayed_work(sdp->sd_delete_wq,
- &gl->gl_delete, 0);
+ return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
}
-static bool gfs2_queue_verify_evict(struct gfs2_glock *gl)
+bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ unsigned long delay;
- if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags))
+ if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
return false;
- return queue_delayed_work(sdp->sd_delete_wq,
- &gl->gl_delete, 5 * HZ);
+ delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
+ return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
}
static void delete_work_func(struct work_struct *work)
@@ -1028,43 +1040,21 @@ static void delete_work_func(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct inode *inode;
- u64 no_addr = gl->gl_name.ln_number;
+ bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
- if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) {
- /*
- * If we can evict the inode, give the remote node trying to
- * delete the inode some time before verifying that the delete
- * has happened. Otherwise, if we cause contention on the inode glock
- * immediately, the remote node will think that we still have
- * the inode in use, and so it will give up waiting.
- *
- * If we can't evict the inode, signal to the remote node that
- * the inode is still in use. We'll later try to delete the
- * inode locally in gfs2_evict_inode.
- *
- * FIXME: We only need to verify that the remote node has
- * deleted the inode because nodes before this remote delete
- * rework won't cooperate. At a later time, when we no longer
- * care about compatibility with such nodes, we can skip this
- * step entirely.
- */
- if (gfs2_try_evict(gl)) {
- if (test_bit(SDF_KILL, &sdp->sd_flags))
- goto out;
- if (gfs2_queue_verify_evict(gl))
- return;
- }
- goto out;
- }
+ if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
+ gfs2_try_evict(gl);
+
+ if (verify_delete) {
+ u64 no_addr = gl->gl_name.ln_number;
+ struct inode *inode;
- if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) {
inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
GFS2_BLKST_UNLINKED);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -EAGAIN &&
!test_bit(SDF_KILL, &sdp->sd_flags) &&
- gfs2_queue_verify_evict(gl))
+ gfs2_queue_verify_delete(gl, true))
return;
} else {
d_prune_aliases(inode);
@@ -1072,7 +1062,6 @@ static void delete_work_func(struct work_struct *work)
}
}
-out:
gfs2_glock_put(gl);
}
@@ -1101,7 +1090,7 @@ static void glock_work_func(struct work_struct *work)
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- gfs2_set_demote(gl);
+ gfs2_set_demote(GLF_DEMOTE, gl);
}
}
run_queue(gl, 0);
@@ -1177,7 +1166,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
- struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number,
.ln_type = glops->go_type,
.ln_sbd = sdp };
@@ -1218,8 +1206,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (glops->go_instantiate)
gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
gl->gl_name = name;
+ lockref_init(&gl->gl_lockref);
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
- gl->gl_lockref.count = 1;
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1240,7 +1228,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping = gfs2_glock2aspace(gl);
if (mapping) {
mapping->a_ops = &gfs2_meta_aops;
- mapping->host = s->s_bdev->bd_mapping->host;
+ mapping->host = sdp->sd_inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->i_private_data = NULL;
@@ -1443,10 +1431,7 @@ out:
static void request_demote(struct gfs2_glock *gl, unsigned int state,
unsigned long delay, bool remote)
{
- if (delay)
- set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- else
- gfs2_set_demote(gl);
+ gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
@@ -1482,9 +1467,7 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
if (!(gh->gh_flags & GL_NOPID))
return true;
- if (gh->gh_state == LM_ST_UNLOCKED)
- return true;
- return false;
+ return !test_bit(HIF_HOLDER, &gh->gh_iflags);
}
/**
@@ -1503,7 +1486,6 @@ __acquires(&gl->gl_lockref.lock)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
int try_futile = 0;
@@ -1539,21 +1521,11 @@ fail:
gfs2_holder_wake(gh);
return;
}
- if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
- continue;
}
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
- if (likely(insert_pt == NULL)) {
- list_add_tail(&gh->gh_list, &gl->gl_holders);
- return;
- }
- list_add_tail(&gh->gh_list, insert_pt);
- spin_unlock(&gl->gl_lockref.lock);
- if (sdp->sd_lockstruct.ls_ops->lm_cancel)
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
- spin_lock(&gl->gl_lockref.lock);
+ list_add_tail(&gh->gh_list, &gl->gl_holders);
return;
trap_recursive:
@@ -1636,12 +1608,6 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
-static inline bool needs_demote(struct gfs2_glock *gl)
-{
- return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
- test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
-}
-
static void __gfs2_glock_dq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
@@ -1650,8 +1616,8 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
/*
* This holder should not be cached, so mark it for demote.
- * Note: this should be done before the check for needs_demote
- * below.
+ * Note: this should be done before the glock_needs_demote
+ * check below.
*/
if (gh->gh_flags & GL_NOCACHE)
request_demote(gl, LM_ST_UNLOCKED, 0, false);
@@ -1664,7 +1630,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
* If there hasn't been a demote request we are done.
* (Let the remaining holders, if any, keep holding it.)
*/
- if (!needs_demote(gl)) {
+ if (!glock_needs_demote(gl)) {
if (list_empty(&gl->gl_holders))
fast_path = 1;
}
@@ -1699,11 +1665,19 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
}
if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
- !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
+ test_bit(GLF_LOCK, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+ !test_bit(GLF_CANCELING, &gl->gl_flags)) {
+ set_bit(GLF_CANCELING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_CANCELING, &gl->gl_flags);
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ if (!gfs2_holder_queued(gh))
+ goto out;
}
/*
@@ -1949,6 +1923,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
gl->gl_reply = ret;
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
@@ -2118,7 +2093,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
- clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags);
+ clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
if (cancel_delayed_work(&gl->gl_delete))
gfs2_glock_put(gl);
}
@@ -2349,6 +2324,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'f';
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
*p++ = 'i';
+ if (test_bit(GLF_PENDING_REPLY, gflags))
+ *p++ = 'R';
if (test_bit(GLF_HAVE_REPLY, gflags))
*p++ = 'r';
if (test_bit(GLF_INITIAL, gflags))
@@ -2371,8 +2348,12 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'N';
if (test_bit(GLF_TRY_TO_EVICT, gflags))
*p++ = 'e';
- if (test_bit(GLF_VERIFY_EVICT, gflags))
+ if (test_bit(GLF_VERIFY_DELETE, gflags))
*p++ = 'E';
+ if (test_bit(GLF_DEFER_DELETE, gflags))
+ *p++ = 's';
+ if (test_bit(GLF_CANCELING, gflags))
+ *p++ = 'C';
*p = 0;
return buf;
}
@@ -2768,25 +2749,18 @@ static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
i->file = NULL;
}
- rcu_read_lock();
for(;; i->fd++) {
- struct inode *inode;
-
- i->file = task_lookup_next_fdget_rcu(i->task, &i->fd);
+ i->file = fget_task_next(i->task, &i->fd);
if (!i->file) {
i->fd = 0;
break;
}
- inode = file_inode(i->file);
- if (inode->i_sb == i->sb)
+ if (file_inode(i->file)->i_sb == i->sb)
break;
- rcu_read_unlock();
fput(i->file);
- rcu_read_lock();
}
- rcu_read_unlock();
return i->file;
}
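
The randomized delay introduced in gfs2_queue_verify_delete() above spreads repeat delete verification over a 1 to 10 second window (HZ + get_random_long() % (HZ * 9) jiffies) instead of the previous fixed 5 * HZ. The following is a minimal userspace sketch of that delay calculation only, not part of the patch; it assumes HZ is 100 and substitutes rand() for the kernel's get_random_long().

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HZ 100	/* assumed tick rate; the real value depends on the kernel config */

/* Mirrors: delay = later ? HZ + get_random_long() % (HZ * 9) : 0; */
static unsigned long verify_delete_delay(int later)
{
	return later ? HZ + (unsigned long)rand() % (HZ * 9) : 0;
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 5; i++) {
		unsigned long d = verify_delete_delay(1);
		printf("retry delay: %3lu jiffies (~%.2f s)\n", d, d / (double)HZ);
	}
	printf("immediate:   %3lu jiffies\n", verify_delete_delay(0));
	return 0;
}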