author     Bob Peterson <rpeterso@redhat.com>          2020-10-12 21:57:37 +0300
committer  Andreas Gruenbacher <agruenba@redhat.com>   2020-10-15 18:04:53 +0300
commit     e2c6c8a797eea88b267743d99d593d368aa43481 (patch)
tree       aca3a8209bc80bc402d9c11232d86a58ce743594 /fs/gfs2/glock.c
parent     b2a846dbef4ef54ef032f0f5ee188c609a0278a7 (diff)
download   linux-e2c6c8a797eea88b267743d99d593d368aa43481.tar.xz
gfs2: eliminate GLF_QUEUED flag in favor of list_empty(gl_holders)
Before this patch, glock.c maintained a flag, GLF_QUEUED, which indicated when a glock had a holder queued. It was only checked for inode glocks, although it was set and cleared by all glocks, and it was only used to decide whether the glock should be held for the minimum hold time before being released.

The problem is that the flag was not accurate at all. If a process held the glock, the flag was set. When the holder was dequeued, the flag was only cleared in cases where the state actually changed, so if the state did not change, the flag could remain set even though nothing was queued. This happens to iopen glocks often: they get held in SH, then the file is closed, but the glock remains in SH mode.

We don't need a special flag to indicate this: we can simply check whether the glock has any items queued on the holders queue, and maintaining the flag is a waste of CPU time. This patch eliminates the flag in favor of simply checking list_empty on the glock holders.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
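To make the change concrete, here is a small standalone C sketch of the idea, not the kernel code: the toy_glock/toy_holder structs and the minimal list helpers below only imitate the kernel's list_head API, but they show how the "queued" state can be read off the holders list itself instead of being tracked in a flag that can go stale.

/*
 * Standalone sketch (NOT kernel code): toy_glock/toy_holder and these
 * minimal list helpers only mimic the kernel's list_head idiom to show
 * that "is anything queued" falls straight out of the data structure,
 * so there is no separate flag to keep in sync.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;
}

struct toy_glock {                 /* stand-in for struct gfs2_glock */
	struct list_head gl_holders;   /* no GLF_QUEUED flag, just the list */
};

struct toy_holder {                /* stand-in for struct gfs2_holder */
	struct list_head gh_list;
};

int main(void)
{
	struct toy_glock gl;
	struct toy_holder gh;

	INIT_LIST_HEAD(&gl.gl_holders);
	INIT_LIST_HEAD(&gh.gh_list);

	/* queue a holder: "queued" is now simply !list_empty() */
	list_add_tail(&gh.gh_list, &gl.gl_holders);
	printf("queued: %d\n", !list_empty(&gl.gl_holders));  /* prints 1 */

	/* dequeue: the derived state cannot go stale the way a flag could */
	list_del(&gh.gh_list);
	printf("queued: %d\n", !list_empty(&gl.gl_holders));  /* prints 0 */

	return 0;
}

In glock.c the same information is already present in gl->gl_holders, which is why the patch can drop the flag without adding any new bookkeeping.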
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c  11
1 file changed, 3 insertions, 8 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 19d4db4c44e7..c0d441228562 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -458,9 +458,6 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 		else
 			gl->gl_lockref.count--;
 	}
-	if (held1 && held2 && list_empty(&gl->gl_holders))
-		clear_bit(GLF_QUEUED, &gl->gl_flags);
-
 	if (new_state != gl->gl_target)
 		/* shorten our minimum hold time */
 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
@@ -1350,7 +1347,6 @@ fail:
 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
 			insert_pt = &gh2->gh_list;
 	}
-	set_bit(GLF_QUEUED, &gl->gl_flags);
 	trace_gfs2_glock_queue(gh, 1);
 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
@@ -1651,16 +1647,15 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 	unsigned long now = jiffies;
 
 	gfs2_glock_hold(gl);
+	spin_lock(&gl->gl_lockref.lock);
 	holdtime = gl->gl_tchange + gl->gl_hold_time;
-	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
+	if (!list_empty(&gl->gl_holders) &&
 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
 		if (time_before(now, holdtime))
 			delay = holdtime - now;
 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
 			delay = gl->gl_hold_time;
 	}
-
-	spin_lock(&gl->gl_lockref.lock);
 	handle_callback(gl, state, delay, true);
 	__gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -2105,7 +2100,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'I';
 	if (test_bit(GLF_FROZEN, gflags))
 		*p++ = 'F';
-	if (test_bit(GLF_QUEUED, gflags))
+	if (!list_empty(&gl->gl_holders))
 		*p++ = 'q';
 	if (test_bit(GLF_LRU, gflags))
 		*p++ = 'L';
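One detail worth noting in the gfs2_glock_cb hunk above: a test_bit() on gl_flags can be done without the glock spinlock, but the holders list is protected by gl->gl_lockref.lock, which is presumably why the patch takes the spinlock before the list_empty() check rather than after it. Below is a minimal userspace sketch of that pattern, using pthreads and made-up names (toy_glock, toy_cb) rather than the gfs2 API.

/*
 * Sketch only: checking list emptiness under the lock that guards the
 * list. toy_glock/toy_cb are made-up names, not the gfs2 API, and the
 * single-pointer "holders" list is a stand-in for gl->gl_holders.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
};

struct toy_glock {
	pthread_mutex_t lock;      /* stands in for gl->gl_lockref.lock */
	struct node *holders;      /* stands in for gl->gl_holders */
};

/* Pick a callback delay the way the hunk above does, but inspect the
 * holders list only while the lock protecting it is held. */
static int toy_cb(struct toy_glock *gl, int hold_time)
{
	int delay = 0;

	pthread_mutex_lock(&gl->lock);
	if (gl->holders != NULL)   /* the "!list_empty(&gl->gl_holders)" check */
		delay = hold_time;
	/* ... the demote/queue-work step would go here, still under the lock ... */
	pthread_mutex_unlock(&gl->lock);
	return delay;
}

int main(void)
{
	struct toy_glock gl = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node n = { NULL };

	printf("delay with no holders: %d\n", toy_cb(&gl, 10));  /* 0 */
	gl.holders = &n;
	printf("delay with one holder: %d\n", toy_cb(&gl, 10));  /* 10 */
	return 0;
}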