diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-10 03:29:57 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-10 03:29:57 +0300 |
| commit | 7141433fbed290f4dd42008d3102db2363275035 (patch) | |
| tree | 38ac5906305c1f1143e0a803d3e7e9dcdebacf7b | |
| parent | 56feb532bb927ae1c26726e2e7c0de95f54a3d67 (diff) | |
| parent | e411d74cc5ba290f85d0dd5e4d1df8f1d6d975d2 (diff) | |
| download | linux-7141433fbed290f4dd42008d3102db2363275035.tar.xz | |
Merge tag 'gfs2-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2
Pull gfs2 updates from Andreas Gruenbacher:
- Prevent rename() from failing with -ESTALE when there are locking
conflicts and retry the operation instead
- Don't fail when fiemap triggers a page fault (xfstest generic/742)
- Fix another locking request cancellation bug
- Minor other fixes and cleanups
* tag 'gfs2-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
gfs2: fiemap page fault fix
gfs2: fix memory leaks in gfs2_fill_super error path
gfs2: Fix use-after-free in iomap inline data write path
gfs2: Fix slab-use-after-free in qd_put
gfs2: Introduce glock_{type,number,sbd} helpers
gfs2: gfs2_glock_hold cleanup
gfs2: Use fixed GL_GLOCK_MIN_HOLD time
gfs2: Fix gfs2_log_get_bio argument type
gfs2: gfs2_chain_bio start sector fix
gfs2: Initialize bio->bi_opf early
gfs2: Rename gfs2_log_submit_{bio -> write}
gfs2: Do not cancel internal demote requests
gfs2: run_queue cleanup
gfs2: Retries missing in gfs2_{rename,exchange}
gfs2: glock cancelation flag fix
| -rw-r--r-- | fs/gfs2/bmap.c | 13 | ||||
| -rw-r--r-- | fs/gfs2/glock.c | 190 | ||||
| -rw-r--r-- | fs/gfs2/glock.h | 9 | ||||
| -rw-r--r-- | fs/gfs2/glops.c | 34 | ||||
| -rw-r--r-- | fs/gfs2/incore.h | 18 | ||||
| -rw-r--r-- | fs/gfs2/inode.c | 34 | ||||
| -rw-r--r-- | fs/gfs2/lock_dlm.c | 28 | ||||
| -rw-r--r-- | fs/gfs2/log.c | 7 | ||||
| -rw-r--r-- | fs/gfs2/lops.c | 51 | ||||
| -rw-r--r-- | fs/gfs2/lops.h | 4 | ||||
| -rw-r--r-- | fs/gfs2/meta_io.c | 6 | ||||
| -rw-r--r-- | fs/gfs2/meta_io.h | 2 | ||||
| -rw-r--r-- | fs/gfs2/ops_fstype.c | 2 | ||||
| -rw-r--r-- | fs/gfs2/quota.c | 5 | ||||
| -rw-r--r-- | fs/gfs2/rgrp.c | 2 | ||||
| -rw-r--r-- | fs/gfs2/super.c | 4 | ||||
| -rw-r--r-- | fs/gfs2/trace_gfs2.h | 50 | ||||
| -rw-r--r-- | fs/gfs2/trans.c | 4 |
18 files changed, 277 insertions, 186 deletions
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 131091520de6..fdcac8e3f2ba 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1127,10 +1127,18 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, goto out_unlock; break; default: - goto out_unlock; + goto out; } ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp); + if (ret) + goto out_unlock; + +out: + if (iomap->type == IOMAP_INLINE) { + iomap->private = metapath_dibh(&mp); + get_bh(iomap->private); + } out_unlock: release_metapath(&mp); @@ -1144,6 +1152,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); + if (iomap->private) + brelse(iomap->private); + switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) { case IOMAP_WRITE: if (flags & IOMAP_DIRECT) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 92e029104d8a..6fb2731e8be1 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -60,7 +60,8 @@ struct gfs2_glock_iter { typedef void (*glock_examiner) (struct gfs2_glock * gl); -static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); +static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, + unsigned int target, bool may_cancel); static void request_demote(struct gfs2_glock *gl, unsigned int state, unsigned long delay, bool remote); @@ -146,7 +147,7 @@ static void __gfs2_glock_free(struct gfs2_glock *gl) } void gfs2_glock_free(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); __gfs2_glock_free(gl); if (atomic_dec_and_test(&sdp->sd_glock_disposal)) @@ -154,7 +155,7 @@ void gfs2_glock_free(struct gfs2_glock *gl) { } void gfs2_glock_free_later(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); spin_lock(&lru_lock); list_add(&gl->gl_lru, &sdp->sd_dead_glocks); @@ -184,8 +185,8 @@ static void 
gfs2_free_dead_glocks(struct gfs2_sbd *sdp) struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl) { - GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); - lockref_get(&gl->gl_lockref); + if (!lockref_get_not_dead(&gl->gl_lockref)) + GLOCK_BUG_ON(gl, 1); return gl; } @@ -218,7 +219,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) * work queue. */ static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) { /* @@ -234,7 +235,7 @@ static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { static void __gfs2_glock_put(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct address_space *mapping = gfs2_glock2aspace(gl); lockref_mark_dead(&gl->gl_lockref); @@ -356,7 +357,7 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) smp_mb__after_atomic(); wake_up_bit(&gh->gh_iflags, HIF_WAIT); if (gh->gh_flags & GL_ASYNC) { - struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gh->gh_gl); wake_up(&sdp->sd_async_glock_wait); } @@ -458,7 +459,7 @@ done: static void do_promote(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_holder *gh, *current_gh; if (gfs2_withdrawn(sdp)) { @@ -538,7 +539,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) static void gfs2_set_demote(int nr, struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); set_bit(nr, &gl->gl_flags); smp_mb(); @@ -600,17 +601,19 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) switch(gl->gl_state) { /* Unlocked due to conversion deadlock, try again */ case LM_ST_UNLOCKED: - do_xmote(gl, gh, gl->gl_target); + do_xmote(gl, gh, gl->gl_target, + 
!test_bit(GLF_DEMOTE_IN_PROGRESS, + &gl->gl_flags)); break; /* Conversion fails, unlock and try again */ case LM_ST_SHARED: case LM_ST_DEFERRED: - do_xmote(gl, gh, LM_ST_UNLOCKED); + do_xmote(gl, gh, LM_ST_UNLOCKED, false); break; default: /* Everything else */ - fs_err(gl->gl_name.ln_sbd, + fs_err(glock_sbd(gl), "glock %u:%llu requested=%u ret=%u\n", - gl->gl_name.ln_type, gl->gl_name.ln_number, + glock_type(gl), glock_number(gl), gl->gl_req, ret); GLOCK_BUG_ON(gl, 1); } @@ -638,7 +641,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) } out: if (!test_bit(GLF_CANCELING, &gl->gl_flags)) - clear_bit(GLF_LOCK, &gl->gl_flags); + clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags); } /** @@ -646,16 +649,17 @@ out: * @gl: The lock state * @gh: The holder (only for promotes) * @target: The target lock state + * @may_cancel: Operation may be canceled * */ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, - unsigned int target) + unsigned int target, bool may_cancel) __releases(&gl->gl_lockref.lock) __acquires(&gl->gl_lockref.lock) { const struct gfs2_glock_operations *glops = gl->gl_ops; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct lm_lockstruct *ls = &sdp->sd_lockstruct; int ret; @@ -703,17 +707,20 @@ skip_inval: } if (ls->ls_ops->lm_lock) { - set_bit(GLF_PENDING_REPLY, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0); spin_lock(&gl->gl_lockref.lock); if (!ret) { + if (may_cancel) { + set_bit(GLF_MAY_CANCEL, &gl->gl_flags); + smp_mb__after_atomic(); + wake_up_bit(&gl->gl_flags, GLF_LOCK); + } /* The operation will be completed asynchronously. 
*/ gl->gl_lockref.count++; return; } - clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); if (ret == -ENODEV) { /* @@ -753,13 +760,10 @@ __acquires(&gl->gl_lockref.lock) if (test_bit(GLF_LOCK, &gl->gl_flags)) return; - set_bit(GLF_LOCK, &gl->gl_flags); /* - * The GLF_DEMOTE_IN_PROGRESS flag is only set intermittently during - * locking operations. We have just started a locking operation by - * setting the GLF_LOCK flag, so the GLF_DEMOTE_IN_PROGRESS flag must - * be cleared. + * The GLF_DEMOTE_IN_PROGRESS flag must only be set when the GLF_LOCK + * flag is set as well. */ GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); @@ -770,39 +774,36 @@ __acquires(&gl->gl_lockref.lock) } if (find_first_holder(gl)) - goto out_unlock; + return; if (nonblock) goto out_sched; set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; - do_xmote(gl, NULL, gl->gl_target); + set_bit(GLF_LOCK, &gl->gl_flags); + do_xmote(gl, NULL, gl->gl_target, false); return; } promote: do_promote(gl); if (find_first_holder(gl)) - goto out_unlock; + return; gh = find_first_waiter(gl); if (!gh) - goto out_unlock; + return; if (nonblock) goto out_sched; gl->gl_target = gh->gh_state; if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) do_error(gl, 0); /* Fail queued try locks */ - do_xmote(gl, gh, gl->gl_target); + set_bit(GLF_LOCK, &gl->gl_flags); + do_xmote(gl, gh, gl->gl_target, true); return; out_sched: - clear_bit(GLF_LOCK, &gl->gl_flags); gl->gl_lockref.count++; gfs2_glock_queue_work(gl, 0); - return; - -out_unlock: - clear_bit(GLF_LOCK, &gl->gl_flags); } /** @@ -818,7 +819,7 @@ void glock_set_object(struct gfs2_glock *gl, void *object) prev_object = gl->gl_object; gl->gl_object = object; spin_unlock(&gl->gl_lockref.lock); - if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) + if (gfs2_assert_warn(glock_sbd(gl), prev_object == NULL)) gfs2_dump_glock(NULL, gl, true); } @@ -835,7 +836,7 @@ 
void glock_clear_object(struct gfs2_glock *gl, void *object) prev_object = gl->gl_object; gl->gl_object = NULL; spin_unlock(&gl->gl_lockref.lock); - if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) + if (gfs2_assert_warn(glock_sbd(gl), prev_object == object)) gfs2_dump_glock(NULL, gl, true); } @@ -925,7 +926,7 @@ static void gfs2_try_to_evict(struct gfs2_glock *gl) bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) return false; @@ -934,7 +935,7 @@ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); unsigned long delay; if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags)) @@ -947,7 +948,7 @@ static void delete_work_func(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags); /* @@ -960,7 +961,7 @@ static void delete_work_func(struct work_struct *work) gfs2_try_to_evict(gl); if (verify_delete) { - u64 no_addr = gl->gl_name.ln_number; + u64 no_addr = glock_number(gl); struct inode *inode; inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, @@ -994,7 +995,7 @@ static void glock_work_func(struct work_struct *work) if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && gl->gl_demote_state != LM_ST_EXCLUSIVE) { - if (gl->gl_name.ln_type == LM_TYPE_INODE) { + if (glock_type(gl) == LM_TYPE_INODE) { unsigned long holdtime, now = jiffies; holdtime = gl->gl_tchange + gl->gl_hold_time; @@ -1136,7 +1137,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_object 
= NULL; gl->gl_hold_time = GL_GLOCK_DFT_HOLD; INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); - if (gl->gl_name.ln_type == LM_TYPE_IOPEN) + if (glock_type(gl) == LM_TYPE_IOPEN) INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); mapping = gfs2_glock2aspace(gl); @@ -1284,31 +1285,45 @@ static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions * @num_gh: the number of holders in the array * @ghs: the glock holder array + * @retries: number of retries attempted so far * * Returns: 0 on success, meaning all glocks have been granted and are held. * -ESTALE if the request timed out, meaning all glocks were released, * and the caller should retry the operation. */ -int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) +int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs, + unsigned int retries) { - struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; - int i, ret = 0, timeout = 0; + struct gfs2_sbd *sdp = glock_sbd(ghs[0].gh_gl); unsigned long start_time = jiffies; + int i, ret = 0; + long timeout; might_sleep(); - /* - * Total up the (minimum hold time * 2) of all glocks and use that to - * determine the max amount of time we should wait. - */ - for (i = 0; i < num_gh; i++) - timeout += ghs[i].gh_gl->gl_hold_time << 1; - if (!wait_event_timeout(sdp->sd_async_glock_wait, + timeout = GL_GLOCK_MIN_HOLD; + if (retries) { + unsigned int max_shift; + long incr; + + /* Add a random delay and increase the timeout exponentially. 
*/ + max_shift = BITS_PER_LONG - 2 - __fls(GL_GLOCK_HOLD_INCR); + incr = min(GL_GLOCK_HOLD_INCR << min(retries - 1, max_shift), + 10 * HZ - GL_GLOCK_MIN_HOLD); + schedule_timeout_interruptible(get_random_long() % (incr / 3)); + if (signal_pending(current)) + goto interrupted; + timeout += (incr / 3) + get_random_long() % (incr / 3); + } + + if (!wait_event_interruptible_timeout(sdp->sd_async_glock_wait, !glocks_pending(num_gh, ghs), timeout)) { ret = -ESTALE; /* request timed out. */ goto out; } + if (signal_pending(current)) + goto interrupted; for (i = 0; i < num_gh; i++) { struct gfs2_holder *gh = &ghs[i]; @@ -1332,6 +1347,10 @@ out: } } return ret; + +interrupted: + ret = -EINTR; + goto out; } /** @@ -1418,7 +1437,7 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh) static inline void add_to_queue(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_holder *gh2; GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); @@ -1451,11 +1470,11 @@ trap_recursive: fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); fs_err(sdp, "lock type: %d req lock state : %d\n", - gh2->gh_gl->gl_name.ln_type, gh2->gh_state); + glock_type(gh2->gh_gl), gh2->gh_state); fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); fs_err(sdp, "lock type: %d req lock state : %d\n", - gh->gh_gl->gl_name.ln_type, gh->gh_state); + glock_type(gh->gh_gl), gh->gh_state); gfs2_dump_glock(NULL, gl, true); BUG(); } @@ -1472,7 +1491,7 @@ trap_recursive: int gfs2_glock_nq(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); int error; if (gfs2_withdrawn(sdp)) @@ -1545,6 +1564,8 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh) list_del_init(&gh->gh_list); clear_bit(HIF_HOLDER, &gh->gh_iflags); 
trace_gfs2_glock_queue(gh, 0); + if (test_bit(HIF_WAIT, &gh->gh_iflags)) + gfs2_holder_wake(gh); /* * If there hasn't been a demote request we are done. @@ -1559,7 +1580,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh) gl->gl_lockref.count++; if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && !test_bit(GLF_DEMOTE, &gl->gl_flags) && - gl->gl_name.ln_type == LM_TYPE_INODE) + glock_type(gl) == LM_TYPE_INODE) delay = gl->gl_hold_time; gfs2_glock_queue_work(gl, delay); } @@ -1574,6 +1595,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; +again: spin_lock(&gl->gl_lockref.lock); if (!gfs2_holder_queued(gh)) { /* @@ -1588,13 +1610,25 @@ void gfs2_glock_dq(struct gfs2_holder *gh) test_bit(GLF_LOCK, &gl->gl_flags) && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && !test_bit(GLF_CANCELING, &gl->gl_flags)) { + if (!test_bit(GLF_MAY_CANCEL, &gl->gl_flags)) { + struct wait_queue_head *wq; + DEFINE_WAIT(wait); + + wq = bit_waitqueue(&gl->gl_flags, GLF_LOCK); + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&gl->gl_lockref.lock); + schedule(); + finish_wait(wq, &wait); + goto again; + } + set_bit(GLF_CANCELING, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); - gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); + glock_sbd(gl)->sd_lockstruct.ls_ops->lm_cancel(gl); wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); spin_lock(&gl->gl_lockref.lock); clear_bit(GLF_CANCELING, &gl->gl_flags); - clear_bit(GLF_LOCK, &gl->gl_flags); + clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags); if (!gfs2_holder_queued(gh)) goto out; } @@ -1764,7 +1798,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) gfs2_glock_hold(gl); spin_lock(&gl->gl_lockref.lock); if (!list_empty(&gl->gl_holders) && - gl->gl_name.ln_type == LM_TYPE_INODE) { + glock_type(gl) == LM_TYPE_INODE) { unsigned long now = jiffies; unsigned long holdtime; @@ -1821,10 +1855,10 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl) void 
gfs2_glock_complete(struct gfs2_glock *gl, int ret) { - struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; + struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct; spin_lock(&gl->gl_lockref.lock); - clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); + clear_bit(GLF_MAY_CANCEL, &gl->gl_flags); gl->gl_reply = ret; if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { @@ -1849,9 +1883,9 @@ static int glock_cmp(void *priv, const struct list_head *a, gla = list_entry(a, struct gfs2_glock, gl_lru); glb = list_entry(b, struct gfs2_glock, gl_lru); - if (gla->gl_name.ln_number > glb->gl_name.ln_number) + if (glock_number(gla) > glock_number(glb)) return 1; - if (gla->gl_name.ln_number < glb->gl_name.ln_number) + if (glock_number(gla) < glock_number(glb)) return -1; return 0; @@ -1859,7 +1893,7 @@ static int glock_cmp(void *priv, const struct list_head *a, static bool can_free_glock(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); return !test_bit(GLF_LOCK, &gl->gl_flags) && !gl->gl_lockref.count && @@ -1981,7 +2015,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) rhashtable_walk_start(&iter); while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) { - if (gl->gl_name.ln_sbd == sdp) + if (glock_sbd(gl) == sdp) examiner(gl); } @@ -2001,8 +2035,8 @@ void gfs2_cancel_delete_work(struct gfs2_glock *gl) static void flush_delete_work(struct gfs2_glock *gl) { - if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + if (glock_type(gl) == LM_TYPE_IOPEN) { + struct gfs2_sbd *sdp = glock_sbd(gl); if (cancel_delayed_work(&gl->gl_delete)) { queue_delayed_work(sdp->sd_delete_wq, @@ -2231,8 +2265,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'y'; if (test_bit(GLF_LFLUSH, gflags)) *p++ = 'f'; - if (test_bit(GLF_PENDING_REPLY, gflags)) - *p++ = 'R'; + if (test_bit(GLF_MAY_CANCEL, gflags)) + *p++ = 'c'; 
if (test_bit(GLF_HAVE_REPLY, gflags)) *p++ = 'r'; if (test_bit(GLF_INITIAL, gflags)) @@ -2287,7 +2321,7 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) unsigned long long dtime; const struct gfs2_holder *gh; char gflags_buf[32]; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; unsigned long nrpages = 0; @@ -2306,8 +2340,8 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d " "v:%d r:%d m:%ld p:%lu\n", fs_id_buf, state2str(gl->gl_state), - gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number, + glock_type(gl), + (unsigned long long) glock_number(gl), gflags2str(gflags_buf, gl), state2str(gl->gl_target), state2str(gl->gl_demote_state), dtime, @@ -2327,8 +2361,8 @@ static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) struct gfs2_glock *gl = iter_ptr; seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n", - gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number, + glock_type(gl), + (unsigned long long) glock_number(gl), (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], @@ -2444,7 +2478,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) gl = NULL; break; } - if (gl->gl_name.ln_sbd != gi->sdp) + if (glock_sbd(gl) != gi->sdp) continue; if (n <= 1) { if (!lockref_get_not_dead(&gl->gl_lockref)) @@ -2740,8 +2774,8 @@ static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr) gl = GFS2_I(inode)->i_iopen_gh.gh_gl; if (gl) { seq_printf(seq, "%d %u %u/%llx\n", - i->tgid, i->fd, gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number); + i->tgid, i->fd, glock_type(gl), + (unsigned long long) glock_number(gl)); } 
gfs2_glockfd_seq_show_flock(seq, i); inode_unlock_shared(inode); diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 55d5985f32a0..6341ac9b863f 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -126,7 +126,7 @@ enum { #define GL_GLOCK_MAX_HOLD (long)(HZ / 5) #define GL_GLOCK_DFT_HOLD (long)(HZ / 5) -#define GL_GLOCK_MIN_HOLD (long)(10) +#define GL_GLOCK_MIN_HOLD (long)(HZ / 100) #define GL_GLOCK_HOLD_INCR (long)(HZ / 20) #define GL_GLOCK_HOLD_DECR (long)(HZ / 40) @@ -204,7 +204,8 @@ int gfs2_glock_poll(struct gfs2_holder *gh); int gfs2_instantiate(struct gfs2_holder *gh); int gfs2_glock_holder_ready(struct gfs2_holder *gh); int gfs2_glock_wait(struct gfs2_holder *gh); -int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs); +int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs, + unsigned int retries); void gfs2_glock_dq(struct gfs2_holder *gh); void gfs2_glock_dq_wait(struct gfs2_holder *gh); void gfs2_glock_dq_uninit(struct gfs2_holder *gh); @@ -221,11 +222,11 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, BUG(); } } while(0) #define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \ gfs2_dump_glock(NULL, gl, true); \ - gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \ + gfs2_assert_warn(glock_sbd(gl), (x)); } } \ while (0) #define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \ gfs2_dump_glock(NULL, gl, true); \ - gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \ + gfs2_assert_withdraw(glock_sbd(gl), (x)); } } \ while (0) __printf(2, 3) diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 2173ccf5034b..ba61649368bf 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -32,7 +32,7 @@ struct workqueue_struct *gfs2_freeze_wq; static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); fs_err(sdp, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page " @@ -40,7 +40,7 
@@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) bh, (unsigned long long)bh->b_blocknr, bh->b_state, bh->b_folio->mapping, bh->b_folio->flags.f); fs_err(sdp, "AIL glock %u:%llu mapping %p\n", - gl->gl_name.ln_type, gl->gl_name.ln_number, + glock_type(gl), glock_number(gl), gfs2_glock2aspace(gl)); gfs2_lm(sdp, "AIL error\n"); gfs2_withdraw(sdp); @@ -58,7 +58,7 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, unsigned int nr_revokes) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct list_head *head = &gl->gl_ail_list; struct gfs2_bufdata *bd, *tmp; struct buffer_head *bh; @@ -86,7 +86,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, static int gfs2_ail_empty_gl(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_trans tr; unsigned int revokes; int ret = 0; @@ -139,7 +139,7 @@ flush: void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); unsigned int revokes = atomic_read(&gl->gl_ail_count); int ret; @@ -163,7 +163,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) static int gfs2_rgrp_metasync(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct address_space *metamapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; @@ -191,7 +191,7 @@ static int gfs2_rgrp_metasync(struct gfs2_glock *gl) static int rgrp_go_sync(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); int error; @@ -220,7 +220,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl) static void rgrp_go_inval(struct gfs2_glock 
*gl, int flags) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct address_space *mapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; @@ -290,7 +290,7 @@ int gfs2_inode_metasync(struct gfs2_glock *gl) filemap_fdatawrite(metamapping); error = filemap_fdatawait(metamapping); if (error) - gfs2_io_error(gl->gl_name.ln_sbd); + gfs2_io_error(glock_sbd(gl)); return error; } @@ -317,7 +317,7 @@ static int inode_go_sync(struct gfs2_glock *gl) GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); - gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | + gfs2_log_flush(glock_sbd(gl), gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_INODE_GO_SYNC); filemap_fdatawrite(metamapping); if (isreg) { @@ -359,7 +359,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) { struct gfs2_inode *ip = gfs2_glock2inode(gl); - gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count)); + gfs2_assert_withdraw(glock_sbd(gl), !atomic_read(&gl->gl_ail_count)); if (flags & DIO_METADATA) { struct address_space *mapping = gfs2_glock2aspace(gl); @@ -372,11 +372,11 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) } } - if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { - gfs2_log_flush(gl->gl_name.ln_sbd, NULL, + if (ip == GFS2_I(glock_sbd(gl)->sd_rindex)) { + gfs2_log_flush(glock_sbd(gl), NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_INODE_GO_INVAL); - gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; + glock_sbd(gl)->sd_rindex_uptodate = 0; } if (ip && S_ISREG(ip->i_inode.i_mode)) truncate_inode_pages(ip->i_inode.i_mapping, 0); @@ -567,7 +567,7 @@ static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl, static void freeze_go_callback(struct gfs2_glock *gl, bool remote) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct super_block *sb = sdp->sd_vfs; if (!remote || @@ -596,7 +596,7 @@ static 
void freeze_go_callback(struct gfs2_glock *gl, bool remote) */ static int freeze_go_xmote_bh(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_log_header_host head; @@ -626,7 +626,7 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl) static void iopen_go_callback(struct gfs2_glock *gl, bool remote) { struct gfs2_inode *ip = gl->gl_object; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); if (!remote || test_bit(SDF_KILL, &sdp->sd_flags)) return; diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index d05d8fe4e456..61465777826a 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -326,7 +326,7 @@ enum { GLF_BLOCKING = 15, GLF_TRY_TO_EVICT = 17, /* iopen glocks only */ GLF_VERIFY_DELETE = 18, /* iopen glocks only */ - GLF_PENDING_REPLY = 19, + GLF_MAY_CANCEL = 19, GLF_DEFER_DELETE = 20, /* iopen glocks only */ GLF_CANCELING = 21, }; @@ -369,6 +369,16 @@ struct gfs2_glock { struct rhash_head gl_node; }; +static inline unsigned int glock_type(const struct gfs2_glock *gl) +{ + return gl->gl_name.ln_type; +} + +static inline u64 glock_number(const struct gfs2_glock *gl) +{ + return gl->gl_name.ln_number; +} + enum { GIF_QD_LOCKED = 1, GIF_SW_PAGED = 3, @@ -839,6 +849,8 @@ struct gfs2_sbd { struct dentry *debugfs_dir; /* debugfs directory */ }; +#define glock_sbd(gl) ((gl)->gl_name.ln_sbd) + #define GFS2_BAD_INO 1 static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp) @@ -853,9 +865,9 @@ static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) { - const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + const struct gfs2_sbd *sdp = glock_sbd(gl); preempt_disable(); - this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++; + 
this_cpu_ptr(sdp->sd_lkstats)->lkstats[glock_type(gl)].stats[which]++; preempt_enable(); } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index c02ebf0ca625..8344040ecaf7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1495,7 +1495,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, unsigned int num_gh; int dir_rename = 0; struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, }; - unsigned int x; + unsigned int retries = 0, x; int error; gfs2_holder_mark_uninitialized(&r_gh); @@ -1545,12 +1545,17 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, num_gh++; } +again: for (x = 0; x < num_gh; x++) { error = gfs2_glock_nq(ghs + x); if (error) goto out_gunlock; } - error = gfs2_glock_async_wait(num_gh, ghs); + error = gfs2_glock_async_wait(num_gh, ghs, retries); + if (error == -ESTALE) { + retries++; + goto again; + } if (error) goto out_gunlock; @@ -1739,7 +1744,7 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry, struct gfs2_sbd *sdp = GFS2_SB(odir); struct gfs2_holder ghs[4], r_gh; unsigned int num_gh; - unsigned int x; + unsigned int retries = 0, x; umode_t old_mode = oip->i_inode.i_mode; umode_t new_mode = nip->i_inode.i_mode; int error; @@ -1783,13 +1788,18 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry, gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh); num_gh++; +again: for (x = 0; x < num_gh; x++) { error = gfs2_glock_nq(ghs + x); if (error) goto out_gunlock; } - error = gfs2_glock_async_wait(num_gh, ghs); + error = gfs2_glock_async_wait(num_gh, ghs, retries); + if (error == -ESTALE) { + retries++; + goto again; + } if (error) goto out_gunlock; @@ -2182,6 +2192,14 @@ static int gfs2_getattr(struct mnt_idmap *idmap, return 0; } +static bool fault_in_fiemap(struct fiemap_extent_info *fi) +{ + struct fiemap_extent __user *dest = fi->fi_extents_start; + size_t size = sizeof(*dest) * fi->fi_extents_max; + + return fault_in_safe_writeable((char __user 
*)dest, size) == 0; +} + static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { @@ -2191,14 +2209,22 @@ static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, inode_lock_shared(inode); +retry: ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); if (ret) goto out; + pagefault_disable(); ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops); + pagefault_enable(); gfs2_glock_dq_uninit(&gh); + if (ret == -EFAULT && fault_in_fiemap(fieinfo)) { + fieinfo->fi_extents_mapped = 0; + goto retry; + } + out: inode_unlock_shared(inode); return ret; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index b8d249925395..53b8419ee15f 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -74,13 +74,13 @@ static inline void gfs2_update_reply_times(struct gfs2_glock *gl, bool blocking) { struct gfs2_pcpu_lkstats *lks; - const unsigned gltype = gl->gl_name.ln_type; + const unsigned gltype = glock_type(gl); unsigned index = blocking ? 
GFS2_LKS_SRTTB : GFS2_LKS_SRTT; s64 rtt; preempt_disable(); rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); - lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); + lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats); gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */ preempt_enable(); @@ -100,7 +100,7 @@ static inline void gfs2_update_reply_times(struct gfs2_glock *gl, static inline void gfs2_update_request_times(struct gfs2_glock *gl) { struct gfs2_pcpu_lkstats *lks; - const unsigned gltype = gl->gl_name.ln_type; + const unsigned gltype = glock_type(gl); ktime_t dstamp; s64 irt; @@ -108,7 +108,7 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl) dstamp = gl->gl_dstamp; gl->gl_dstamp = ktime_get_real(); irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp)); - lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); + lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats); gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */ gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */ preempt_enable(); @@ -195,7 +195,7 @@ static void gdlm_bast(void *arg, int mode) gfs2_glock_cb(gl, LM_ST_SHARED); break; default: - fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode); + fs_err(glock_sbd(gl), "unknown bast mode %d\n", mode); BUG(); } } @@ -276,7 +276,7 @@ static void gfs2_reverse_hex(char *c, u64 value) static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, unsigned int flags) { - struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; + struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct; bool blocking; int cur, req; u32 lkf; @@ -284,8 +284,8 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, int error; gl->gl_req = req_state; - cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state); - req = make_mode(gl->gl_name.ln_sbd, req_state); + cur = make_mode(glock_sbd(gl), gl->gl_state); + req = 
make_mode(glock_sbd(gl), req_state); blocking = !down_conversion(cur, req) && !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)); lkf = make_flags(gl, flags, req, blocking); @@ -296,8 +296,8 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, if (test_bit(GLF_INITIAL, &gl->gl_flags)) { memset(strname, ' ', GDLM_STRNAME_BYTES - 1); strname[GDLM_STRNAME_BYTES - 1] = '\0'; - gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type); - gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number); + gfs2_reverse_hex(strname + 7, glock_type(gl)); + gfs2_reverse_hex(strname + 23, glock_number(gl)); gl->gl_dstamp = ktime_get_real(); } else { gfs2_update_request_times(gl); @@ -323,7 +323,7 @@ again: static void gdlm_put_lock(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct lm_lockstruct *ls = &sdp->sd_lockstruct; uint32_t flags = 0; int error; @@ -375,14 +375,14 @@ again: if (error) { fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n", - gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number, error); + glock_type(gl), + (unsigned long long) glock_number(gl), error); } } static void gdlm_cancel(struct gfs2_glock *gl) { - struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; + struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct; down_read(&ls->ls_sem); if (likely(ls->ls_dlm != NULL)) { diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 8312cd2cdae4..347df29d610e 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -888,8 +888,9 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, sb->s_blocksize - LH_V1_SIZE - 4); lh->lh_crc = cpu_to_be32(crc); - gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock); - gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags); + gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock, + REQ_OP_WRITE | op_flags); + gfs2_log_submit_write(&jd->jd_log_bio); } /** @@ -1096,7 +1097,7 @@ repeat: if (gfs2_withdrawn(sdp)) goto out_withdraw; if 
(sdp->sd_jdesc) - gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE); + gfs2_log_submit_write(&sdp->sd_jdesc->jd_log_bio); if (gfs2_withdrawn(sdp)) goto out_withdraw; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index d27a0b1080a9..e03928def87e 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -65,15 +65,15 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) static bool buffer_is_rgrp(const struct gfs2_bufdata *bd) { - return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP; + return glock_type(bd->bd_gl) == LM_TYPE_RGRP; } static void maybe_release_space(struct gfs2_bufdata *bd) { struct gfs2_glock *gl = bd->bd_gl; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); - unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; + unsigned int index = bd->bd_bh->b_blocknr - glock_number(gl); struct gfs2_bitmap *bi = rgd->rd_bits + index; rgrp_lock_local(rgd); @@ -229,21 +229,19 @@ static void gfs2_end_log_write(struct bio *bio) } /** - * gfs2_log_submit_bio - Submit any pending log bio + * gfs2_log_submit_write - Submit a pending log write bio * @biop: Address of the bio pointer - * @opf: REQ_OP | op_flags * * Submit any pending part-built or full bio to the block device. If * there is no pending bio, then this is a no-op. */ -void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf) +void gfs2_log_submit_write(struct bio **biop) { struct bio *bio = *biop; if (bio) { struct gfs2_sbd *sdp = bio->bi_private; atomic_inc(&sdp->sd_log_in_flight); - bio->bi_opf = opf; submit_bio(bio); *biop = NULL; } @@ -254,6 +252,7 @@ void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf) * @sdp: The super block * @blkno: The device block number we want to write to * @end_io: The bi_end_io callback + * @opf: REQ_OP | op_flags * * Allocate a new bio, initialize it with the given parameters and return it. 
* @@ -261,10 +260,10 @@ void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf) */ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, - bio_end_io_t *end_io) + bio_end_io_t *end_io, blk_opf_t opf) { struct super_block *sb = sdp->sd_vfs; - struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO); + struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, opf, GFP_NOIO); bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; bio->bi_end_io = end_io; @@ -278,7 +277,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, * @sdp: The super block * @blkno: The device block number we want to write to * @biop: The bio to get or allocate - * @op: REQ_OP + * @opf: REQ_OP | op_flags * @end_io: The bi_end_io callback * @flush: Always flush the current bio and allocate a new one? * @@ -291,7 +290,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, */ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, - struct bio **biop, enum req_op op, + struct bio **biop, blk_opf_t opf, bio_end_io_t *end_io, bool flush) { struct bio *bio = *biop; @@ -303,10 +302,10 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, nblk >>= sdp->sd_fsb2bb_shift; if (blkno == nblk && !flush) return bio; - gfs2_log_submit_bio(biop, op); + gfs2_log_submit_write(biop); } - *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); + *biop = gfs2_log_alloc_bio(sdp, blkno, end_io, opf); return *biop; } @@ -318,6 +317,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, * @size: the size of the data to write * @offset: the offset within the page * @blkno: block number of the log entry + * @opf: REQ_OP | op_flags * * Try and add the page segment to the current bio. 
If that fails, * submit the current bio to the device and create a new one, and @@ -326,17 +326,17 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, struct page *page, unsigned size, unsigned offset, - u64 blkno) + u64 blkno, blk_opf_t opf) { struct bio *bio; int ret; - bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE, + bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, opf, gfs2_end_log_write, false); ret = bio_add_page(bio, page, size, offset); if (ret == 0) { bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, - REQ_OP_WRITE, gfs2_end_log_write, true); + opf, gfs2_end_log_write, true); ret = bio_add_page(bio, page, size, offset); WARN_ON(ret == 0); } @@ -359,7 +359,7 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0), - bh->b_size, bh_offset(bh), dblock); + bh->b_size, bh_offset(bh), dblock, REQ_OP_WRITE); } /** @@ -380,7 +380,8 @@ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); - gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock); + gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock, + REQ_OP_WRITE); } /** @@ -477,13 +478,14 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, folio_put_refs(folio, 2); } -static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) +static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs, + sector_t sector, blk_opf_t opf) { struct bio *new; - new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); + new = bio_alloc(prev->bi_bdev, nr_iovecs, opf, GFP_NOIO); bio_clone_blkg_association(new, prev); - new->bi_iter.bi_sector = 
bio_end_sector(prev); + new->bi_iter.bi_sector = sector; bio_chain(new, prev); submit_bio(prev); return new; @@ -546,7 +548,8 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) unsigned int blocks = (PAGE_SIZE - off) >> bsize_shift; - bio = gfs2_chain_bio(bio, blocks); + bio = gfs2_chain_bio(bio, blocks, sector, + REQ_OP_READ); goto add_block_to_new_bio; } } @@ -556,8 +559,8 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) submit_bio(bio); } - bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); - bio->bi_opf = REQ_OP_READ; + bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read, + REQ_OP_READ); add_block_to_new_bio: bio_add_folio_nofail(bio, folio, bsize, off); block_added: diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index be740bf33666..772557b63b48 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -16,8 +16,8 @@ void gfs2_log_incr_head(struct gfs2_sbd *sdp); u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn); void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, struct page *page, unsigned size, unsigned offset, - u64 blkno); -void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf); + u64 blkno, blk_opf_t opf); +void gfs2_log_submit_write(struct bio **biop); void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head); diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index e4356198d8d8..3c8e4553102d 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -126,7 +126,7 @@ const struct address_space_operations gfs2_rgrp_aops = { struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) { struct address_space *mapping = gfs2_glock2aspace(gl); - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct folio *folio; struct buffer_head *bh; unsigned int shift; @@ -259,7 +259,7 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head 
*bhs[], int num) int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, int rahead, struct buffer_head **bhp) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct buffer_head *bh, *bhs[2]; int num = 0; @@ -513,7 +513,7 @@ int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num, struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct buffer_head *first_bh, *bh; u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> sdp->sd_sb.sb_bsize_shift; diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index b7c8a6684d02..2fe5dec193ed 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -43,7 +43,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) if (mapping->a_ops == &gfs2_meta_aops) { struct gfs2_glock_aspace *gla = container_of(mapping, struct gfs2_glock_aspace, mapping); - return gla->glock.gl_name.ln_sbd; + return glock_sbd(&gla->glock); } else return inode->i_sb->s_fs_info; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index e7a88b717991..c7d57de7c8f0 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1276,7 +1276,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) if (error) { gfs2_freeze_unlock(sdp); - gfs2_destroy_threads(sdp); fs_err(sdp, "can't make FS RW: %d\n", error); goto fail_per_node; } @@ -1286,6 +1285,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) fail_per_node: init_per_node(sdp, UNDO); + gfs2_destroy_threads(sdp); fail_inodes: init_inodes(sdp, UNDO); fail_sb: diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index b1692f12a602..1c3455093ae8 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -334,6 +334,7 @@ static void qd_put(struct gfs2_quota_data *qd) lockref_mark_dead(&qd->qd_lockref); spin_unlock(&qd->qd_lockref.lock); + list_lru_del_obj(&gfs2_qd_lru, 
&qd->qd_lru); gfs2_qd_dispose(qd); return; } @@ -978,7 +979,7 @@ out_dq: gfs2_glock_dq_uninit(&ghs[qx]); inode_unlock(&ip->i_inode); kfree(ghs); - gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, + gfs2_log_flush(glock_sbd(ip->i_gl), ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC); if (!error) { for (x = 0; x < num_qd; x++) { @@ -1027,7 +1028,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh, struct gfs2_holder i_gh; int error; - gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd); + gfs2_assert_warn(sdp, sdp == glock_sbd(qd->qd_gl)); restart: error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); if (error) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index b14e54b38ee8..8a97ca734afc 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1923,7 +1923,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops) { const struct gfs2_glock *gl = rgd->rd_gl; - const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + const struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_lkstats *st; u64 r_dcount, l_dcount; u64 l_srttb, a_srttb = 0; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index f6cd907b3ec6..d96160636161 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -147,8 +147,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) } error = gfs2_quota_init(sdp); - if (!error && gfs2_withdrawn(sdp)) + if (!error && gfs2_withdrawn(sdp)) { + gfs2_quota_cleanup(sdp); error = -EIO; + } if (!error) set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); return error; diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index fcfbf68ec725..6fd39fcdd00e 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h @@ -52,7 +52,7 @@ {(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \ {(1UL << GLF_DIRTY), "y" }, \ {(1UL << GLF_LFLUSH), "f" }, \ - {(1UL << GLF_PENDING_REPLY), "R" }, \ + {(1UL << GLF_MAY_CANCEL), "c" }, \ {(1UL << GLF_HAVE_REPLY), "r" }, \ {(1UL << 
GLF_INITIAL), "a" }, \ {(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \ @@ -111,9 +111,9 @@ TRACE_EVENT(gfs2_glock_state_change, ), TP_fast_assign( - __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->glnum = gl->gl_name.ln_number; - __entry->gltype = gl->gl_name.ln_type; + __entry->dev = glock_sbd(gl)->sd_vfs->s_dev; + __entry->glnum = glock_number(gl); + __entry->gltype = glock_type(gl); __entry->cur_state = glock_trace_state(gl->gl_state); __entry->new_state = glock_trace_state(new_state); __entry->tgt_state = glock_trace_state(gl->gl_target); @@ -147,9 +147,9 @@ TRACE_EVENT(gfs2_glock_put, ), TP_fast_assign( - __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->gltype = gl->gl_name.ln_type; - __entry->glnum = gl->gl_name.ln_number; + __entry->dev = glock_sbd(gl)->sd_vfs->s_dev; + __entry->gltype = glock_type(gl); + __entry->glnum = glock_number(gl); __entry->cur_state = glock_trace_state(gl->gl_state); __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0); ), @@ -181,9 +181,9 @@ TRACE_EVENT(gfs2_demote_rq, ), TP_fast_assign( - __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->gltype = gl->gl_name.ln_type; - __entry->glnum = gl->gl_name.ln_number; + __entry->dev = glock_sbd(gl)->sd_vfs->s_dev; + __entry->gltype = glock_type(gl); + __entry->glnum = glock_number(gl); __entry->cur_state = glock_trace_state(gl->gl_state); __entry->dmt_state = glock_trace_state(gl->gl_demote_state); __entry->flags = gl->gl_flags | (gl->gl_object ? 
(1UL<<GLF_OBJECT) : 0); @@ -215,9 +215,9 @@ TRACE_EVENT(gfs2_promote, ), TP_fast_assign( - __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->glnum = gh->gh_gl->gl_name.ln_number; - __entry->gltype = gh->gh_gl->gl_name.ln_type; + __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev; + __entry->glnum = glock_number(gh->gh_gl); + __entry->gltype = glock_type(gh->gh_gl); __entry->state = glock_trace_state(gh->gh_state); ), @@ -243,9 +243,9 @@ TRACE_EVENT(gfs2_glock_queue, ), TP_fast_assign( - __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->glnum = gh->gh_gl->gl_name.ln_number; - __entry->gltype = gh->gh_gl->gl_name.ln_type; + __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev; + __entry->glnum = glock_number(gh->gh_gl); + __entry->gltype = glock_type(gh->gh_gl); __entry->queue = queue; __entry->state = glock_trace_state(gh->gh_state); ), @@ -282,9 +282,9 @@ TRACE_EVENT(gfs2_glock_lock_time, ), TP_fast_assign( - __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev; - __entry->glnum = gl->gl_name.ln_number; - __entry->gltype = gl->gl_name.ln_type; + __entry->dev = glock_sbd(gl)->sd_vfs->s_dev; + __entry->glnum = glock_number(gl); + __entry->gltype = glock_type(gl); __entry->status = gl->gl_lksb.sb_status; __entry->flags = gl->gl_lksb.sb_flags; __entry->tdiff = tdiff; @@ -337,11 +337,11 @@ TRACE_EVENT(gfs2_pin, ), TP_fast_assign( - __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev; + __entry->dev = glock_sbd(bd->bd_gl)->sd_vfs->s_dev; __entry->pin = pin; __entry->len = bd->bd_bh->b_size; __entry->block = bd->bd_bh->b_blocknr; - __entry->ino = bd->bd_gl->gl_name.ln_number; + __entry->ino = glock_number(bd->bd_gl); ), TP_printk("%u,%u log %s %llu/%lu inode %llu", @@ -458,7 +458,7 @@ TRACE_EVENT(gfs2_bmap, ), TP_fast_assign( - __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; + __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev; __entry->lblock = lblock; __entry->pblock = buffer_mapped(bh) ? 
bh->b_blocknr : 0; __entry->inum = ip->i_no_addr; @@ -494,7 +494,7 @@ TRACE_EVENT(gfs2_iomap_start, ), TP_fast_assign( - __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; + __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev; __entry->inum = ip->i_no_addr; __entry->pos = pos; __entry->length = length; @@ -526,7 +526,7 @@ TRACE_EVENT(gfs2_iomap_end, ), TP_fast_assign( - __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; + __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev; __entry->inum = ip->i_no_addr; __entry->offset = iomap->offset; __entry->length = iomap->length; @@ -568,7 +568,7 @@ TRACE_EVENT(gfs2_block_alloc, ), TP_fast_assign( - __entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev; + __entry->dev = glock_sbd(rgd->rd_gl)->sd_vfs->s_dev; __entry->start = block; __entry->inum = ip->i_no_addr; __entry->len = len; diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 6df65540e13d..95f2632cdb01 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -197,7 +197,7 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) { struct gfs2_trans *tr = current->journal_info; - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct gfs2_bufdata *bd; lock_buffer(bh); @@ -255,7 +255,7 @@ void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_sbd *sdp = glock_sbd(gl); struct super_block *sb = sdp->sd_vfs; struct gfs2_bufdata *bd; struct gfs2_meta_header *mh; |
