author		Dave Chinner <dchinner@redhat.com>	2022-07-07 11:51:59 +0300
committer	Dave Chinner <david@fromorbit.com>	2022-07-07 11:51:59 +0300
commit		1dd2a2c18e314ad89200f8296c86dd4ecd53dea6 (patch)
tree		682de80e2ab7e4ce98095fe1aaa6400135ec4815 /fs/xfs/xfs_log_cil.c
parent		7c8ade212120085439eddc4cfddfa29d41c3f426 (diff)
xfs: track CIL ticket reservation in percpu structure
To get it out from under the cil spinlock.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
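
The pattern applied here: on the transaction commit fast path, each CPU accumulates the reservation it steals for the CIL context ticket in its own per-CPU counter, and those counters are only folded back into ctx->ticket->t_curr_res when the context is swapped out at push time (and when a CPU goes offline), so the hot path no longer needs cil->xc_cil_lock for this accounting. Below is a minimal, runnable userspace C analogue of that pattern; the names (cil_pcp, cil_insert_account, cil_push_aggregate) and the flat per-CPU array are hypothetical stand-ins for the kernel's percpu API, not the actual XFS code:

	#include <stdio.h>

	#define NR_CPUS	4

	/* Per-CPU shadow of the reservation taken for the context ticket. */
	struct cil_pcp {
		long	space_reserved;		/* only its owning CPU writes this */
	};

	struct cil_ctx {
		long	ticket_curr_res;	/* folded total; owned by the pusher */
	};

	static struct cil_pcp pcp[NR_CPUS];

	/* Fast path: account the stolen reservation locally, no shared lock. */
	static void cil_insert_account(int cpu, long ctx_res)
	{
		pcp[cpu].space_reserved += ctx_res;
	}

	/* Slow path (context switch, single-threaded): fold the counters back. */
	static void cil_push_aggregate(struct cil_ctx *ctx)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			ctx->ticket_curr_res += pcp[cpu].space_reserved;
			pcp[cpu].space_reserved = 0;
		}
	}

	int main(void)
	{
		struct cil_ctx ctx = { 0 };

		cil_insert_account(0, 128);
		cil_insert_account(1, 256);
		cil_push_aggregate(&ctx);
		printf("folded reservation: %ld\n", ctx.ticket_curr_res);	/* 384 */
		return 0;
	}

In the kernel the fast-path update runs with preemption disabled on the owning CPU (get_cpu_ptr/put_cpu_ptr) and the fold runs single-threaded under the context switch, which is what makes the unlocked per-CPU additions safe.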
Diffstat (limited to 'fs/xfs/xfs_log_cil.c')
-rw-r--r--	fs/xfs/xfs_log_cil.c	16
1 file changed, 12 insertions, 4 deletions
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 2d16add7a8d4..e38e10082da2 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -125,6 +125,9 @@ xlog_cil_push_pcp_aggregate(
 	for_each_online_cpu(cpu) {
 		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
 
+		ctx->ticket->t_curr_res += cilpcp->space_reserved;
+		cilpcp->space_reserved = 0;
+
 		/*
 		 * We're in the middle of switching cil contexts. Reset the
 		 * counter we use to detect when the current context is nearing
@@ -608,6 +611,7 @@ xlog_cil_insert_items(
 			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
 		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
 	}
+	cilpcp->space_reserved += ctx_res;
 
 	/*
 	 * Accurately account when over the soft limit, otherwise fold the
@@ -632,14 +636,12 @@ xlog_cil_insert_items(
 	}
 	put_cpu_ptr(cilpcp);
 
-	spin_lock(&cil->xc_cil_lock);
-	ctx->ticket->t_curr_res += ctx_res;
-
 	/*
 	 * Now (re-)position everything modified at the tail of the CIL.
 	 * We do this here so we only need to take the CIL lock once during
 	 * the transaction commit.
 	 */
+	spin_lock(&cil->xc_cil_lock);
 	list_for_each_entry(lip, &tp->t_items, li_trans) {
 		/* Skip items which aren't dirty in this transaction. */
 		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
@@ -1746,9 +1748,15 @@ xlog_cil_pcp_dead(
 {
 	struct xfs_cil		*cil = log->l_cilp;
 	struct xlog_cil_pcp	*cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
+	struct xfs_cil_ctx	*ctx;
 
 	down_write(&cil->xc_ctx_lock);
-	atomic_add(cilpcp->space_used, &cil->xc_ctx->space_used);
+	ctx = cil->xc_ctx;
+	if (ctx->ticket)
+		ctx->ticket->t_curr_res += cilpcp->space_reserved;
+	cilpcp->space_reserved = 0;
+
+	atomic_add(cilpcp->space_used, &ctx->space_used);
 	cilpcp->space_used = 0;
 	up_write(&cil->xc_ctx_lock);
 }
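
One subtlety the last hunk handles: the push-time aggregation loop only walks online CPUs, so an offlined CPU's per-CPU counter would otherwise be stranded. xlog_cil_pcp_dead() therefore folds the dead CPU's space_reserved back into the current context's ticket under xc_ctx_lock before zeroing it; the ctx->ticket check appears to guard the case where the current context has no ticket attached. Continuing the hypothetical userspace sketch above, the same fold would look like:

	/*
	 * CPU-offline path: fold the dead CPU's counter back immediately so
	 * its reservation is not lost to the online-CPU aggregation loop.
	 * In the real code this runs under cil->xc_ctx_lock to serialize
	 * against a concurrent context switch.
	 */
	static void cil_cpu_dead(struct cil_ctx *ctx, int cpu)
	{
		ctx->ticket_curr_res += pcp[cpu].space_reserved;
		pcp[cpu].space_reserved = 0;
	}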