author | Dave Chinner <dchinner@redhat.com> | 2010-12-03 14:11:29 +0300
committer | Dave Chinner <david@fromorbit.com> | 2010-12-03 14:11:29 +0300
commit | 84f3c683c4d3f36d3c3ed320babd960a332ac458 (patch)
tree | 5884f4e5a04a7a67c634e003bfa590fd2e643fdc /fs/xfs/xfs_log.c
parent | 2ced19cbae5448b720919a494606c62095d4f4db (diff)
download | linux-84f3c683c4d3f36d3c3ed320babd960a332ac458.tar.xz
xfs: convert l_last_sync_lsn to an atomic variable
log->l_last_sync_lsn is updated in only one critical spot - log
buffer IO completion - and is protected by the grant lock there. This
requires the grant lock to be taken for every log buffer IO
completion. Converting the l_last_sync_lsn variable to an atomic64_t
means that we do not need to take the grant lock in log buffer IO
completion to update it.
This also removes the need for explicitly holding a spinlock to read
the l_last_sync_lsn on 32-bit platforms.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
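
For readers unfamiliar with the pattern, here is a minimal user-space sketch of the change the message describes. It is not the kernel code: C11 <stdatomic.h> and a pthread mutex stand in for the kernel's atomic64_t and the XFS grant lock, and fake_log and the function names are illustrative only.

/*
 * Minimal user-space sketch of the change described in the commit message.
 * C11 <stdatomic.h> and a pthread mutex stand in for the kernel's
 * atomic64_t and the XFS grant lock; fake_log and the function names are
 * illustrative, not taken from the patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t lsn_t;

struct fake_log {
	pthread_mutex_t	grant_lock;		/* stand-in for l_grant_lock */
	lsn_t		last_sync_lsn_locked;	/* old scheme: lock-protected */
	_Atomic lsn_t	last_sync_lsn;		/* new scheme: atomic 64-bit */
};

/* Old scheme: every log buffer IO completion takes the grant lock to store. */
static void update_last_sync_locked(struct fake_log *log, lsn_t lsn)
{
	pthread_mutex_lock(&log->grant_lock);
	log->last_sync_lsn_locked = lsn;
	pthread_mutex_unlock(&log->grant_lock);
}

/*
 * Old scheme: a plain 64-bit load is not atomic on 32-bit CPUs, so readers
 * must take the lock as well.
 */
static lsn_t read_last_sync_locked(struct fake_log *log)
{
	pthread_mutex_lock(&log->grant_lock);
	lsn_t lsn = log->last_sync_lsn_locked;
	pthread_mutex_unlock(&log->grant_lock);
	return lsn;
}

/*
 * New scheme: store and load without the grant lock, the shape of the
 * atomic64_set()/atomic64_read() calls the patch introduces.
 */
static void update_last_sync_atomic(struct fake_log *log, lsn_t lsn)
{
	atomic_store(&log->last_sync_lsn, lsn);
}

static lsn_t read_last_sync_atomic(struct fake_log *log)
{
	return atomic_load(&log->last_sync_lsn);
}

The caller-visible effect is the one the message states: log buffer IO completion no longer needs the grant lock just to store the LSN, and readers no longer need an explicit spinlock for a 64-bit read on 32-bit platforms.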
Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r-- | fs/xfs/xfs_log.c | 55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 1e2020d5a8b6..70790eb48336 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -675,12 +675,8 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
 	spin_lock(&log->l_grant_lock);
 
@@ -800,11 +796,9 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
 	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
+	log->l_tail_lsn = tail_lsn;
 	spin_unlock(&log->l_grant_lock);
 
 	return tail_lsn;
@@ -1014,9 +1008,9 @@ xlog_alloc_log(xfs_mount_t *mp,
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
+	atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0));
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
 	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
 	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
@@ -1194,6 +1188,7 @@ xlog_grant_push_ail(
 	int		need_bytes)
 {
 	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
 	xfs_lsn_t	tail_lsn;
 	int		free_blocks;
 	int		free_bytes;
@@ -1228,10 +1223,12 @@ xlog_grant_push_ail(
 						threshold_block);
 	/*
 	 * Don't pass in an lsn greater than the lsn of the last
-	 * log record known to be on disk.
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
 	 */
-	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-		threshold_lsn = log->l_last_sync_lsn;
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
 
 	/*
 	 * Get the transaction layer to kick the dirty buffers out to
@@ -2194,7 +2191,7 @@ xlog_state_do_callback(
 			lowest_lsn = xlog_get_lowest_lsn(log);
 			if (lowest_lsn &&
 			    XFS_LSN_CMP(lowest_lsn,
-			    		be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+					be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 				iclog = iclog->ic_next;
 				continue; /* Leave this iclog for
 					   * another thread */
@@ -2202,23 +2199,21 @@ xlog_state_do_callback(
 
 			iclog->ic_state = XLOG_STATE_CALLBACK;
 
-			spin_unlock(&log->l_icloglock);
 
-			/* l_last_sync_lsn field protected by
-			 * l_grant_lock. Don't worry about iclog's lsn.
-			 * No one else can be here except us.
+			/*
+			 * update the last_sync_lsn before we drop the
+			 * icloglock to ensure we are the only one that
+			 * can update it.
 			 */
-			spin_lock(&log->l_grant_lock);
-			ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-			      be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-			log->l_last_sync_lsn =
-				be64_to_cpu(iclog->ic_header.h_lsn);
-			spin_unlock(&log->l_grant_lock);
+			ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+				be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+			atomic64_set(&log->l_last_sync_lsn,
+				be64_to_cpu(iclog->ic_header.h_lsn));
 
-		} else {
-			spin_unlock(&log->l_icloglock);
+		} else
 			ioerrors++;
-		}
+
+		spin_unlock(&log->l_icloglock);
 
 		/*
 		 * Keep processing entries in the callback list until
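
The xlog_grant_push_ail() hunk above also introduces a local snapshot (last_sync_lsn) so that the compare and the assignment see the same value. Below is a hedged user-space sketch of that snapshot pattern, again using C11 atomics; lsn_cmp() and clamp_threshold() are illustrative stand-ins, not the kernel's XFS_LSN_CMP() or any XFS interface.

/*
 * Sketch of the snapshot-before-compare pattern from the
 * xlog_grant_push_ail() hunk: read the shared atomic once into a local,
 * then both the compare and the assignment use that same value, so a
 * concurrent update cannot land between them. lsn_cmp() and
 * clamp_threshold() are illustrative stand-ins, not kernel interfaces.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t lsn_t;

static int lsn_cmp(lsn_t a, lsn_t b)		/* stand-in for XFS_LSN_CMP() */
{
	return (a > b) - (a < b);
}

static lsn_t clamp_threshold(_Atomic lsn_t *last_sync_lsn, lsn_t threshold_lsn)
{
	/*
	 * Racy shape this avoids: two separate loads could observe two
	 * different values of *last_sync_lsn.
	 *
	 *	if (lsn_cmp(threshold_lsn, atomic_load(last_sync_lsn)) > 0)
	 *		threshold_lsn = atomic_load(last_sync_lsn);
	 */
	lsn_t snapshot = atomic_load(last_sync_lsn);

	if (lsn_cmp(threshold_lsn, snapshot) > 0)
		threshold_lsn = snapshot;
	return threshold_lsn;
}

The snapshot guarantees the value handed on is never newer than the one the compare saw, which is exactly what the comment added in that hunk calls out.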