author     Al Viro <viro@zeniv.linux.org.uk>    2015-06-17 19:02:56 +0300
committer  Al Viro <viro@zeniv.linux.org.uk>    2015-07-07 00:39:25 +0300
commit     724bb09fdc06d4ff03757b25d6dba9ef1b133e8f
tree       5ef4441867f681e132d820c78948b34c0f0ca1df  /fs/ufs/truncate.c
parent     4af7b2c080715b9452fdaefb7ada72b4dc79593e
download   linux-724bb09fdc06d4ff03757b25d6dba9ef1b133e8f.tar.xz
ufs: don't use lock_ufs() for block pointers tree protection
* stores to block pointers are under per-inode seqlock (meta_lock) and
mutex (truncate_mutex)
* fetches of block pointers are either under truncate_mutex, or wrapped
into seqretry loop on meta_lock
* all changes of ->i_size are under truncate_mutex and i_mutex
* all changes of ->i_lastfrag are under truncate_mutex
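To make the discipline concrete, here is a minimal sketch of the store
side (the helper and its name are hypothetical - the patch itself
open-codes this pattern at every store site, as the hunks below show):

        /*
         * Hypothetical helper showing the store-side rule: the caller
         * holds ufsi->truncate_mutex, and the store itself is bracketed
         * by the meta_lock seqlock so that lockless readers can detect
         * the update instead of seeing a torn 64-bit value on UFS2.
         */
        static void ufs_clear_block_ptr(struct inode *inode,
                                        struct ufs_sb_private_info *uspi,
                                        void *p)
        {
                struct ufs_inode_info *ufsi = UFS_I(inode);

                /* caller holds ufsi->truncate_mutex */
                write_seqlock(&ufsi->meta_lock);
                ufs_data_ptr_clear(uspi, p);
                write_sequnlock(&ufsi->meta_lock);
        }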
The scheme is similar to what ext2 does; the main difference is that,
unlike ext2, we can't rely upon the atomicity of stores into block
pointers - on UFS2 they are 64-bit. So we can't cut the corner when
switching a pointer from NULL to non-NULL as we could in
ext2_splice_branch(), and we need to take meta_lock on all modifications.
We use a seqlock where ext2 uses an rwlock; ext2 could probably also
benefit from such a change...
Another non-trivial difference is that with UFS we *cannot* have a reader
grab truncate_mutex in case of a race - it has to keep retrying. That
might be possible to change, but not until we lift tail unpacking
several levels up in the call chain.
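For illustration, the fetch side would then look roughly like this (a
sketch under the same assumptions; the helper name is made up,
read_seqbegin()/read_seqretry() are the stock seqlock primitives):

        /*
         * Hypothetical read-side helper: fetch a block pointer without
         * taking truncate_mutex by spinning on meta_lock until a stable
         * value is observed.  Note the retry loop - unlike ext2, a
         * reader that loses the race cannot fall back to grabbing the
         * mutex; it simply tries again.
         */
        static u64 ufs_fetch_block_ptr(struct inode *inode, void *p)
        {
                struct ufs_inode_info *ufsi = UFS_I(inode);
                unsigned seq;
                u64 tmp;

                do {
                        seq = read_seqbegin(&ufsi->meta_lock);
                        tmp = ufs_data_ptr_to_cpu(inode->i_sb, p);
                } while (read_seqretry(&ufsi->meta_lock, seq));
                return tmp;
        }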
After this commit we do *NOT* hold fs-wide serialization on accesses
to block pointers anymore. Moreover, lock_ufs() can become a normal
mutex now - it's only used in statfs, remount and sync_fs, and none
of those uses is recursive. As a matter of fact, it can *now* be
collapsed with ->s_lock, and eventually be replaced with saner
per-cylinder-group spinlocks, but that's a separate story.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/ufs/truncate.c')
-rw-r--r--  fs/ufs/truncate.c | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 9908a6045d7a..ad34b7f4b499 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -128,7 +128,9 @@ next1:
                 tmp = ufs_data_ptr_to_cpu(sb, p);
                 if (!tmp)
                         continue;
+                write_seqlock(&ufsi->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&ufsi->meta_lock);
 
                 if (free_count == 0) {
                         frag_to_free = tmp;
@@ -157,7 +159,9 @@ next1:
         if (!tmp )
                 ufs_panic(sb, "ufs_truncate_direct", "internal error");
         frag4 = ufs_fragnum (frag4);
+        write_seqlock(&ufsi->meta_lock);
         ufs_data_ptr_clear(uspi, p);
+        write_sequnlock(&ufsi->meta_lock);
 
         ufs_free_fragments (inode, tmp, frag4);
         mark_inode_dirty(inode);
@@ -199,7 +203,9 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                 return 1;
         }
         if (!ind_ubh) {
+                write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&UFS_I(inode)->meta_lock);
                 return 0;
         }
 
@@ -210,7 +216,9 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                 if (!tmp)
                         continue;
 
+                write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_data_ptr_clear(uspi, ind);
+                write_sequnlock(&UFS_I(inode)->meta_lock);
                 ubh_mark_buffer_dirty(ind_ubh);
                 if (free_count == 0) {
                         frag_to_free = tmp;
@@ -235,7 +243,9 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
                         break;
         if (i >= uspi->s_apb) {
                 tmp = ufs_data_ptr_to_cpu(sb, p);
+                write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&UFS_I(inode)->meta_lock);
 
                 ubh_bforget(ind_ubh);
                 ufs_free_blocks (inode, tmp, uspi->s_fpb);
@@ -278,7 +288,9 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
                 return 1;
         }
         if (!dind_bh) {
+                write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&UFS_I(inode)->meta_lock);
                 return 0;
         }
 
@@ -297,7 +309,9 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
                         break;
         if (i >= uspi->s_apb) {
                 tmp = ufs_data_ptr_to_cpu(sb, p);
+                write_seqlock(&UFS_I(inode)->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&UFS_I(inode)->meta_lock);
 
                 ubh_bforget(dind_bh);
                 ufs_free_blocks(inode, tmp, uspi->s_fpb);
@@ -339,7 +353,9 @@ static int ufs_trunc_tindirect(struct inode *inode)
                 return 1;
         }
         if (!tind_bh) {
+                write_seqlock(&ufsi->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&ufsi->meta_lock);
                 return 0;
         }
 
@@ -355,7 +371,9 @@ static int ufs_trunc_tindirect(struct inode *inode)
                         break;
         if (i >= uspi->s_apb) {
                 tmp = ufs_data_ptr_to_cpu(sb, p);
+                write_seqlock(&ufsi->meta_lock);
                 ufs_data_ptr_clear(uspi, p);
+                write_sequnlock(&ufsi->meta_lock);
 
                 ubh_bforget(tind_bh);
                 ufs_free_blocks(inode, tmp, uspi->s_fpb);
@@ -447,7 +465,7 @@ static void __ufs_truncate_blocks(struct inode *inode)
         struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
         int retry;
 
-        lock_ufs(sb);
+        mutex_lock(&ufsi->truncate_mutex);
         while (1) {
                 retry = ufs_trunc_direct(inode);
                 retry |= ufs_trunc_indirect(inode, UFS_IND_BLOCK,
@@ -465,7 +483,7 @@
         }
 
         ufsi->i_lastfrag = DIRECT_FRAGMENT;
-        unlock_ufs(sb);
+        mutex_unlock(&ufsi->truncate_mutex);
 }
 
 int ufs_truncate(struct inode *inode, loff_t size)