author | Christoph Hellwig <hch@lst.de> | 2024-06-23 08:44:31 +0300 |
---|---|---|
committer | Chandan Babu R <chandanbabu@kernel.org> | 2024-07-01 07:02:29 +0300 |
commit | 4818fd60db5feeeecb84d36d0162c3fb3eccb522 (patch) | |
tree | 65480926671be7a501b2495e4aa748cccb8d90de /fs/xfs/xfs_file.c | |
parent | 4e82fa11fbbcc5426366dc2ddc839fd56b9d53de (diff) | |
download | linux-4818fd60db5feeeecb84d36d0162c3fb3eccb522.tar.xz |
xfs: fold xfs_ilock_for_write_fault into xfs_write_fault
Now that the page fault handler has been refactored, the only caller of
xfs_ilock_for_write_fault is simple enough and calls it unconditionally.
Fold the locking logic into that caller and expand the comments explaining it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_file.c')
-rw-r--r-- | fs/xfs/xfs_file.c | 33 |
1 file changed, 15 insertions(+), 18 deletions(-)
```diff
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 904be41f3e5e..4cdc54dc9686 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -227,21 +227,6 @@ xfs_ilock_iocb_for_write(
 	return 0;
 }
 
-static unsigned int
-xfs_ilock_for_write_fault(
-	struct xfs_inode	*ip)
-{
-	/* get a shared lock if no remapping in progress */
-	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
-	if (!xfs_iflags_test(ip, XFS_IREMAPPING))
-		return XFS_MMAPLOCK_SHARED;
-
-	/* wait for remapping to complete */
-	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
-	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
-	return XFS_MMAPLOCK_EXCL;
-}
-
 STATIC ssize_t
 xfs_file_dio_read(
 	struct kiocb		*iocb,
@@ -1294,18 +1279,30 @@ xfs_write_fault(
 	unsigned int		order)
 {
 	struct inode		*inode = file_inode(vmf->vma->vm_file);
-	unsigned int		lock_mode;
+	struct xfs_inode	*ip = XFS_I(inode);
+	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
 	vm_fault_t		ret;
 
 	sb_start_pagefault(inode->i_sb);
 	file_update_time(vmf->vma->vm_file);
 
-	lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
+	/*
+	 * Normally we only need the shared mmaplock, but if a reflink remap is
+	 * in progress we take the exclusive lock to wait for the remap to
+	 * finish before taking a write fault.
+	 */
+	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+	if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
+		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+		lock_mode = XFS_MMAPLOCK_EXCL;
+	}
+
 	if (IS_DAX(inode))
 		ret = xfs_dax_fault_locked(vmf, order, true);
 	else
 		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
-	xfs_iunlock(XFS_I(inode), lock_mode);
+	xfs_iunlock(ip, lock_mode);
 	sb_end_pagefault(inode->i_sb);
 	return ret;
 }
```
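As an aside, the control flow this patch folds into xfs_write_fault() — take the mmap lock shared, and only when a remap is in progress drop it and retake it exclusive so the fault waits for the remap to finish — can be exercised with a small user-space sketch. In the sketch below a pthread rwlock stands in for the XFS MMAPLOCK and an atomic flag for the XFS_IREMAPPING inode flag; `struct fake_inode`, `lock_for_write_fault()`, and the `LOCK_SHARED`/`LOCK_EXCL` names are hypothetical stand-ins for illustration only, not kernel code.

```c
/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
	pthread_rwlock_t mmaplock;   /* stand-in for the XFS MMAPLOCK */
	atomic_bool      remapping;  /* stand-in for XFS_IREMAPPING */
};

enum lock_mode { LOCK_SHARED, LOCK_EXCL };

/*
 * Mirror of the folded logic: take the lock shared, and only if a remap
 * is in progress drop it and retake it exclusive, which makes the caller
 * wait until the remap has finished.
 */
static enum lock_mode lock_for_write_fault(struct fake_inode *ip)
{
	pthread_rwlock_rdlock(&ip->mmaplock);
	if (!atomic_load(&ip->remapping))
		return LOCK_SHARED;

	/* remap in progress: queue behind it on the exclusive lock */
	pthread_rwlock_unlock(&ip->mmaplock);
	pthread_rwlock_wrlock(&ip->mmaplock);
	return LOCK_EXCL;
}

int main(void)
{
	struct fake_inode ip = { .remapping = false };
	pthread_rwlock_init(&ip.mmaplock, NULL);

	enum lock_mode mode = lock_for_write_fault(&ip);
	printf("took the %s lock\n",
	       mode == LOCK_SHARED ? "shared" : "exclusive");

	pthread_rwlock_unlock(&ip.mmaplock);
	pthread_rwlock_destroy(&ip.mmaplock);
	return 0;
}
```

As in the patch, the sketch drops the shared lock and retakes it exclusive rather than upgrading in place: a POSIX rwlock has no atomic shared-to-exclusive upgrade, and taking the exclusive lock is what makes the fault path wait out the in-progress remap.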