summaryrefslogtreecommitdiff
path: root/fs/xfs/xfs_file.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2024-06-23 08:44:29 +0300
committerChandan Babu R <chandanbabu@kernel.org>2024-07-01 07:02:29 +0300
commit6a39ec1d394458e59f411edecf7b08ce34bdc7c8 (patch)
tree63c8548b1efa69b72a25fc30ab04c074cfbd1f0d /fs/xfs/xfs_file.c
parent9092b1de35a45ec7291156382db7a7ee13bdbb27 (diff)
downloadlinux-6a39ec1d394458e59f411edecf7b08ce34bdc7c8.tar.xz
xfs: refactor __xfs_filemap_fault
Split the write fault and DAX fault handling into separate helpers so that the main fault handler is easier to follow. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_file.c')
-rw-r--r--fs/xfs/xfs_file.c71
1 file changed, 45 insertions(+), 26 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8aab2f66fe01..32a2cd6ec82e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1252,7 +1252,7 @@ xfs_file_llseek(
}
static inline vm_fault_t
-xfs_dax_fault(
+xfs_dax_fault_locked(
struct vm_fault *vmf,
unsigned int order,
bool write_fault)
@@ -1273,6 +1273,45 @@ xfs_dax_fault(
return ret;
}
+static vm_fault_t
+xfs_dax_read_fault(
+ struct vm_fault *vmf,
+ unsigned int order)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file));
+ unsigned int lock_mode;
+ vm_fault_t ret;
+
+ lock_mode = xfs_ilock_for_write_fault(ip);
+ ret = xfs_dax_fault_locked(vmf, order, false);
+ xfs_iunlock(ip, lock_mode);
+
+ return ret;
+}
+
+static vm_fault_t
+xfs_write_fault(
+ struct vm_fault *vmf,
+ unsigned int order)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ unsigned int lock_mode;
+ vm_fault_t ret;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+
+ lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
+ if (IS_DAX(inode))
+ ret = xfs_dax_fault_locked(vmf, order, true);
+ else
+ ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+ xfs_iunlock(XFS_I(inode), lock_mode);
+
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+
/*
* Locking for serialisation of IO during page faults. This results in a lock
* ordering of:
@@ -1290,34 +1329,14 @@ __xfs_filemap_fault(
bool write_fault)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- struct xfs_inode *ip = XFS_I(inode);
- vm_fault_t ret;
- unsigned int lock_mode = 0;
-
- trace_xfs_filemap_fault(ip, order, write_fault);
- if (write_fault) {
- sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
- }
-
- if (IS_DAX(inode) || write_fault)
- lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
-
- if (IS_DAX(inode)) {
- ret = xfs_dax_fault(vmf, order, write_fault);
- } else if (write_fault) {
- ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
- } else {
- ret = filemap_fault(vmf);
- }
-
- if (lock_mode)
- xfs_iunlock(XFS_I(inode), lock_mode);
+ trace_xfs_filemap_fault(XFS_I(inode), order, write_fault);
if (write_fault)
- sb_end_pagefault(inode->i_sb);
- return ret;
+ return xfs_write_fault(vmf, order);
+ if (IS_DAX(inode))
+ return xfs_dax_read_fault(vmf, order);
+ return filemap_fault(vmf);
}
static inline bool