author    Qu Wenruo <wqu@suse.com>    2024-10-10 07:46:12 +0300
committer David Sterba <dsterba@suse.com>    2024-11-11 16:34:19 +0300
commit    c87c299776e4d75bcc5559203ae2c37dc0396d80 (patch)
tree      eea982d95ceb48047011838e598e3c3a7818fc3a /fs/btrfs/file.h
parent    b1c5f6eda2d024c187d4d47f0310bc5a1cc8b1d3 (diff)
download  linux-c87c299776e4d75bcc5559203ae2c37dc0396d80.tar.xz
btrfs: make buffered write to copy one page a time
Currently btrfs_buffered_write() prepares multiple pages at a time, allowing better performance. But the current trend is to support larger folios as an optimization, instead of implementing our own multi-page optimization.

This is inspired by generic_perform_write(), which copies one folio at a time.

Such a change will prepare us for the migration to the write_begin() and write_end() callbacks, and makes every involved function a little simpler.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
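For illustration, below is a minimal sketch of the one-page-at-a-time copy loop this change moves toward, modeled on generic_perform_write(). It is not the actual fs/btrfs/file.c code: prepare_one_page() is a hypothetical helper, the surrounding locking, space reservation, and error handling are trimmed, and only btrfs_dirty_page() reflects the prototype in this patch.

/*
 * Hedged sketch of a single-page buffered write loop, assuming
 * 'iter' is the source iov_iter, 'pos' the file position, and
 * 'cached' a struct extent_state pointer from the caller.
 */
while (iov_iter_count(iter) > 0) {
        size_t offset = pos & (PAGE_SIZE - 1);
        /* Never cross a page boundary in a single copy. */
        size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(iter));
        size_t copied;
        struct page *page;

        page = prepare_one_page(inode, pos, bytes);    /* hypothetical helper */
        if (IS_ERR(page))
                return PTR_ERR(page);

        copied = copy_page_from_iter_atomic(page, offset, bytes, iter);

        /* Dirty exactly one page, matching the new btrfs_dirty_page(). */
        btrfs_dirty_page(BTRFS_I(inode), page, pos, copied, &cached, false);

        pos += copied;
}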
Diffstat (limited to 'fs/btrfs/file.h')
-rw-r--r--  fs/btrfs/file.h | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/file.h b/fs/btrfs/file.h
index c2ce0ae94a9c..69a7b78d99bb 100644
--- a/fs/btrfs/file.h
+++ b/fs/btrfs/file.h
@@ -34,9 +34,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded);
int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached, bool noreserve);
+int btrfs_dirty_page(struct btrfs_inode *inode, struct page *page, loff_t pos,
+ size_t write_bytes, struct extent_state **cached, bool noreserve);
int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes, bool nowait);
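As a usage note, a call site updated for this prototype change goes from dirtying a batch of pages to dirtying a single page per iteration of the copy loop. The sketch below only mirrors the two prototypes shown in the diff; the variable names are illustrative, not taken from fs/btrfs/file.c.

/* Before: dirty a whole batch of prepared pages in one call. */
ret = btrfs_dirty_pages(BTRFS_I(inode), pages, pos, copied, &cached, false);

/* After: dirty exactly one page per copy-loop iteration. */
ret = btrfs_dirty_page(BTRFS_I(inode), page, pos, copied, &cached, false);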