author    Christian Brauner <brauner@kernel.org>  2025-09-29 12:38:29 +0300
committer Christian Brauner <brauner@kernel.org>  2025-11-05 14:57:24 +0300
commit    4966b46652680b2d86b8a59319d4f529edf20187 (patch)
tree      b6b5431177af815386a02debc40f3054ff715979 /include
parent    7aa6bc3e8766990824f66ca76c19596ce10daf3e (diff)
parent    93570c652b80cd632dc49b590a35d3f0e268893b (diff)
download  linux-4966b46652680b2d86b8a59319d4f529edf20187.tar.xz
Merge patch series "fuse: use iomap for buffered reads + readahead"
Joanne Koong <joannelkoong@gmail.com> says:

This series adds fuse iomap support for buffered reads and readahead. This is needed so that, once large folios are enabled, fuse can use granular uptodate tracking and read in only the non-uptodate portions of a folio instead of the entire folio.

It is also needed in order to turn on large folios for servers that use the writeback cache. Without it there is a race condition that may lead to data corruption: if a partial write is followed by a read, and the read happens before the write has undergone writeback, the folio is not marked uptodate by the partial write, so the read pulls in the entire folio from disk and overwrites the partial write.

This is on top of two local iomap patches [1] [2], applied on top of commit f1c864be6e88 ("Merge branch 'vfs-6.18.async' into vfs.all") in Christian's vfs.all tree.

This series was run through fstests on fuse passthrough_hp with an out-of-kernel patch enabling fuse large folios. This patchset does not enable large folios on fuse yet; that will be part of a different patchset.

* patches from https://lore.kernel.org/20250926002609.1302233-1-joannelkoong@gmail.com:
  fuse: remove fc->blkbits workaround for partial writes
  fuse: use iomap for readahead
  fuse: use iomap for read_folio
  iomap: make iomap_read_folio() a void return
  iomap: move buffered io bio logic into new file
  iomap: add caller-provided callbacks for read and readahead
  iomap: set accurate iter->pos when reading folio ranges
  iomap: track pending read bytes more optimally
  iomap: rename iomap_readpage_ctx struct to iomap_read_folio_ctx
  iomap: rename iomap_readpage_iter() to iomap_read_folio_iter()
  iomap: iterate over folio mapping in iomap_readpage_iter()
  iomap: store read/readahead bio generically
  iomap: move read/readahead bio submission logic into helper function
  iomap: move bio read logic into helper function

Signed-off-by: Christian Brauner <brauner@kernel.org>
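For orientation, here is a minimal sketch of how a filesystem that does not go through the block layer (fuse being the motivating case) might use the caller-provided read callbacks this series adds to the header below. All myfs_* names, including myfs_fill_folio() and myfs_iomap_ops, are hypothetical placeholders, and the synchronous shape is just one possibility; the real conversion lives in the fuse patches of the series, not here.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>

/* Hypothetical helpers and ops; not part of this series. */
extern const struct iomap_ops myfs_iomap_ops;
int myfs_fill_folio(struct inode *inode, struct folio *folio,
                    size_t off, size_t len);

static int myfs_read_folio_range(const struct iomap_iter *iter,
                                 struct iomap_read_folio_ctx *ctx,
                                 size_t len)
{
        struct folio *folio = ctx->cur_folio;
        size_t off = offset_in_folio(folio, iter->pos);
        int err;

        /* Fill folio bytes [off, off + len) from the backing store. */
        err = myfs_fill_folio(iter->inode, folio, off, len);

        /*
         * Per the contract documented in struct iomap_read_ops below,
         * completion must be reported even when the read failed.
         */
        iomap_finish_folio_read(folio, off, len, err);
        return err;
}

static const struct iomap_read_ops myfs_read_ops = {
        .read_folio_range = myfs_read_folio_range,
        /* ->submit_read is optional and omitted for a synchronous read. */
};

static int myfs_aops_read_folio(struct file *file, struct folio *folio)
{
        struct iomap_read_folio_ctx ctx = {
                .ops       = &myfs_read_ops,
                .cur_folio = folio,
        };

        iomap_read_folio(&myfs_iomap_ops, &ctx);
        return 0;
}

The ->readahead() side would build the same ctx around a struct readahead_control (the ctx.rac field) instead of a single folio and call iomap_readahead().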
Diffstat (limited to 'include')
-rw-r--r--  include/linux/iomap.h  63
1 file changed, 61 insertions, 2 deletions
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 4469b2318b08..6d864b446b6e 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -16,6 +16,7 @@ struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
+struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -337,8 +338,10 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
@@ -465,6 +468,8 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
loff_t pos, loff_t end_pos, unsigned int dirty_len);
int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error);
void iomap_start_folio_write(struct inode *inode, struct folio *folio,
size_t len);
void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
@@ -473,6 +478,34 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
int iomap_writepages(struct iomap_writepage_ctx *wpc);
+struct iomap_read_folio_ctx {
+ const struct iomap_read_ops *ops;
+ struct folio *cur_folio;
+ struct readahead_control *rac;
+ void *read_ctx;
+};
+
+struct iomap_read_ops {
+ /*
+ * Read in a folio range.
+ *
+ * The caller is responsible for calling iomap_finish_folio_read() after
+ * reading in the folio range. This should be done even if an error is
+ * encountered during the read.
+ *
+ * Returns 0 on success or a negative error on failure.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t len);
+
+ /*
+ * Submit any pending read requests.
+ *
+ * This is optional.
+ */
+ void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+};
+
/*
* Flags for direct I/O ->end_io:
*/
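To illustrate the optional ->submit_read hook and the read_ctx cookie added in the hunk above, here is a hedged sketch of an asynchronous user that batches folio ranges and kicks them off in one go. struct myfs_read_req and the myfs_alloc_read_req()/myfs_queue_range()/myfs_send_read_req() helpers are invented for the example; the only fixed points are the callback signatures and the rule that iomap_finish_folio_read() must eventually run for every range (from this hypothetical filesystem's completion path).

#include <linux/errno.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>

/* Hypothetical request type and helpers; not part of this series. */
struct myfs_read_req;
struct myfs_read_req *myfs_alloc_read_req(struct inode *inode);
void myfs_queue_range(struct myfs_read_req *req, struct folio *folio,
                      size_t off, size_t len);
void myfs_send_read_req(struct myfs_read_req *req);

static int myfs_async_read_folio_range(const struct iomap_iter *iter,
                                       struct iomap_read_folio_ctx *ctx,
                                       size_t len)
{
        struct myfs_read_req *req = ctx->read_ctx;
        size_t off = offset_in_folio(ctx->cur_folio, iter->pos);

        if (!req) {
                req = myfs_alloc_read_req(iter->inode);
                if (!req) {
                        /* Completion must still be signalled on error. */
                        iomap_finish_folio_read(ctx->cur_folio, off, len,
                                                -ENOMEM);
                        return -ENOMEM;
                }
                ctx->read_ctx = req;
        }

        /*
         * Queue the range; the filesystem's completion path is expected
         * to call iomap_finish_folio_read() for it once the data arrives.
         */
        myfs_queue_range(req, ctx->cur_folio, off, len);
        return 0;
}

static void myfs_submit_read(struct iomap_read_folio_ctx *ctx)
{
        struct myfs_read_req *req = ctx->read_ctx;

        if (req)
                myfs_send_read_req(req);
        ctx->read_ctx = NULL;
}

static const struct iomap_read_ops myfs_async_read_ops = {
        .read_folio_range = myfs_async_read_folio_range,
        .submit_read      = myfs_submit_read,
};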
@@ -538,4 +571,30 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
extern struct bio_set iomap_ioend_bioset;
+#ifdef CONFIG_BLOCK
+extern const struct iomap_read_ops iomap_bio_read_ops;
+
+static inline void iomap_bio_read_folio(struct folio *folio,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .cur_folio = folio,
+ };
+
+ iomap_read_folio(ops, &ctx);
+}
+
+static inline void iomap_bio_readahead(struct readahead_control *rac,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .rac = rac,
+ };
+
+ iomap_readahead(ops, &ctx);
+}
+#endif /* CONFIG_BLOCK */
+
#endif /* LINUX_IOMAP_H */
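For block-based filesystems, the CONFIG_BLOCK wrappers above keep the conversion mechanical. Below is a hedged sketch of what the caller side might look like after this change; "myfs" and myfs_iomap_ops are placeholders rather than any in-tree user.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>

extern const struct iomap_ops myfs_iomap_ops;   /* hypothetical */

static int myfs_read_folio(struct file *file, struct folio *folio)
{
        /* Previously: return iomap_read_folio(folio, &myfs_iomap_ops); */
        iomap_bio_read_folio(folio, &myfs_iomap_ops);
        return 0;
}

static void myfs_readahead(struct readahead_control *rac)
{
        /* Previously: iomap_readahead(rac, &myfs_iomap_ops); */
        iomap_bio_readahead(rac, &myfs_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
        .read_folio = myfs_read_folio,
        .readahead  = myfs_readahead,
        /* write and writeback methods omitted */
};

The wrappers simply bundle the folio (or readahead_control) together with iomap_bio_read_ops into an iomap_read_folio_ctx, so existing block-based users keep the bio submission path while fuse can substitute its own iomap_read_ops.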