summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2026-02-02 09:06:33 +0300
committerEric Biggers <ebiggers@kernel.org>2026-02-03 04:15:26 +0300
commitf1a6cf44b344b1ac2cefb387779e3002be237a7e (patch)
treed5a8639f50d557be2c241d6e7f227213b44cf301 /include/linux
parent314b652b7e7ad335fa20b693c8878a4850dae098 (diff)
downloadlinux-f1a6cf44b344b1ac2cefb387779e3002be237a7e.tar.xz
fsverity: kick off hash readahead at data I/O submission time
Currently all reads of the fsverity hashes are kicked off from the data I/O completion handler, leading to needlessly dependent I/O. This is worked around a bit by performing readahead on the level 0 nodes, but it is still fairly ineffective. Switch to a model where the ->read_folio and ->readahead methods instead kick off explicit readahead of the fsverity hashes so they are usually available at I/O completion time. For 64k sequential reads on my test VM this improves read performance from 2.4GB/s - 2.6GB/s to 3.5GB/s - 3.9GB/s. The improvements for random reads are likely to be even bigger. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: David Sterba <dsterba@suse.com> # btrfs Link: https://lore.kernel.org/r/20260202060754.270269-5-hch@lst.de Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/fsverity.h30
1 file changed, 22 insertions, 8 deletions
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 8ddaa87fece3..722a42754a86 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -97,10 +97,6 @@ struct fsverity_operations {
*
* @inode: the inode
* @index: 0-based index of the page within the Merkle tree
- * @num_ra_pages: The number of Merkle tree pages that should be
- * prefetched starting at @index if the page at @index
- * isn't already cached. Implementations may ignore this
- * argument; it's only a performance optimization.
*
* This can be called at any time on an open verity file. It may be
* called by multiple processes concurrently, even with the same page.
@@ -110,8 +106,23 @@ struct fsverity_operations {
* Return: the page on success, ERR_PTR() on failure
*/
struct page *(*read_merkle_tree_page)(struct inode *inode,
- pgoff_t index,
- unsigned long num_ra_pages);
+ pgoff_t index);
+
+ /**
+ * Perform readahead of a Merkle tree for the given inode.
+ *
+ * @inode: the inode
+ * @index: 0-based index of the first page within the Merkle tree
+ * @nr_pages: number of pages to be read ahead.
+ *
+ * This can be called at any time on an open verity file. It may be
+ * called by multiple processes concurrently, even with the same range.
+ *
+ * Optional method so that ->read_merkle_tree_page preferably finds
+ * cached data instead of issuing dependent I/O.
+ */
+ void (*readahead_merkle_tree)(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
/**
* Write a Merkle tree block to the given file.
@@ -308,8 +319,11 @@ static inline int fsverity_file_open(struct inode *inode, struct file *filp)
}
void fsverity_cleanup_inode(struct inode *inode);
+void fsverity_readahead(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
-struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index,
- unsigned long num_ra_pages);
+struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index);
+void generic_readahead_merkle_tree(struct inode *inode, pgoff_t index,
+ unsigned long nr_pages);
#endif /* _LINUX_FSVERITY_H */