author	Josef Bacik <josef@toxicpanda.com>	2024-07-23 22:55:33 +0300
committer	David Sterba <dsterba@suse.com>	2024-09-10 17:51:12 +0300
commit	03d6612648a48d0f5a60a013d4b583b4886807d2 (patch)
tree	78aa9a84fe0a163fc75158e0b9f2ff5f26434b37 /fs/btrfs/extent_io.c
parent	45714ff75c3618a191a952ce96ec15724bd4fdb3 (diff)
download	linux-03d6612648a48d0f5a60a013d4b583b4886807d2.tar.xz
btrfs: convert btrfs_readahead() to only use folio

We're the only user of readahead_page_batch(). Convert btrfs_readahead()
to use the folio based helpers to do readahead.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
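For illustration, a minimal sketch (not part of the patch) of the folio-based
->readahead pattern the commit switches to. The names my_readahead,
my_read_folio_locked, and my_aops are hypothetical stand-ins for
btrfs_readahead(), btrfs_do_readpage(), and btrfs's aops; the key point is
that readahead_folio() hands out each locked folio in turn and consumes the
reference, so the fixed-size page array and the explicit put_page() of the
old readahead_page_batch() loop disappear.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/printk.h>

/*
 * Hypothetical per-fs hook standing in for btrfs_do_readpage(): a real
 * implementation would fill the folio from disk; here we just pretend
 * by zeroing it, then mark it uptodate and unlock it.
 */
static void my_read_folio_locked(struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
}

static void my_readahead(struct readahead_control *rac)
{
	struct folio *folio;
	/* The whole readahead request is one contiguous byte range. */
	loff_t start = readahead_pos(rac);
	size_t len = readahead_length(rac);

	/* Any per-range setup for [start, start + len - 1] goes here. */
	pr_debug("readahead at %lld, %zu bytes\n", start, len);

	/*
	 * readahead_folio() returns each folio locked and drops our
	 * reference to it, so there is no put_page()/folio_put() here.
	 */
	while ((folio = readahead_folio(rac)) != NULL)
		my_read_folio_locked(folio);
}

static const struct address_space_operations my_aops = {
	.readahead = my_readahead,
};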
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	36
1 file changed, 8 insertions(+), 28 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c73cd4f89015..2798a3ca1db4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1176,26 +1176,6 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
 	return ret;
 }
 
-static inline void contiguous_readpages(struct page *pages[], int nr_pages,
-					u64 start, u64 end,
-					struct extent_map **em_cached,
-					struct btrfs_bio_ctrl *bio_ctrl,
-					u64 *prev_em_start)
-{
-	struct btrfs_inode *inode = page_to_inode(pages[0]);
-	int index;
-
-	ASSERT(em_cached);
-
-	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
-
-	for (index = 0; index < nr_pages; index++) {
-		btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
-				  prev_em_start);
-		put_page(pages[index]);
-	}
-}
-
 /*
  * helper for __extent_writepage, doing all of the delayed allocation setup.
  *
@@ -2379,18 +2359,18 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wb
 void btrfs_readahead(struct readahead_control *rac)
 {
 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
-	struct page *pagepool[16];
+	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
+	struct folio *folio;
+	u64 start = readahead_pos(rac);
+	u64 end = start + readahead_length(rac) - 1;
 	struct extent_map *em_cached = NULL;
 	u64 prev_em_start = (u64)-1;
-	int nr;
 
-	while ((nr = readahead_page_batch(rac, pagepool))) {
-		u64 contig_start = readahead_pos(rac);
-		u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
+	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 
-		contiguous_readpages(pagepool, nr, contig_start, contig_end,
-				     &em_cached, &bio_ctrl, &prev_em_start);
-	}
+	while ((folio = readahead_folio(rac)) != NULL)
+		btrfs_do_readpage(&folio->page, &em_cached, &bio_ctrl,
+				  &prev_em_start);
 
 	if (em_cached)
 		free_extent_map(em_cached);
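Read straight through, the converted function (assembled here from the
context and '+' lines of the hunk above; anything past the hunk's trailing
context is omitted) does one ordered-range lock/flush for the whole window
and then a plain folio loop:

void btrfs_readahead(struct readahead_control *rac)
{
	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
	struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
	struct folio *folio;
	u64 start = readahead_pos(rac);
	u64 end = start + readahead_length(rac) - 1;
	struct extent_map *em_cached = NULL;
	u64 prev_em_start = (u64)-1;

	/* Once for the whole range, not once per 16-page batch. */
	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	/* readahead_folio() consumes the reference, so no put_page() here. */
	while ((folio = readahead_folio(rac)) != NULL)
		btrfs_do_readpage(&folio->page, &em_cached, &bio_ctrl,
				  &prev_em_start);

	if (em_cached)
		free_extent_map(em_cached);
	/* ... the remainder of the function lies outside the hunk shown. */
}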