author     Matthew Wilcox (Oracle) <willy@infradead.org>  2021-05-27 19:30:54 +0300
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-04 21:15:34 +0300
commit     7836d9990079ed611199819ccf487061b748193a
tree       0ba00c8059d09d3b3311bd1373cde228241e827d
parent     2fa4eeb800c0f8279f396abde010360f48b4b1d4
download   linux-7836d9990079ed611199819ccf487061b748193a.tar.xz
readahead: Convert page_cache_async_ra() to take a folio
Using the folio here avoids checking whether it's a tail page.
This patch mostly just enables some of the following patches.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
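The tail-page point in the commit message: a struct page pointer passed to this function may refer to a tail page of a compound page, so page-flag helpers such as PageWriteback() must first resolve the head page via compound_head(). A folio, by definition, never refers to a tail page, so folio_test_writeback() can test the flag directly. A minimal sketch of the difference (illustrative only, not part of the patch):

	/* Old: "page" may be a tail page, so PageWriteback() has to
	 * resolve compound_head(page) internally before testing the flag.
	 */
	if (PageWriteback(page))
		return;

	/* New: a folio is never a tail page, so the flag is tested on
	 * the struct folio directly, with no head-page lookup.
	 */
	if (folio_test_writeback(folio))
		return;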
 include/linux/pagemap.h | 4 ++--
 mm/readahead.c          | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8c2cad7f0c36..30302be6977f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -993,7 +993,7 @@ struct readahead_control {
 void page_cache_ra_unbounded(struct readahead_control *,
 		unsigned long nr_to_read, unsigned long lookahead_count);
 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
 		unsigned long req_count);
 void readahead_expand(struct readahead_control *ractl,
 		loff_t new_start, size_t new_len);
@@ -1040,7 +1040,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 		struct page *page, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page, req_count);
+	page_cache_async_ra(&ractl, page_folio(page), req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
diff --git a/mm/readahead.c b/mm/readahead.c
index 6ae5693de28c..e48e78641772 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -581,7 +581,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-		struct page *page, unsigned long req_count)
+		struct folio *folio, unsigned long req_count)
 {
 	/* no read-ahead */
 	if (!ractl->ra->ra_pages)
@@ -590,10 +590,10 @@ void page_cache_async_ra(struct readahead_control *ractl,
 	/*
 	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
-	if (PageWriteback(page))
+	if (folio_test_writeback(folio))
 		return;
 
-	ClearPageReadahead(page);
+	folio_clear_readahead(folio);
 
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
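Existing callers that still hold a struct page are unaffected: the page_cache_async_readahead() wrapper in pagemap.h converts with page_folio() before calling the folio-based function. A hedged sketch of such a call site (the surrounding variable names are hypothetical; the wrapper signature is taken from the hunk above):

	/* Hypothetical read path: if this page was marked PG_readahead
	 * when it was brought in, kick off the next asynchronous batch.
	 * page_folio() inside the wrapper maps the (possibly tail) page
	 * to its containing folio before page_cache_async_ra() runs.
	 */
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, file, ra, page,
					   index, req_count);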