author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-01-16 22:39:39 +0300
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-02-03 09:33:21 +0300
commit | 8808ecab3afc18958a9216911cd7967017e7057c
tree | fbfd9f9c385f7244447b2de4d7f9b52ab8fd33a8 /mm/filemap.c
parent | 5b4bd90f9ac76136c7148684b12276d4ae2d64a2
download | linux-8808ecab3afc18958a9216911cd7967017e7057c.tar.xz
filemap: convert filemap_map_pmd() to take a folio
Patch series "Some more filemap folio conversions".
Three more places which could easily be converted to folios. The third
one fixes a minor bug in readahead_expand(), but it's only a performance
bug and there are few users of readahead_expand(), so I don't think it's
worth backporting.
This patch (of 3):
Save a few calls to compound_head(). We specify exactly which page from
the folio to use by passing in start_pgoff, which means this will work for
a folio which is larger than PMD size. The rest of the VM isn't prepared
for that yet, but now this function is.
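For context, the page selection described above comes down to folio_file_page(), which masks the file offset down to an offset within the folio. The following is a minimal illustrative sketch modelled on the helper in include/linux/pagemap.h; the _sketch name is only for illustration, not the upstream symbol.

#include <linux/pagemap.h>

/*
 * Illustrative sketch only: pick the page that backs @index within @folio.
 * The upstream helper is folio_file_page(); folio_nr_pages() is a power of
 * two, so the mask keeps the low bits of the file offset, i.e. the page's
 * position inside the folio. This is what lets the caller pass start_pgoff
 * and still get the right page even from a folio larger than PMD size.
 */
static inline struct page *folio_file_page_sketch(struct folio *folio,
		pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}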
Link: https://lkml.kernel.org/r/20230116193941.2148487-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230116193941.2148487-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 18
1 file changed, 10 insertions, 8 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 31bf18ec6d01..b6b7efc9abc0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3259,22 +3259,24 @@ out_retry:
 }
 EXPORT_SYMBOL(filemap_fault);
 
-static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
+static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
+		pgoff_t start)
 {
 	struct mm_struct *mm = vmf->vma->vm_mm;
 
 	/* Huge page is mapped? No need to proceed. */
 	if (pmd_trans_huge(*vmf->pmd)) {
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		return true;
 	}
 
-	if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
+	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
+		struct page *page = folio_file_page(folio, start);
 		vm_fault_t ret = do_set_pmd(vmf, page);
 		if (!ret) {
 			/* The page is mapped successfully, reference consumed. */
-			unlock_page(page);
+			folio_unlock(folio);
 			return true;
 		}
 	}
@@ -3284,8 +3286,8 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 
 	/* See comment in handle_pte_fault() */
 	if (pmd_devmap_trans_unstable(vmf->pmd)) {
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		return true;
 	}
 
@@ -3368,7 +3370,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	if (!folio)
 		goto out;
 
-	if (filemap_map_pmd(vmf, &folio->page)) {
+	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
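A note on the predicate swap in the first hunk: folio_test_pmd_mappable() is, in essence, an order check, so the pmd_none() branch only fires for folios large enough to back a whole PMD entry. A simplified sketch of that check, written here for illustration and mirroring the definition in include/linux/huge_mm.h:

#include <linux/huge_mm.h>

/*
 * Illustrative sketch only: a folio can be mapped with a PMD entry when its
 * order is at least HPAGE_PMD_ORDER, i.e. it covers a full PMD's worth of
 * pages. Mirrors folio_test_pmd_mappable() in include/linux/huge_mm.h.
 */
static inline bool folio_pmd_mappable_sketch(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}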