| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-12-11 19:22:08 +0300 |
| --- | --- | --- |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-29 22:58:25 +0300 |
| commit | f00f48436c789af659047d3c5d6f6d17e640634e (patch) | |
| tree | 5a2ab098071222e5af6ea3b6be6f1626385aec72 /mm/swapfile.c | |
| parent | 8d294a8c6393afbde59cf14a0e8413df4b206698 (diff) | |
| download | linux-f00f48436c789af659047d3c5d6f6d17e640634e.tar.xz | |
mm: convert unuse_pte() to use a folio throughout
Saves about eight calls to compound_head().
Link: https://lkml.kernel.org/r/20231211162214.2146080-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
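Aside (not part of the commit): the saving comes from the page-flag helpers. A page-based test such as PageUptodate() must resolve a possibly-tail page to its head page on every call, whereas a folio is never a tail page, so folio_test_uptodate() can read the flags directly. A minimal sketch of that difference, with hypothetical `*_sketch` names, simplified from the real helpers in include/linux/page-flags.h (which also add memory barriers and extra checks):

```c
/*
 * Sketch only, not from this patch: simplified versions of the helpers
 * in include/linux/page-flags.h. The real PageUptodate() also issues a
 * read barrier; this shows only the compound_head() cost being avoided.
 */
#include <linux/mm_types.h>
#include <linux/page-flags.h>

/* A folio is never a tail page, so its flags can be tested directly. */
static inline bool folio_uptodate_sketch(struct folio *folio)
{
	return test_bit(PG_uptodate, &folio->flags);
}

/*
 * A struct page may be a tail page of a compound page, so every
 * page-based test first pays for a compound_head() lookup.
 */
static inline bool page_uptodate_sketch(struct page *page)
{
	return test_bit(PG_uptodate, &compound_head(page)->flags);
}
```

Each page-based call the patch replaces (PageHWPoison(), PageUptodate(), get_page(), PageWriteback(), unlock_page(), put_page(), ...) carries one such hidden lookup, which is roughly where the "about eight" figure comes from.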
Diffstat (limited to 'mm/swapfile.c')
| -rw-r--r-- | mm/swapfile.c | 47 |

1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 85f9c355cb99..1501bc956456 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1741,21 +1741,25 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
-	struct page *page = folio_file_page(folio, swp_offset(entry));
-	struct page *swapcache;
+	struct page *page;
+	struct folio *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
-	bool hwpoisoned = PageHWPoison(page);
+	bool hwpoisoned = false;
 	int ret = 1;
 
-	swapcache = page;
+	swapcache = folio;
 	folio = ksm_might_need_to_copy(folio, vma, addr);
 	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+		hwpoisoned = true;
+		folio = swapcache;
+	}
+
+	page = folio_file_page(folio, swp_offset(entry));
+	if (PageHWPoison(page))
 		hwpoisoned = true;
-	else
-		page = folio_file_page(folio, swp_offset(entry));
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
@@ -1766,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	old_pte = ptep_get(pte);
 
-	if (unlikely(hwpoisoned || !PageUptodate(page))) {
+	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
 		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 		if (hwpoisoned) {
-			swp_entry = make_hwpoison_entry(swapcache);
-			page = swapcache;
+			swp_entry = make_hwpoison_entry(page);
 		} else {
 			swp_entry = make_poisoned_swp_entry();
 		}
@@ -1786,27 +1789,27 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	 * when reading from swap. This metadata may be indexed by swap entry
 	 * so this must be called before swap_free().
 	 */
-	arch_swap_restore(entry, page_folio(page));
+	arch_swap_restore(entry, folio);
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	get_page(page);
-	if (page == swapcache) {
+	folio_get(folio);
+	if (folio == swapcache) {
 		rmap_t rmap_flags = RMAP_NONE;
 
 		/*
-		 * See do_swap_page(): PageWriteback() would be problematic.
-		 * However, we do a wait_on_page_writeback() just before this
-		 * call and have the page locked.
+		 * See do_swap_page(): writeback would be problematic.
+		 * However, we do a folio_wait_writeback() just before this
+		 * call and have the folio locked.
 		 */
-		VM_BUG_ON_PAGE(PageWriteback(page), page);
+		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
 		if (pte_swp_exclusive(old_pte))
 			rmap_flags |= RMAP_EXCLUSIVE;
 
 		page_add_anon_rmap(page, vma, addr, rmap_flags);
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	}
 	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(old_pte))
@@ -1819,9 +1822,9 @@ setpte:
 out:
 	if (pte)
 		pte_unmap_unlock(pte, ptl);
-	if (page != swapcache) {
-		unlock_page(page);
-		put_page(page);
+	if (folio != swapcache) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 	return ret;
 }