author     Vishal Moola (Oracle) <vishal.moola@gmail.com>   2022-12-21 21:08:46 +0300
committer  Andrew Morton <akpm@linux-foundation.org>        2023-01-19 04:12:47 +0300
commit     07e8c82b5eff8ef34b74210eacb8d9c4a2886b82
tree       50a699b37be1d962217d0cd8c4977c857a6a5fbc /mm/madvise.c
parent     318e9342fbbb6888d903d86e83865609901a1c65
download   linux-07e8c82b5eff8ef34b74210eacb8d9c4a2886b82.tar.xz
madvise: convert madvise_cold_or_pageout_pte_range() to use folios
This change removes a number of calls to compound_head(), and saves
1729 bytes of kernel text.
Link: https://lkml.kernel.org/r/20221221180848.20774-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/madvise.c')
-rw-r--r--   mm/madvise.c | 98
1 file changed, 49 insertions(+), 49 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 479d9a32e44a..575ebf0363b8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -345,8 +345,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	struct vm_area_struct *vma = walk->vma;
 	pte_t *orig_pte, *pte, ptent;
 	spinlock_t *ptl;
-	struct page *page = NULL;
-	LIST_HEAD(page_list);
+	struct folio *folio = NULL;
+	LIST_HEAD(folio_list);
 	bool pageout_anon_only_filter;
 
 	if (fatal_signal_pending(current))
@@ -375,26 +375,26 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			goto huge_unlock;
 		}
 
-		page = pmd_page(orig_pmd);
+		folio = pfn_folio(pmd_pfn(orig_pmd));
 
-		/* Do not interfere with other mappings of this page */
-		if (page_mapcount(page) != 1)
+		/* Do not interfere with other mappings of this folio */
+		if (folio_mapcount(folio) != 1)
 			goto huge_unlock;
 
-		if (pageout_anon_only_filter && !PageAnon(page))
+		if (pageout_anon_only_filter && !folio_test_anon(folio))
 			goto huge_unlock;
 
 		if (next - addr != HPAGE_PMD_SIZE) {
 			int err;
 
-			get_page(page);
+			folio_get(folio);
 			spin_unlock(ptl);
-			lock_page(page);
-			err = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
+			folio_lock(folio);
+			err = split_folio(folio);
+			folio_unlock(folio);
+			folio_put(folio);
 			if (!err)
-				goto regular_page;
+				goto regular_folio;
 			return 0;
 		}
 
@@ -406,25 +406,25 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		}
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
 		if (pageout) {
-			if (!isolate_lru_page(page)) {
-				if (PageUnevictable(page))
-					putback_lru_page(page);
+			if (!folio_isolate_lru(folio)) {
+				if (folio_test_unevictable(folio))
+					folio_putback_lru(folio);
 				else
-					list_add(&page->lru, &page_list);
+					list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(page);
+			deactivate_page(&folio->page);
 huge_unlock:
 		spin_unlock(ptl);
 		if (pageout)
-			reclaim_pages(&page_list);
+			reclaim_pages(&folio_list);
 		return 0;
 	}
 
-regular_page:
+regular_folio:
 	if (pmd_trans_unstable(pmd))
 		return 0;
 #endif
@@ -441,33 +441,33 @@ regular_page:
 		if (!pte_present(ptent))
 			continue;
 
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page || is_zone_device_page(page))
+		folio = vm_normal_folio(vma, addr, ptent);
+		if (!folio || folio_is_zone_device(folio))
 			continue;
 
 		/*
 		 * Creating a THP page is expensive so split it only if we
 		 * are sure it's worth. Split it if we are only owner.
 		 */
-		if (PageTransCompound(page)) {
-			if (page_mapcount(page) != 1)
+		if (folio_test_large(folio)) {
+			if (folio_mapcount(folio) != 1)
 				break;
-			if (pageout_anon_only_filter && !PageAnon(page))
+			if (pageout_anon_only_filter && !folio_test_anon(folio))
 				break;
-			get_page(page);
-			if (!trylock_page(page)) {
-				put_page(page);
+			folio_get(folio);
+			if (!folio_trylock(folio)) {
+				folio_put(folio);
 				break;
 			}
 			pte_unmap_unlock(orig_pte, ptl);
-			if (split_huge_page(page)) {
-				unlock_page(page);
-				put_page(page);
+			if (split_folio(folio)) {
+				folio_unlock(folio);
+				folio_put(folio);
 				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 				break;
 			}
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 			pte--;
 			addr -= PAGE_SIZE;
@@ -475,16 +475,16 @@ regular_page:
 		}
 
 		/*
-		 * Do not interfere with other mappings of this page and
-		 * non-LRU page.
+		 * Do not interfere with other mappings of this folio and
+		 * non-LRU folio.
 		 */
-		if (!PageLRU(page) || page_mapcount(page) != 1)
+		if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
 			continue;
 
-		if (pageout_anon_only_filter && !PageAnon(page))
+		if (pageout_anon_only_filter && !folio_test_anon(folio))
 			continue;
 
-		VM_BUG_ON_PAGE(PageTransCompound(page), page);
+		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 
 		if (pte_young(ptent)) {
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
@@ -495,28 +495,28 @@ regular_page:
 		}
 
 		/*
-		 * We are deactivating a page for accelerating reclaiming.
-		 * VM couldn't reclaim the page unless we clear PG_young.
+		 * We are deactivating a folio for accelerating reclaiming.
+		 * VM couldn't reclaim the folio unless we clear PG_young.
 		 * As a side effect, it makes confuse idle-page tracking
 		 * because they will miss recent referenced history.
 		 */
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
 		if (pageout) {
-			if (!isolate_lru_page(page)) {
-				if (PageUnevictable(page))
-					putback_lru_page(page);
+			if (!folio_isolate_lru(folio)) {
+				if (folio_test_unevictable(folio))
+					folio_putback_lru(folio);
 				else
-					list_add(&page->lru, &page_list);
+					list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(page);
+			deactivate_page(&folio->page);
 	}
 
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(orig_pte, ptl);
 	if (pageout)
-		reclaim_pages(&page_list);
+		reclaim_pages(&folio_list);
 	cond_resched();
 
 	return 0;
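
The text savings quoted in the commit message come from dropping the hidden compound_head() call that page-based helpers perform: PageAnon(), PageLRU(), ClearPageReferenced() and similar helpers normalise a possibly-tail page to its head page on every call, while the folio_* helpers operate on a folio that is by definition already a head page. The following is a minimal userspace sketch of that difference, not kernel code; the struct layouts, the compound_head() model and the main() driver are simplified assumptions that only mimic the kernel's naming.

	/*
	 * Illustration only (userspace, not kernel code): why folio helpers
	 * avoid the per-call compound_head() lookup that page helpers pay for.
	 * The struct layouts and helpers below are simplified assumptions.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct page {
		struct page *compound_head;	/* NULL when this is a head page */
		bool anon;
	};

	struct folio {				/* a folio is always a head page */
		struct page page;
	};

	static struct page *compound_head(struct page *page)
	{
		return page->compound_head ? page->compound_head : page;
	}

	/* Page-style helper: must normalise a possibly-tail page on every call. */
	static bool PageAnon(struct page *page)
	{
		return compound_head(page)->anon;
	}

	/* Folio-style helper: the caller already holds the head, no lookup. */
	static bool folio_test_anon(struct folio *folio)
	{
		return folio->page.anon;
	}

	int main(void)
	{
		struct folio head = { .page = { .compound_head = NULL, .anon = true } };
		struct page tail = { .compound_head = &head.page, .anon = false };

		/* Old style: each helper call repeats the head lookup. */
		printf("PageAnon(tail)         = %d\n", PageAnon(&tail));

		/* New style: resolve the head once, then use folio helpers. */
		struct folio *folio = (struct folio *)compound_head(&tail);
		printf("folio_test_anon(folio) = %d\n", folio_test_anon(folio));
		return 0;
	}

Under these assumptions, each converted call site sheds a few inlined instructions of head-page lookup, which is broadly where the 1729 bytes of kernel text quoted in the commit message are saved.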