author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-28 22:29:43 +0300
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 20:01:32 +0300
commit     4b8554c527f3cfa183f6c06d231a9387873205a0
tree       cba1023980f8eaca5ae0f9c917056179113a1516  /mm/migrate.c
parent     869f7ee6f6477341f859c8b0949ae81caf9ca7f3
mm/rmap: Convert try_to_migrate() to folios
Convert the callers to pass a folio and the try_to_migrate_one()
worker to use a folio throughout. Fixes an assumption that a
folio must be <= PMD size.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
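
For context, a minimal sketch of the caller-side pattern this commit applies: derive the folio with page_folio() once, then hand the folio to try_to_migrate(). The helper name unmap_for_migration() is hypothetical; the sketch assumes the post-conversion prototype void try_to_migrate(struct folio *folio, enum ttu_flags flags) declared in <linux/rmap.h>.

/*
 * Hypothetical caller illustrating the conversion pattern: look up the
 * folio that contains @page once, then operate on the folio.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static void unmap_for_migration(struct page *page)
{
	struct folio *folio = page_folio(page);	/* folio containing @page */

	if (folio_mapped(folio))
		try_to_migrate(folio, 0);	/* replace mappings with migration entries */
}

This mirrors the __unmap_and_move() hunk below, where the folio is computed once at the top of the function and reused at the try_to_migrate() call site.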
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--   mm/migrate.c | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 358bc311caaa..6ed85a5d1be5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -912,6 +912,7 @@ out:
 static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
+	struct folio *folio = page_folio(page);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
@@ -1015,7 +1016,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		/* Establish migration ptes */
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
-		try_to_migrate(page, 0);
+		try_to_migrate(folio, 0);
 		page_was_mapped = true;
 	}
 
@@ -1165,6 +1166,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 				enum migrate_mode mode, int reason,
 				struct list_head *ret)
 {
+	struct folio *src = page_folio(hpage);
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct page *new_hpage;
@@ -1241,7 +1243,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 			ttu |= TTU_RMAP_LOCKED;
 		}
 
-		try_to_migrate(hpage, ttu);
+		try_to_migrate(src, ttu);
 		page_was_mapped = 1;
 
 		if (mapping_locked)