diff options
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2026-01-09 07:13:43 +0300 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2026-01-27 07:02:33 +0300 |
| commit | 832d95b5314eea558cf4cc9ca40db10122ce8f63 (patch) | |
| tree | a5556e7edd428f89e2363efd3bf34dd3284bfce2 | |
| parent | 3d702678f57edc524f73a7865382ae304269f590 (diff) | |
| download | linux-832d95b5314eea558cf4cc9ca40db10122ce8f63.tar.xz | |
migrate: replace RMP_ flags with TTU_ flags
Instead of translating between RMP_ and TTU_ flags, remove the RMP_ flags
and just use the TTU_ flag space; there's plenty available.
Possibly we should rename these to RMAP_ flags, and maybe even pass them
in through rmap_walk_arg, but that can be done later.
Link: https://lkml.kernel.org/r/20260109041345.3863089-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| -rw-r--r-- | include/linux/rmap.h | 9 | ||||
| -rw-r--r-- | mm/huge_memory.c | 8 | ||||
| -rw-r--r-- | mm/migrate.c | 12 |
3 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index dd764951b03d..8dc0871e5f00 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -92,6 +92,7 @@ struct anon_vma_chain {
 };
 
 enum ttu_flags {
+	TTU_USE_SHARED_ZEROPAGE = 0x2,	/* for unused pages of large folios */
 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
@@ -933,12 +934,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 		      struct vm_area_struct *vma);
 
-enum rmp_flags {
-	RMP_LOCKED		= 1 << 0,
-	RMP_USE_SHARED_ZEROPAGE	= 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..44ff8a648afd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3431,7 +3431,7 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
-		remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
 		i += folio_nr_pages(folio);
 		if (i >= nr)
 			break;
@@ -3944,7 +3944,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
-	int remap_flags = 0;
+	enum ttu_flags ttu_flags = 0;
 	int ret;
 	pgoff_t end = 0;
@@ -4064,9 +4064,9 @@ fail:
 		shmem_uncharge(mapping->host, nr_shmem_dropped);
 
 	if (!ret && is_anon && !folio_is_device_private(folio))
-		remap_flags = RMP_USE_SHARED_ZEROPAGE;
+		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << old_order, remap_flags);
+	remap_page(folio, 1 << old_order, ttu_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
diff --git a/mm/migrate.c b/mm/migrate.c
index 4688b9e38cd2..4750a2ba15fe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -452,11 +452,12 @@ static bool remove_migration_pte(struct folio *folio,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags)
 {
 	struct rmap_walk_arg rmap_walk_arg = {
 		.folio = src,
-		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+		.map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
 	};
 
 	struct rmap_walk_control rwc = {
@@ -464,9 +465,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
 		.arg = &rmap_walk_arg,
 	};
 
-	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+	VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
 
-	if (flags & RMP_LOCKED)
+	if (flags & TTU_RMAP_LOCKED)
 		rmap_walk_locked(dst, &rwc);
 	else
 		rmap_walk(dst, &rwc);
@@ -1521,8 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 		rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(src, !rc ? dst : src,
-				ttu ? RMP_LOCKED : 0);
+		remove_migration_ptes(src, !rc ? dst : src, ttu);
 
 	if (ttu & TTU_RMAP_LOCKED)
 		i_mmap_unlock_write(mapping);
