author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-05 20:56:27 +0300
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-05 20:56:27 +0300
commit | 5eea5820c7340d39e56e169e1b87199391105f6b (patch) |
tree | 807d640dc77fa1d9fb9b12cc8ba234925b177031 /mm |
parent | 893a259caa6f08477bff2bccf1df0cdff38271ac (diff) |
parent | e68d343d2720779362cb7160cb7f4bd24979b2b4 (diff) |
download | linux-5eea5820c7340d39e56e169e1b87199391105f6b.tar.xz |
Merge tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:
- Stefan Roesch has added ksm statistics to /proc/pid/smaps (a short reader sketch follows the shortlog below)
- Also a number of singleton patches, mainly cleanups and leftovers
* tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/kmemleak: move up cond_resched() call in page scanning loop
mm: page_alloc: remove stale CMA guard code
MAINTAINERS: add rmap.h to mm entry
rmap: remove anon_vma_link() nommu stub
proc/ksm: add ksm stats to /proc/pid/smaps
mm/hwpoison: rename hwp_walk* to hwpoison_walk*
mm: memory-failure: add PageOffline() check
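
As background for the smaps change above: each VMA entry in /proc/<pid>/smaps gains a KSM counter, so userspace can total a process's KSM-backed memory without walking pagemap. Below is a minimal reader sketch; the exact field name ("KSM:") is an assumption based on this series and should be verified against a kernel that carries it.

```c
/*
 * Sum the per-VMA KSM counters in /proc/<pid>/smaps.
 * Assumption: the series adds a "KSM: <n> kB" line to each VMA
 * entry; verify the field name on a patched kernel.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	long kb, total_kb = 0;

	snprintf(path, sizeof(path), "/proc/%s/smaps",
		 argc > 1 ? argv[1] : "self");
	FILE *f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "KSM: %ld kB", &kb) == 1)
			total_kb += kb;
	fclose(f);
	printf("KSM-backed: %ld kB\n", total_kb);
	return 0;
}
```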
Diffstat (limited to 'mm')
-rw-r--r-- | mm/kmemleak.c | 5
-rw-r--r-- | mm/memory-failure.c | 21
-rw-r--r-- | mm/page_alloc.c | 21
3 files changed, 18 insertions, 29 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2918150e31bd..54c2c90d3abc 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1584,6 +1584,9 @@ static void kmemleak_scan(void)
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 			struct page *page = pfn_to_online_page(pfn);
 
+			if (!(pfn & 63))
+				cond_resched();
+
 			if (!page)
 				continue;
 
@@ -1594,8 +1597,6 @@ static void kmemleak_scan(void)
 			if (page_count(page) == 0)
 				continue;
 			scan_block(page, page + 1, NULL);
-			if (!(pfn & 63))
-				cond_resched();
 		}
 	}
 	put_online_mems();
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 881c35ef1daa..7b01fffe7a79 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -717,7 +717,7 @@ static void collect_procs(struct page *page, struct list_head *tokill,
 		collect_procs_file(page, tokill, force_early);
 }
 
-struct hwp_walk {
+struct hwpoison_walk {
 	struct to_kill tk;
 	unsigned long pfn;
 	int flags;
@@ -752,7 +752,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	pmd_t pmd = *pmdp;
 	unsigned long pfn;
@@ -770,7 +770,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 }
 #else
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	return 0;
 }
@@ -779,7 +779,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
 			      unsigned long end, struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	int ret = 0;
 	pte_t *ptep, *mapped_pte;
 	spinlock_t *ptl;
@@ -813,7 +813,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 			    unsigned long addr, unsigned long end,
 			    struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	pte_t pte = huge_ptep_get(ptep);
 	struct hstate *h = hstate_vma(walk->vma);
 
@@ -824,7 +824,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 #define hwpoison_hugetlb_range NULL
 #endif
 
-static const struct mm_walk_ops hwp_walk_ops = {
+static const struct mm_walk_ops hwpoison_walk_ops = {
 	.pmd_entry = hwpoison_pte_range,
 	.hugetlb_entry = hwpoison_hugetlb_range,
 	.walk_lock = PGWALK_RDLOCK,
@@ -847,7 +847,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 				  int flags)
 {
 	int ret;
-	struct hwp_walk priv = {
+	struct hwpoison_walk priv = {
 		.pfn = pfn,
 	};
 	priv.tk.tsk = p;
@@ -856,7 +856,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 		return -EFAULT;
 
 	mmap_read_lock(p->mm);
-	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
+	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
 			      (void *)&priv);
 	if (ret == 1 && priv.tk.addr)
 		kill_proc(&priv.tk, pfn, flags);
@@ -1562,7 +1562,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Here we are interested only in user-mapped pages, so skip any
 	 * other types of pages.
 	 */
-	if (PageReserved(p) || PageSlab(p) || PageTable(p))
+	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
 		return true;
 	if (!(PageLRU(hpage) || PageHuge(p)))
 		return true;
@@ -2533,7 +2533,8 @@ int unpoison_memory(unsigned long pfn)
 		goto unlock_mutex;
 	}
 
-	if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio))
+	if (folio_test_slab(folio) || PageTable(&folio->page) ||
+	    folio_test_reserved(folio) || PageOffline(&folio->page))
 		goto unlock_mutex;
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 452459836b71..0c5be12f9336 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,12 +2641,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 	do {
 		page = NULL;
 		spin_lock_irqsave(&zone->lock, flags);
-		/*
-		 * order-0 request can reach here when the pcplist is skipped
-		 * due to non-CMA allocation context. HIGHATOMIC area is
-		 * reserved for high-order atomic allocation, so order-0
-		 * request should skip it.
-		 */
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
@@ -2780,17 +2774,10 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
-		/*
-		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
-		 * we need to skip it when CMA area isn't allowed.
-		 */
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-		    migratetype != MIGRATE_MOVABLE) {
-			page = rmqueue_pcplist(preferred_zone, zone, order,
-					migratetype, alloc_flags);
-			if (likely(page))
-				goto out;
-		}
+		page = rmqueue_pcplist(preferred_zone, zone, order,
+				       migratetype, alloc_flags);
+		if (likely(page))
+			goto out;
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
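
On the kmemleak hunks: with cond_resched() at the bottom of the loop, every early `continue` (offline PFN, page outside the node, untracked or free page) bypassed the reschedule point, so a long run of skipped PFNs could monopolize the CPU during a scan. Moving the call to the top guarantees a reschedule opportunity every 64 PFNs on all paths. A minimal userspace analogue of the corrected pattern, with hypothetical `should_skip()`/`process()` stand-ins for the per-page checks:

```c
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-page checks and the real work. */
static bool should_skip(unsigned long i) { return i % 3 != 0; }
static void process(unsigned long i) { (void)i; }

static void scan(unsigned long start, unsigned long end)
{
	for (unsigned long i = start; i < end; i++) {
		/*
		 * Yield every 64 iterations *before* any early continue,
		 * so even a long run of skipped entries cannot hog the CPU.
		 */
		if (!(i & 63))
			sched_yield();

		if (should_skip(i))
			continue;

		process(i);
	}
}

int main(void)
{
	scan(0, 1 << 20);
	puts("scan finished with periodic yields");
	return 0;
}
```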
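
On the page_alloc hunks: the deleted guard dated from when a single MIGRATE_MOVABLE pcplist could hold CMA pages, forcing non-CMA order-0 requests to bypass the pcplist entirely (which is also the situation the removed HIGHATOMIC comment in rmqueue_buddy() described). With pcplists kept per migratetype, rmqueue_pcplist() can only hand back pages of the requested type, so the fast path becomes unconditional. Reassembled from the hunks above, rmqueue() now reads roughly as follows; this is an illustrative excerpt, not standalone-buildable, and the trailing `migratetype` argument plus the `out:` tail are completed from surrounding context rather than shown in the diff:

```c
/* Illustrative excerpt only; pieces outside the hunks are assumptions. */
static inline
struct page *rmqueue(struct zone *preferred_zone, struct zone *zone,
		     unsigned int order, gfp_t gfp_flags,
		     unsigned int alloc_flags, int migratetype)
{
	struct page *page;

	/* __GFP_NOFAIL is only honoured up to order-1 allocations. */
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));

	if (likely(pcp_allowed_order(order))) {
		/* Per-migratetype pcplists: no CMA guard is needed here. */
		page = rmqueue_pcplist(preferred_zone, zone, order,
				       migratetype, alloc_flags);
		if (likely(page))
			goto out;
	}

	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
			     migratetype);
out:
	/* ... zone statistics and wakeup handling continue unchanged. */
	return page;
}
```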