From f5d09de9f1bf9674c6418ff10d0a40cfe29268e1 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 28 Oct 2024 22:56:56 +0800 Subject: mm: use aligned address in copy_user_gigantic_page() In current kernel, hugetlb_wp() calls copy_user_large_folio() with the fault address. Where the fault address may be not aligned with the huge page size. Then, copy_user_large_folio() may call copy_user_gigantic_page() with the address, while copy_user_gigantic_page() requires the address to be huge page size aligned. So, this may cause memory corruption or information leak, addtional, use more obvious naming 'addr_hint' instead of 'addr' for copy_user_gigantic_page(). Link: https://lkml.kernel.org/r/20241028145656.932941-2-wangkefeng.wang@huawei.com Fixes: 530dd9926dc1 ("mm: memory: improve copy_user_large_folio()") Signed-off-by: Kefeng Wang Reviewed-by: David Hildenbrand Cc: Huang Ying Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ea2ed8e301ef..cec4b121193f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5340,7 +5340,7 @@ again: break; } ret = copy_user_large_folio(new_folio, pte_folio, - ALIGN_DOWN(addr, sz), dst_vma); + addr, dst_vma); folio_put(pte_folio); if (ret) { folio_put(new_folio); @@ -6643,8 +6643,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, *foliop = NULL; goto out; } - ret = copy_user_large_folio(folio, *foliop, - ALIGN_DOWN(dst_addr, size), dst_vma); + ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); folio_put(*foliop); *foliop = NULL; if (ret) { -- cgit v1.2.3 From 59d9094df3d79443937add8700b2ef1a866b1081 Mon Sep 17 00:00:00 2001 From: Liu Shixin Date: Mon, 16 Dec 2024 15:11:47 +0800 Subject: mm: hugetlb: independent PMD page table shared count The folio refcount may be increased unexpectly through try_get_folio() by caller such as split_huge_pages. In huge_pmd_unshare(), we use refcount to check whether a pmd page table is shared. The check is incorrect if the refcount is increased by the above caller, and this can cause the page table leaked: BUG: Bad page state in process sh pfn:109324 page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x66 pfn:0x109324 flags: 0x17ffff800000000(node=0|zone=2|lastcpupid=0xfffff) page_type: f2(table) raw: 017ffff800000000 0000000000000000 0000000000000000 0000000000000000 raw: 0000000000000066 0000000000000000 00000000f2000000 0000000000000000 page dumped because: nonzero mapcount ... CPU: 31 UID: 0 PID: 7515 Comm: sh Kdump: loaded Tainted: G B 6.13.0-rc2master+ #7 Tainted: [B]=BAD_PAGE Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 Call trace: show_stack+0x20/0x38 (C) dump_stack_lvl+0x80/0xf8 dump_stack+0x18/0x28 bad_page+0x8c/0x130 free_page_is_bad_report+0xa4/0xb0 free_unref_page+0x3cc/0x620 __folio_put+0xf4/0x158 split_huge_pages_all+0x1e0/0x3e8 split_huge_pages_write+0x25c/0x2d8 full_proxy_write+0x64/0xd8 vfs_write+0xcc/0x280 ksys_write+0x70/0x110 __arm64_sys_write+0x24/0x38 invoke_syscall+0x50/0x120 el0_svc_common.constprop.0+0xc8/0xf0 do_el0_svc+0x24/0x38 el0_svc+0x34/0x128 el0t_64_sync_handler+0xc8/0xd0 el0t_64_sync+0x190/0x198 The issue may be triggered by damon, offline_page, page_idle, etc, which will increase the refcount of page table. 1. The page table itself will be discarded after reporting the "nonzero mapcount". 2. 
The HugeTLB page mapped by the page table miss freeing since we treat the page table as shared and a shared page table will not be unmapped. Fix it by introducing independent PMD page table shared count. As described by comment, pt_index/pt_mm/pt_frag_refcount are used for s390 gmap, x86 pgds and powerpc, pt_share_count is used for x86/arm64/riscv pmds, so we can reuse the field as pt_share_count. Link: https://lkml.kernel.org/r/20241216071147.3984217-1-liushixin2@huawei.com Fixes: 39dde65c9940 ("[PATCH] shared page table for hugetlb page") Signed-off-by: Liu Shixin Cc: Kefeng Wang Cc: Ken Chen Cc: Muchun Song Cc: Nanyong Sun Cc: Jane Chu Cc: Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + include/linux/mm_types.h | 30 ++++++++++++++++++++++++++++++ mm/hugetlb.c | 16 +++++++--------- 3 files changed, 38 insertions(+), 9 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index fb397918c43d..b1c3db9cf355 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3125,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) if (!pmd_ptlock_init(ptdesc)) return false; __folio_set_pgtable(folio); + ptdesc_pmd_pts_init(ptdesc); lruvec_stat_add_folio(folio, NR_PAGETABLE); return true; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7361a8f3ab68..332cee285662 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a); * @pt_index: Used for s390 gmap. * @pt_mm: Used for x86 pgds. * @pt_frag_refcount: For fragmented page table tracking. Powerpc only. + * @pt_share_count: Used for HugeTLB PMD page table share count. * @_pt_pad_2: Padding to ensure proper alignment. * @ptl: Lock for the page table. * @__page_type: Same as page->page_type. Unused for page tables. 
@@ -471,6 +472,9 @@ struct ptdesc { pgoff_t pt_index; struct mm_struct *pt_mm; atomic_t pt_frag_refcount; +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING + atomic_t pt_share_count; +#endif }; union { @@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); const struct page *: (const struct ptdesc *)(p), \ struct page *: (struct ptdesc *)(p))) +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ + atomic_set(&ptdesc->pt_share_count, 0); +} + +static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc) +{ + atomic_inc(&ptdesc->pt_share_count); +} + +static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc) +{ + atomic_dec(&ptdesc->pt_share_count); +} + +static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc) +{ + return atomic_read(&ptdesc->pt_share_count); +} +#else +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ +} +#endif + /* * Used for sizing the vmemmap region on some architectures */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cec4b121193f..c498874a7170 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7211,7 +7211,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, spte = hugetlb_walk(svma, saddr, vma_mmu_pagesize(svma)); if (spte) { - get_page(virt_to_page(spte)); + ptdesc_pmd_pts_inc(virt_to_ptdesc(spte)); break; } } @@ -7226,7 +7226,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, (pmd_t *)((unsigned long)spte & PAGE_MASK)); mm_inc_nr_pmds(mm); } else { - put_page(virt_to_page(spte)); + ptdesc_pmd_pts_dec(virt_to_ptdesc(spte)); } spin_unlock(&mm->page_table_lock); out: @@ -7238,10 +7238,6 @@ out: /* * unmap huge page backed by shared pte. * - * Hugetlb pte page is ref counted at the time of mapping. If pte is shared - * indicated by page_count > 1, unmap is achieved by clearing pud and - * decrementing the ref count. If count == 1, the pte page is not shared. - * * Called with page table lock held. * * returns: 1 successfully unmapped a shared pte page @@ -7250,18 +7246,20 @@ out: int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + unsigned long sz = huge_page_size(hstate_vma(vma)); pgd_t *pgd = pgd_offset(mm, addr); p4d_t *p4d = p4d_offset(pgd, addr); pud_t *pud = pud_offset(p4d, addr); i_mmap_assert_write_locked(vma->vm_file->f_mapping); hugetlb_vma_assert_locked(vma); - BUG_ON(page_count(virt_to_page(ptep)) == 0); - if (page_count(virt_to_page(ptep)) == 1) + if (sz != PMD_SIZE) + return 0; + if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep))) return 0; pud_clear(pud); - put_page(virt_to_page(ptep)); + ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep)); mm_dec_nr_pmds(mm); return 1; } -- cgit v1.2.3 From 0cef0bb836e3cfe00f08f9606c72abd72fe78ca3 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 7 Jan 2025 14:47:52 +0000 Subject: mm: clear uffd-wp PTE/PMD state on mremap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When mremap()ing a memory region previously registered with userfaultfd as write-protected but without UFFD_FEATURE_EVENT_REMAP, an inconsistency in flag clearing leads to a mismatch between the vma flags (which have uffd-wp cleared) and the pte/pmd flags (which do not have uffd-wp cleared). This mismatch causes a subsequent mprotect(PROT_WRITE) to trigger a warning in page_table_check_pte_flags() due to setting the pte to writable while uffd-wp is still set. 
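One way to hit the PTE-level case, sketched for illustration only (hypothetical, not the reproducer from the Closes: link below; assumes userfaultfd write-protect support is available and the program may create a userfaultfd):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define LEN (16 * 4096UL)

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API,
				  .features = UFFD_FEATURE_PAGEFAULT_FLAG_WP };
	struct uffdio_register reg = { 0 };
	struct uffdio_writeprotect wp = { 0 };
	char *old, *dst, *new;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	old = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	memset(old, 1, LEN);			/* populate the PTEs */

	reg.range.start = (unsigned long)old;
	reg.range.len = LEN;
	reg.mode = UFFDIO_REGISTER_MODE_WP;	/* note: no UFFD_FEATURE_EVENT_REMAP */
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	wp.range.start = (unsigned long)old;
	wp.range.len = LEN;
	wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;	/* set uffd-wp on the PTEs */
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
		return 1;

	/* Reserve a destination so mremap() really moves the page tables. */
	dst = mmap(NULL, LEN, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	new = mremap(old, LEN, LEN, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (new == MAP_FAILED)
		return 1;

	/*
	 * The moved VMA has lost VM_UFFD_WP, but the moved PTEs still carry
	 * the uffd-wp bit; making them writable trips the warning in dmesg
	 * on affected kernels.
	 */
	mprotect(new, LEN, PROT_READ | PROT_WRITE);
	return 0;
}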
Fix this by always explicitly clearing the uffd-wp pte/pmd flags on any such mremap() so that the values are consistent with the existing clearing of VM_UFFD_WP. Be careful to clear the logical flag regardless of its physical form; a PTE bit, a swap PTE bit, or a PTE marker. Cover PTE, huge PMD and hugetlb paths. Link: https://lkml.kernel.org/r/20250107144755.1871363-2-ryan.roberts@arm.com Co-developed-by: Mikołaj Lenczewski Signed-off-by: Mikołaj Lenczewski Signed-off-by: Ryan Roberts Closes: https://lore.kernel.org/linux-mm/810b44a8-d2ae-4107-b665-5a42eae2d948@arm.com/ Fixes: 63b2d4174c4a ("userfaultfd: wp: add the writeprotect API to userfaultfd ioctl") Cc: David Hildenbrand Cc: Jann Horn Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Muchun Song Cc: Peter Xu Cc: Shuah Khan Cc: Vlastimil Babka Cc: Signed-off-by: Andrew Morton --- include/linux/userfaultfd_k.h | 12 ++++++++++++ mm/huge_memory.c | 12 ++++++++++++ mm/hugetlb.c | 14 +++++++++++++- mm/mremap.c | 32 +++++++++++++++++++++++++++++++- 4 files changed, 68 insertions(+), 2 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index cb40f1a1d081..75342022d144 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, vma_is_shmem(vma); } +static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) +{ + struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx; + + return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0; +} + extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); extern void dup_userfaultfd_complete(struct list_head *); void dup_userfaultfd_fail(struct list_head *); @@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma) return false; } +static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) +{ + return false; +} + #endif /* CONFIG_USERFAULTFD */ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e53d83b3e5cf..db64116a4f84 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2206,6 +2206,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd) return pmd; } +static pmd_t clear_uffd_wp_pmd(pmd_t pmd) +{ + if (pmd_present(pmd)) + pmd = pmd_clear_uffd_wp(pmd); + else if (is_swap_pmd(pmd)) + pmd = pmd_swp_clear_uffd_wp(pmd); + + return pmd; +} + bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { @@ -2244,6 +2254,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, pgtable_trans_huge_deposit(mm, new_pmd, pgtable); } pmd = move_soft_dirty_pmd(pmd); + if (vma_has_uffd_without_event_remap(vma)) + pmd = clear_uffd_wp_pmd(pmd); set_pmd_at(mm, new_addr, new_pmd, pmd); if (force_flush) flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c498874a7170..eaaec19caa7c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5402,6 +5402,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, unsigned long sz) { + bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct hstate *h = hstate_vma(vma); struct mm_struct *mm = vma->vm_mm; spinlock_t *src_ptl, *dst_ptl; @@ -5418,7 +5419,18 @@ static void move_huge_pte(struct 
vm_area_struct *vma, unsigned long old_addr, spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); - set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); + + if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) + huge_pte_clear(mm, new_addr, dst_pte, sz); + else { + if (need_clear_uffd_wp) { + if (pte_present(pte)) + pte = huge_pte_clear_uffd_wp(pte); + else if (is_swap_pte(pte)) + pte = pte_swp_clear_uffd_wp(pte); + } + set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); + } if (src_ptl != dst_ptl) spin_unlock(src_ptl); diff --git a/mm/mremap.c b/mm/mremap.c index 60473413836b..cff7f552f909 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) { + bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; pmd_t dummy_pmdval; @@ -216,7 +217,18 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, force_flush = true; pte = move_pte(pte, old_addr, new_addr); pte = move_soft_dirty_pte(pte); - set_pte_at(mm, new_addr, new_pte, pte); + + if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) + pte_clear(mm, new_addr, new_pte); + else { + if (need_clear_uffd_wp) { + if (pte_present(pte)) + pte = pte_clear_uffd_wp(pte); + else if (is_swap_pte(pte)) + pte = pte_swp_clear_uffd_wp(pte); + } + set_pte_at(mm, new_addr, new_pte, pte); + } } arch_leave_lazy_mmu_mode(); @@ -278,6 +290,15 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, if (WARN_ON_ONCE(!pmd_none(*new_pmd))) return false; + /* If this pmd belongs to a uffd vma with remap events disabled, we need + * to ensure that the uffd-wp state is cleared from all pgtables. This + * means recursing into lower page tables in move_page_tables(), and we + * can reuse the existing code if we simply treat the entry as "not + * moved". + */ + if (vma_has_uffd_without_event_remap(vma)) + return false; + /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. @@ -333,6 +354,15 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, if (WARN_ON_ONCE(!pud_none(*new_pud))) return false; + /* If this pud belongs to a uffd vma with remap events disabled, we need + * to ensure that the uffd-wp state is cleared from all pgtables. This + * means recursing into lower page tables in move_page_tables(), and we + * can reuse the existing code if we simply treat the entry as "not + * moved". + */ + if (vma_has_uffd_without_event_remap(vma)) + return false; + /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. -- cgit v1.2.3 From d0f14f7ee0e2d5df447d54487ae0c3aee5a7208f Mon Sep 17 00:00:00 2001 From: Koichiro Den Date: Thu, 5 Dec 2024 01:55:03 +0900 Subject: hugetlb: prioritize surplus allocation from current node Previously, surplus allocations triggered by mmap were typically made from the node where the process was running. On a page fault, the area was reliably dequeued from the hugepage_freelists for that node. However, since commit 003af997c8a9 ("hugetlb: force allocating surplus hugepages on mempolicy allowed nodes"), dequeue_hugetlb_folio_vma() may fall back to other nodes unnecessarily even if there is no MPOL_BIND policy, causing folios to be dequeued from nodes other than the current one. 
Also, allocating from the node where the current process is running is likely to result in a performance win, as mmap-ing processes often touch the area not so long after allocation. This change minimizes surprises for users relying on the previous behavior while maintaining the benefit introduced by the commit. So, prioritize the node the current process is running on when possible. Link: https://lkml.kernel.org/r/20241204165503.628784-1-koichiro.den@canonical.com Signed-off-by: Koichiro Den Acked-by: Aristeu Rozanski Cc: Aristeu Rozanski Cc: David Hildenbrand Cc: Muchun Song Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eaaec19caa7c..ac275a8864e0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2463,7 +2463,13 @@ static int gather_surplus_pages(struct hstate *h, long delta) long needed, allocated; bool alloc_ok = true; int node; - nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); + nodemask_t *mbind_nodemask, alloc_nodemask; + + mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); + if (mbind_nodemask) + nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); + else + alloc_nodemask = cpuset_current_mems_allowed; lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; @@ -2479,8 +2485,16 @@ retry: spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { folio = NULL; - for_each_node_mask(node, cpuset_current_mems_allowed) { - if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { + + /* Prioritize current node */ + if (node_isset(numa_mem_id(), alloc_nodemask)) + folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), + numa_mem_id(), NULL); + + if (!folio) { + for_each_node_mask(node, alloc_nodemask) { + if (node == numa_mem_id()) + continue; folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), node, NULL); if (folio) -- cgit v1.2.3 From d8fd84dd4ce72e371a6604809e25e16e24b997d4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Dec 2024 16:31:00 +0100 Subject: mm/hugetlb: don't map folios writable without VM_WRITE when copying during fork() If we have to trigger a hugetlb folio copy during fork() because the anon folio might be pinned, we currently unconditionally create a writable PTE. However, the VMA might not have write permissions (VM_WRITE) at that point. Fix it by checking the VMA for VM_WRITE. Make the code less error prone by moving checking for VM_WRITE into make_huge_pte(), and letting callers only specify whether we should try making it writable. A simple reproducer that longterm-pins the folios using liburing to then mprotect(PROT_READ) the folios befor fork() [1] results in: Before: [FAIL] access should not have worked After: [PASS] access did not work as expected [1] https://gitlab.com/davidhildenbrand/scratchspace/-/raw/main/reproducers/hugetlb-mkwrite-fork.c This is rather a corner case, so stable might not be warranted. 
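For orientation, a rough sketch of that sequence (illustrative only, not the linked reproducer; assumes 2MB hugetlb pages are configured via vm.nr_hugepages and that liburing is available for the long-term pin; build with -luring):

#include <liburing.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

#define SZ (2UL << 20)		/* assumes 2MB hugetlb pages are configured */

int main(void)
{
	struct io_uring ring;
	struct iovec iov;
	char *buf;

	buf = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	buf[0] = 1;			/* populate the anon hugetlb folio */

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	iov.iov_base = buf;
	iov.iov_len = SZ;
	if (io_uring_register_buffers(&ring, &iov, 1))	/* long-term pin */
		return 1;

	mprotect(buf, SZ, PROT_READ);	/* drop VM_WRITE before fork() */

	if (fork() == 0) {
		/*
		 * The pin forces a hugetlb CoW copy during fork(). A write
		 * here must fault, since the VMA is read-only; with the bug
		 * the copied PTE was created writable and the write went
		 * through.
		 */
		buf[0] = 2;
		_exit(0);
	}
	wait(NULL);
	return 0;
}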
Link: https://lkml.kernel.org/r/20241204153100.1967364-1-david@redhat.com Fixes: 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") Signed-off-by: David Hildenbrand Acked-by: Peter Xu Cc: Muchun Song Cc: Guillaume Morin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ac275a8864e0..c9d8c6a1c03c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5155,12 +5155,12 @@ const struct vm_operations_struct hugetlb_vm_ops = { }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, - int writable) + bool try_mkwrite) { pte_t entry; unsigned int shift = huge_page_shift(hstate_vma(vma)); - if (writable) { + if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, vma->vm_page_prot))); } else { @@ -5213,7 +5213,7 @@ static void hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old, unsigned long sz) { - pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); + pte_t newpte = make_huge_pte(vma, &new_folio->page, true); __folio_mark_uptodate(new_folio); hugetlb_add_new_anon_rmap(new_folio, vma, addr); @@ -6249,8 +6249,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, hugetlb_add_new_anon_rmap(folio, vma, vmf->address); else hugetlb_add_file_rmap(folio); - new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) - && (vma->vm_flags & VM_SHARED))); + new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED); /* * If this pte was previously wr-protected, keep it wr-protected even * if populated. @@ -6582,7 +6581,6 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, spinlock_t *ptl; int ret = -ENOMEM; struct folio *folio; - int writable; bool folio_in_pagecache = false; if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { @@ -6736,12 +6734,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY * with wp flag set, don't set pte write bit. */ - if (wp_enabled || (is_continue && !vm_shared)) - writable = 0; - else - writable = dst_vma->vm_flags & VM_WRITE; - - _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); + _dst_pte = make_huge_pte(dst_vma, &folio->page, + !wp_enabled && !(is_continue && !vm_shared)); /* * Always mark UFFDIO_COPY page dirty; note that this may not be * extremely important for hugetlbfs for now since swapping is not -- cgit v1.2.3 From 052ccfbcc6cd87fe678b0ab27cc0715cb3aefb21 Mon Sep 17 00:00:00 2001 From: Guillaume Morin Date: Fri, 6 Dec 2024 22:28:36 +0100 Subject: mm/hugetlb: support FOLL_FORCE|FOLL_WRITE Eric reported that PTRACE_POKETEXT fails when applications use hugetlb for mapping text using huge pages. Before commit 1d8d14641fd9 ("mm/hugetlb: support write-faults in shared mappings"), PTRACE_POKETEXT worked by accident, but it was buggy and silently ended up mapping pages writable into the page tables even though VM_WRITE was not set. In general, FOLL_FORCE|FOLL_WRITE does currently not work with hugetlb. Let's implement FOLL_FORCE|FOLL_WRITE properly for hugetlb, such that what used to work in the past by accident now properly works, allowing applications using hugetlb for text etc. to get properly debugged. This change might also be required to implement uprobes support for hugetlb [1]. 
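For illustration, a rough sketch of the PTRACE_POKETEXT case this enables (hypothetical example; an anonymous private hugetlb mapping stands in for hugetlb-backed text, and a few free 2MB hugetlb pages are assumed):

#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define SZ (2UL << 20)		/* assumes 2MB hugetlb pages are configured */

int main(void)
{
	long *buf;
	pid_t pid;

	/* Map before fork() so tracer and tracee agree on the address. */
	buf = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	buf[0] = 0x11;			/* populate */
	mprotect(buf, SZ, PROT_READ);	/* read-only, like a text mapping */

	pid = fork();
	if (pid == 0) {
		pause();		/* tracee just sits here */
		_exit(0);
	}

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL)) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	waitpid(pid, NULL, 0);

	/* Fails with EIO before this change; succeeds by breaking COW after. */
	if (ptrace(PTRACE_POKETEXT, pid, (void *)buf, (void *)0x42L))
		perror("PTRACE_POKETEXT");

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}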
[1] https://lore.kernel.org/lkml/ZiK50qob9yl5e0Xz@bender.morinfr.org/ Link: https://lkml.kernel.org/r/Z1NshNfWuzUCPebA@bender.morinfr.org Signed-off-by: Guillaume Morin Cc: Muchun Song Cc: Andrew Morton Cc: Peter Xu Cc: David Hildenbrand Cc: Eric Hagberg Signed-off-by: Andrew Morton --- mm/gup.c | 91 +++++++++++++++++++++++++++++------------------------------- mm/hugetlb.c | 17 ++++++------ 2 files changed, 53 insertions(+), 55 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/gup.c b/mm/gup.c index 3b75e631f369..00a1269cbee0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -596,6 +596,33 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, } #endif /* CONFIG_HAVE_GUP_FAST */ +/* Common code for can_follow_write_* */ +static inline bool can_follow_write_common(struct page *page, + struct vm_area_struct *vma, unsigned int flags) +{ + /* Maybe FOLL_FORCE is set to override it? */ + if (!(flags & FOLL_FORCE)) + return false; + + /* But FOLL_FORCE has no effect on shared mappings */ + if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) + return false; + + /* ... or read-only private ones */ + if (!(vma->vm_flags & VM_MAYWRITE)) + return false; + + /* ... or already writable ones that just need to take a write fault */ + if (vma->vm_flags & VM_WRITE) + return false; + + /* + * See can_change_pte_writable(): we broke COW and could map the page + * writable if we have an exclusive anonymous page ... + */ + return page && PageAnon(page) && PageAnonExclusive(page); +} + static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags, unsigned long address) { @@ -622,6 +649,18 @@ static struct page *no_page_table(struct vm_area_struct *vma, } #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES +/* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */ +static inline bool can_follow_write_pud(pud_t pud, struct page *page, + struct vm_area_struct *vma, + unsigned int flags) +{ + /* If the pud is writable, we can write to the page. */ + if (pud_write(pud)) + return true; + + return can_follow_write_common(page, vma, flags); +} + static struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) @@ -634,10 +673,11 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma, assert_spin_locked(pud_lockptr(mm, pudp)); - if ((flags & FOLL_WRITE) && !pud_write(pud)) + if (!pud_present(pud)) return NULL; - if (!pud_present(pud)) + if ((flags & FOLL_WRITE) && + !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags)) return NULL; pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; @@ -686,27 +726,7 @@ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, if (pmd_write(pmd)) return true; - /* Maybe FOLL_FORCE is set to override it? */ - if (!(flags & FOLL_FORCE)) - return false; - - /* But FOLL_FORCE has no effect on shared mappings */ - if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) - return false; - - /* ... or read-only private ones */ - if (!(vma->vm_flags & VM_MAYWRITE)) - return false; - - /* ... or already writable ones that just need to take a write fault */ - if (vma->vm_flags & VM_WRITE) - return false; - - /* - * See can_change_pte_writable(): we broke COW and could map the page - * writable if we have an exclusive anonymous page ... - */ - if (!page || !PageAnon(page) || !PageAnonExclusive(page)) + if (!can_follow_write_common(page, vma, flags)) return false; /* ... and a write-fault isn't required for other reasons. 
*/ @@ -807,27 +827,7 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page, if (pte_write(pte)) return true; - /* Maybe FOLL_FORCE is set to override it? */ - if (!(flags & FOLL_FORCE)) - return false; - - /* But FOLL_FORCE has no effect on shared mappings */ - if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) - return false; - - /* ... or read-only private ones */ - if (!(vma->vm_flags & VM_MAYWRITE)) - return false; - - /* ... or already writable ones that just need to take a write fault */ - if (vma->vm_flags & VM_WRITE) - return false; - - /* - * See can_change_pte_writable(): we broke COW and could map the page - * writable if we have an exclusive anonymous page ... - */ - if (!page || !PageAnon(page) || !PageAnonExclusive(page)) + if (!can_follow_write_common(page, vma, flags)) return false; /* ... and a write-fault isn't required for other reasons. */ @@ -1294,9 +1294,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; - /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ - if (is_vm_hugetlb_page(vma)) - return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c9d8c6a1c03c..21de25546a25 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5183,6 +5183,13 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, update_mmu_cache(vma, address, ptep); } +static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + if (vma->vm_flags & VM_WRITE) + set_huge_ptep_writable(vma, address, ptep); +} + bool is_hugetlb_entry_migration(pte_t pte) { swp_entry_t swp; @@ -5828,13 +5835,6 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, if (!unshare && huge_pte_uffd_wp(pte)) return 0; - /* - * hugetlb does not support FOLL_FORCE-style write faults that keep the - * PTE mapped R/O such as maybe_mkwrite() would do. - */ - if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) - return VM_FAULT_SIGSEGV; - /* Let's take out MAP_SHARED mappings first. */ if (vma->vm_flags & VM_MAYSHARE) { set_huge_ptep_writable(vma, vmf->address, vmf->pte); @@ -5863,7 +5863,8 @@ retry_avoidcopy: SetPageAnonExclusive(&old_folio->page); } if (likely(!unshare)) - set_huge_ptep_writable(vma, vmf->address, vmf->pte); + set_huge_ptep_maybe_writable(vma, vmf->address, + vmf->pte); delayacct_wpcopy_end(); return 0; -- cgit v1.2.3 From 991135774c0e05a4734e6d32aa03b00355e4cac9 Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 11 Dec 2024 12:39:50 -0800 Subject: memcg/hugetlb: introduce mem_cgroup_charge_hugetlb This patch introduces mem_cgroup_charge_hugetlb which combines the logic of mem_cgroup_hugetlb_try_charge / mem_cgroup_hugetlb_commit_charge and removes the need for mem_cgroup_hugetlb_cancel_charge. It also reduces the footprint of memcg in hugetlb code and consolidates all memcg related error paths into one. 
Link: https://lkml.kernel.org/r/20241211203951.764733-3-joshua.hahnjy@gmail.com Signed-off-by: Joshua Hahn Acked-by: Shakeel Butt Reviewed-by: Nhat Pham Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 7 +++++++ mm/hugetlb.c | 35 ++++++++++++++--------------------- mm/memcontrol.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 21 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b28180269e75..387470bed399 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -649,6 +649,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, long nr_pages); +int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); + int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); @@ -1169,6 +1171,11 @@ static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, return 0; } +static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp) +{ + return 0; +} + static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 21de25546a25..1672bfd85b4d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2981,21 +2981,13 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; - long map_chg, map_commit, nr_pages = pages_per_huge_page(h); + long map_chg, map_commit; long gbl_chg; - int memcg_charge_ret, ret, idx; + int ret, idx; struct hugetlb_cgroup *h_cg = NULL; - struct mem_cgroup *memcg; bool deferred_reserve; gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; - memcg = get_mem_cgroup_from_current(); - memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages); - if (memcg_charge_ret == -ENOMEM) { - mem_cgroup_put(memcg); - return ERR_PTR(-ENOMEM); - } - idx = hstate_index(h); /* * Examine the region/reserve map to determine if the process @@ -3003,12 +2995,8 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * code of zero indicates a reservation exists (no change). */ map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); - if (map_chg < 0) { - if (!memcg_charge_ret) - mem_cgroup_cancel_charge(memcg, nr_pages); - mem_cgroup_put(memcg); + if (map_chg < 0) return ERR_PTR(-ENOMEM); - } /* * Processes that did not create the mapping will have no @@ -3106,10 +3094,18 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, } } - if (!memcg_charge_ret) - mem_cgroup_commit_charge(folio, memcg); + ret = mem_cgroup_charge_hugetlb(folio, gfp); + /* + * Unconditionally increment NR_HUGETLB here. If it turns out that + * mem_cgroup_charge_hugetlb failed, then immediately free the page and + * decrement NR_HUGETLB. 
+ */ lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); - mem_cgroup_put(memcg); + + if (ret == -ENOMEM) { + free_huge_folio(folio); + return ERR_PTR(-ENOMEM); + } return folio; @@ -3124,9 +3120,6 @@ out_subpool_put: hugepage_subpool_put_pages(spool, 1); out_end_reservation: vma_end_reservation(h, vma, addr); - if (!memcg_charge_ret) - mem_cgroup_cancel_charge(memcg, nr_pages); - mem_cgroup_put(memcg); return ERR_PTR(-ENOSPC); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b25eab9c933e..c903e260a830 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4561,6 +4561,40 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, return 0; } +/** + * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio + * @folio: folio being charged + * @gfp: reclaim mode + * + * This function is called when allocating a huge page folio, after the page has + * already been obtained and charged to the appropriate hugetlb cgroup + * controller (if it is enabled). + * + * Returns ENOMEM if the memcg is already full. + * Returns 0 if either the charge was successful, or if we skip the charging. + */ +int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) +{ + struct mem_cgroup *memcg = get_mem_cgroup_from_current(); + int ret = 0; + + /* + * Even memcg does not account for hugetlb, we still want to update + * system-level stats via lruvec_stat_mod_folio. Return 0, and skip + * charging the memcg. + */ + if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || + !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) + goto out; + + if (charge_memcg(folio, memcg, gfp)) + ret = -ENOMEM; + +out: + mem_cgroup_put(memcg); + return ret; +} + /** * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. * @folio: folio to charge. -- cgit v1.2.3 From 04f13d241b8b146b23038bffd907cb8278391d07 Mon Sep 17 00:00:00 2001 From: yangge Date: Sat, 11 Jan 2025 15:58:20 +0800 Subject: mm: replace free hugepage folios after migration My machine has 4 NUMA nodes, each equipped with 32GB of memory. I have configured each NUMA node with 16GB of CMA and 16GB of in-use hugetlb pages. The allocation of contiguous memory via cma_alloc() can fail probabilistically. When there are free hugetlb folios in the hugetlb pool, during the migration of in-use hugetlb folios, new folios are allocated from the free hugetlb pool. After the migration is completed, the old folios are released back to the free hugetlb pool instead of being returned to the buddy system. This can cause test_pages_isolated() check to fail, ultimately leading to the failure of cma_alloc(). Call trace: cma_alloc() __alloc_contig_migrate_range() // migrate in-use hugepage test_pages_isolated() __test_page_isolated_in_pageblock() PageBuddy(page) // check if the page is in buddy To address this issue, we introduce a function named replace_free_hugepage_folios(). This function will replace the hugepage in the free hugepage pool with a new one and release the old one to the buddy system. After the migration of in-use hugetlb pages is completed, we will invoke replace_free_hugepage_folios() to ensure that these hugepages are properly released to the buddy system. Following this step, when test_pages_isolated() is executed for inspection, it will successfully pass. Additionally, when alloc_contig_range() is used to migrate multiple in-use hugetlb pages, it can result in some in-use hugetlb pages being released back to the free hugetlb pool and subsequently being reallocated and used again. 
For example: [huge 0] [huge 1] To migrate huge 0, we obtain huge x from the pool. After the migration is completed, we return the now-freed huge 0 back to the pool. When it's time to migrate huge 1, we can simply reuse the now-freed huge 0 from the pool. As a result, when replace_free_hugepage_folios() is executed, it cannot release huge 0 back to the buddy system. To address this issue, we should prevent the reuse of isolated free hugepages during the migration process. Link: https://lkml.kernel.org/r/1734503588-16254-1-git-send-email-yangge1116@126.com Link: https://lkml.kernel.org/r/1736582300-11364-1-git-send-email-yangge1116@126.com Signed-off-by: yangge Cc: Baolin Wang Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 7 +++++++ mm/hugetlb.c | 42 ++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 12 +++++++++++- 3 files changed, 60 insertions(+), 1 deletion(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ae4fe8615bb6..10faf42ca96a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -681,6 +681,7 @@ struct huge_bootmem_page { }; int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); +int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, @@ -1059,6 +1060,12 @@ static inline int isolate_or_dissolve_huge_page(struct page *page, return -ENOMEM; } +static inline int replace_free_hugepage_folios(unsigned long start_pfn, + unsigned long end_pfn) +{ + return 0; +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1672bfd85b4d..312ed27b9721 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -48,6 +48,7 @@ #include #include "internal.h" #include "hugetlb_vmemmap.h" +#include int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; @@ -1336,6 +1337,9 @@ static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, if (folio_test_hwpoison(folio)) continue; + if (is_migrate_isolate_page(&folio->page)) + continue; + list_move(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); folio_clear_hugetlb_freed(folio); @@ -2975,6 +2979,44 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) return ret; } +/* + * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn + * range with new folios. + * @start_pfn: start pfn of the given pfn range + * @end_pfn: end pfn of the given pfn range + * Returns 0 on success, otherwise negated error. 
+ */ +int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) +{ + struct hstate *h; + struct folio *folio; + int ret = 0; + + LIST_HEAD(isolate_list); + + while (start_pfn < end_pfn) { + folio = pfn_folio(start_pfn); + if (folio_test_hugetlb(folio)) { + h = folio_hstate(folio); + } else { + start_pfn++; + continue; + } + + if (!folio_ref_count(folio)) { + ret = alloc_and_dissolve_hugetlb_folio(h, folio, + &isolate_list); + if (ret) + break; + + putback_movable_pages(&isolate_list); + } + start_pfn++; + } + + return ret; +} + struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dde19db204b2..df5d617738e7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6504,7 +6504,17 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); if (ret && ret != -EBUSY) goto done; - ret = 0; + + /* + * When in-use hugetlb pages are migrated, they may simply be released + * back into the free hugepage pool instead of being returned to the + * buddy system. After the migration of in-use huge pages is completed, + * we will invoke replace_free_hugepage_folios() to ensure that these + * hugepages are properly released to the buddy system. + */ + ret = replace_free_hugepage_folios(start, end); + if (ret) + goto done; /* * Pages from [start, end) are within a pageblock_nr_pages -- cgit v1.2.3 From 58db7c5fbe7daa42098d4965133a864f98ba90ba Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:56 -0500 Subject: mm/hugetlb: fix avoid_reserve to allow taking folio from subpool Patch series "mm/hugetlb: Refactor hugetlb allocation resv accounting", v2. This is a follow up on Ackerley's series here as replacement: https://lore.kernel.org/r/cover.1728684491.git.ackerleytng@google.com The goal of this series is to cleanup hugetlb resv accounting, especially during folio allocation, to decouple a few things: - Hugetlb folios v.s. Hugetlbfs: IOW, the hope is in the future hugetlb folios can be allocated completely without hugetlbfs. - Decouple VMA v.s. hugetlb folio allocations: allocating a hugetlb folio should not always require a hugetlbfs VMA. For example, either it got allocated from the inode level (see hugetlbfs_fallocate() where it used a pesudo VMA for allocation), or it can be allocated by other kernel subsystems. It paves way for other users to allocate hugetlb folios out of either system reservations, or subpools (instead of hugetlbfs, as a file system). For longer term, this prepares hugetlb as a separate concept versus hugetlbfs, so that hugetlb folios can be allocated by not only hugetlbfs and other things. Tests I've done: - I had a reproducer in patch 1 for the bug I found, this will start to work after patch 1 or the whole set applied. - Hugetlb regression tests (on x86_64 2MBs), includes: - All vmtests on hugetlbfs - libhugetlbfs test suite (which may fail some tests, but no new failures will be introduced by this series, so all such failures happen before this series so shouldn't be relevant). This patch (of 7): Since commit 04f2cbe35699 ("hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed"), avoid_reserve was introduced for a special case of CoW on hugetlb private mappings, and only if the owner VMA is trying to allocate yet another hugetlb folio that is not reserved within the private vma reserved map. 
Later on, in commit d85f69b0b533 ("mm/hugetlb: alloc_huge_page handle areas hole punched by fallocate"), alloc_huge_page() enforced to not consume any global reservation as long as avoid_reserve=true. This operation doesn't look correct, because even if it will enforce the allocation to not use global reservation at all, it will still try to take one reservation from the spool (if the subpool existed). Then since the spool reserved pages take from global reservation, it'll also take one reservation globally. Logically it can cause global reservation to go wrong. I wrote a reproducer below, trigger this special path, and every run of such program will cause global reservation count to increment by one, until it hits the number of free pages: #define _GNU_SOURCE /* See feature_test_macros(7) */ #include #include #include #include #include #include #define MSIZE (2UL << 20) int main(int argc, char *argv[]) { const char *path; int *buf; int fd, ret; pid_t child; if (argc < 2) { printf("usage: %s \n", argv[0]); return -1; } path = argv[1]; fd = open(path, O_RDWR | O_CREAT, 0666); if (fd < 0) { perror("open failed"); return -1; } ret = fallocate(fd, 0, 0, MSIZE); if (ret != 0) { perror("fallocate"); return -1; } buf = mmap(NULL, MSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); if (buf == MAP_FAILED) { perror("mmap() failed"); return -1; } /* Allocate a page */ *buf = 1; child = fork(); if (child == 0) { /* child doesn't need to do anything */ exit(0); } /* Trigger CoW from owner */ *buf = 2; munmap(buf, MSIZE); close(fd); unlink(path); return 0; } It can only reproduce with a sub-mount when there're reserved pages on the spool, like: # sysctl vm.nr_hugepages=128 # mkdir ./hugetlb-pool # mount -t hugetlbfs -o min_size=8M,pagesize=2M none ./hugetlb-pool Then run the reproducer on the mountpoint: # ./reproducer ./hugetlb-pool/test Fix it by taking the reservation from spool if available. In general, avoid_reserve is IMHO more about "avoid vma resv map", not spool's. I copied stable, however I have no intention for backporting if it's not a clean cherry-pick, because private hugetlb mapping, and then fork() on top is too rare to hit. 
Link: https://lkml.kernel.org/r/20250107204002.2683356-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20250107204002.2683356-2-peterx@redhat.com Fixes: d85f69b0b533 ("mm/hugetlb: alloc_huge_page handle areas hole punched by fallocate") Signed-off-by: Peter Xu Reviewed-by: Ackerley Tng Tested-by: Ackerley Tng Reviewed-by: Oscar Salvador Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Rik van Riel Cc: Roman Gushchin Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 312ed27b9721..a10d376cb1a8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1398,8 +1398,7 @@ static unsigned long available_huge_pages(struct hstate *h) static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, int avoid_reserve, - long chg) + unsigned long address, long chg) { struct folio *folio = NULL; struct mempolicy *mpol; @@ -1415,10 +1414,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) goto err; - /* If reserves cannot be used, ensure enough pages are in the pool */ - if (avoid_reserve && !available_huge_pages(h)) - goto err; - gfp_mask = htlb_alloc_mask(h); nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); @@ -1434,7 +1429,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) { + if (folio && vma_has_reserves(vma, chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3051,17 +3046,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; - - /* - * Even though there was no reservation in the region/reserve - * map, there could be reservations associated with the - * subpool that can be used. This would be indicated if the - * return value of hugepage_subpool_get_pages() is zero. - * However, if avoid_reserve is specified we still avoid even - * the subpool reservations. - */ - if (avoid_reserve) - gbl_chg = 1; } /* If this allocation is not consuming a reservation, charge it now. @@ -3084,7 +3068,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * from the global free pool (global change). gbl_chg == 0 indicates * a reservation exists for the allocation. */ - folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); + folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); if (!folio) { spin_unlock_irq(&hugetlb_lock); folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); -- cgit v1.2.3 From be8d7314b18ede1c6cdebbb982c9659020e2f1f2 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:57 -0500 Subject: mm/hugetlb: stop using avoid_reserve flag in fork() When fork() and stumble on top of a dma-pinned hugetlb private page, CoW must happen during fork() to guarantee dma coherency. In this specific path, hugetlb pages need to be allocated for the child process. Stop using avoid_reserve=1 flag here: it's not required to be used here, as dest_vma (which is destined to be a MAP_PRIVATE hugetlb vma) will have no private vma resv map, and that will make sure it won't be able to use a vma reservation later. No functional change intended with this change. 
Said that, it's still wanted to do this, so as to reduce the usage of avoid_reserve to the only one user, which is also why this flag was introduced initially in commit 04f2cbe35699 ("hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed"). I don't see whoever else should set it at all. Further patch will clean up resv accounting based on this. Link: https://lkml.kernel.org/r/20250107204002.2683356-3-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Oscar Salvador Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a10d376cb1a8..7df19b5f956c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5373,7 +5373,7 @@ again: spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's private owned */ - new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); + new_folio = alloc_hugetlb_folio(dst_vma, addr, 0); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); -- cgit v1.2.3 From 30cef82bc6e8975a360ec05b707f7fb194c875ed Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:58 -0500 Subject: mm/hugetlb: rename avoid_reserve to cow_from_owner The old name "avoid_reserve" can be too generic and can be used wrongly in the new call sites that want to allocate a hugetlb folio. It's confusing on two things: (1) whether one can opt-in to avoid global reservation, and (2) whether it should take more than one count. In reality, this flag is only used in an extremely hacky path, in an extremely hacky way in hugetlb CoW path only, and always use with 1 saying "skip global reservation". Rename the flag to avoid future abuse of this flag, making it a boolean so as to reflect its true representation that it's not a counter. To make it even harder to abuse, add a comment above the function to explain it. Link: https://lkml.kernel.org/r/20250107204002.2683356-4-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Oscar Salvador Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 2 +- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 33 ++++++++++++++++++++------------- 3 files changed, 23 insertions(+), 16 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 62fb0cbc93ab..0fc179a59830 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -814,7 +814,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, * folios in these areas, we need to consume the reserves * to keep reservation accounting consistent. 
*/ - folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0); + folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); if (IS_ERR(folio)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); error = PTR_ERR(folio); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 10faf42ca96a..49ec2362ce92 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -683,7 +683,7 @@ struct huge_bootmem_page { int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve); + unsigned long addr, bool cow_from_owner); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback); @@ -1068,7 +1068,7 @@ static inline int replace_free_hugepage_folios(unsigned long start_pfn, static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, - int avoid_reserve) + bool cow_from_owner) { return NULL; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7df19b5f956c..e59b734b2c95 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3012,8 +3012,15 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) return ret; } +/* + * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW + * faults of hugetlb private mappings on top of a non-page-cache folio (in + * which case even if there's a private vma resv map it won't cover such + * allocation). New call sites should (probably) never set it to true!! + * When it's set, the allocation will bypass all vma level reservations. + */ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve) + unsigned long addr, bool cow_from_owner) { struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); @@ -3042,7 +3049,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * Allocations for MAP_NORESERVE mappings also need to be * checked against any subpool limit. */ - if (map_chg || avoid_reserve) { + if (map_chg || cow_from_owner) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; @@ -3050,7 +3057,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, /* If this allocation is not consuming a reservation, charge it now. 
*/ - deferred_reserve = map_chg || avoid_reserve; + deferred_reserve = map_chg || cow_from_owner; if (deferred_reserve) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); @@ -3075,7 +3082,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { + if (!cow_from_owner && vma_has_reserves(vma, gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3142,7 +3149,7 @@ out_uncharge_cgroup_reservation: hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (map_chg || avoid_reserve) + if (map_chg || cow_from_owner) hugepage_subpool_put_pages(spool, 1); out_end_reservation: vma_end_reservation(h, vma, addr); @@ -5373,7 +5380,7 @@ again: spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's private owned */ - new_folio = alloc_hugetlb_folio(dst_vma, addr, 0); + new_folio = alloc_hugetlb_folio(dst_vma, addr, false); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); @@ -5839,7 +5846,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, struct hstate *h = hstate_vma(vma); struct folio *old_folio; struct folio *new_folio; - int outside_reserve = 0; + bool cow_from_owner = 0; vm_fault_t ret = 0; struct mmu_notifier_range range; @@ -5902,7 +5909,7 @@ retry_avoidcopy: */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_folio != pagecache_folio) - outside_reserve = 1; + cow_from_owner = true; folio_get(old_folio); @@ -5911,7 +5918,7 @@ retry_avoidcopy: * be acquired again before returning to the caller, as expected. */ spin_unlock(vmf->ptl); - new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve); + new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); if (IS_ERR(new_folio)) { /* @@ -5921,7 +5928,7 @@ retry_avoidcopy: * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. */ - if (outside_reserve) { + if (cow_from_owner) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx; u32 hash; @@ -6172,7 +6179,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, goto out; } - folio = alloc_hugetlb_folio(vma, vmf->address, 0); + folio = alloc_hugetlb_folio(vma, vmf->address, false); if (IS_ERR(folio)) { /* * Returning error will result in faulting task being @@ -6638,7 +6645,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out; } - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); + folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { ret = -ENOMEM; goto out; @@ -6680,7 +6687,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out; } - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); + folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { folio_put(*foliop); ret = -ENOMEM; -- cgit v1.2.3 From 923682a0dd57065aef21bc297bfa6a9101c5da83 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:59 -0500 Subject: mm/hugetlb: clean up map/global resv accounting when allocate alloc_hugetlb_folio() isn't a function easy to read, especially on reservation accountings for either VMA or globally (majorly, spool only). The 1st complexity lies in the special private CoW path, aka, cow_from_owner=true case. The 2nd complexity may be the confusing updates of gbl_chg after it's set once, which looks like they can change anytime on the fly. 
Logically, cow_from_user is only about vma reservation. We could already decouple the flag and consolidate it into map charge flag very early. Then we don't need to keep checking the CoW special flag every time. This patch does it by making map_chg a tri-state flag. Tri-state needed is unfortunate, and it's because currently vma_needs_reservation() has a side effect internally, that it must be followed by either a end() or commit(). We keep the same semantic as before on one thing: "if (map_chg)" means we need a separate per-vma resv count. It keeps most of the old code like before untouched with the new enum. After this patch, we take these steps to decide these variables, hopefully slightly easier to follow: - First, decide map_chg. This will take cow_from_owner into account, once and for all. It's about whether we could take a resv count from the vma, no matter it's shared, private, etc. - Then, decide gbl_chg. The only diff here is spool, comparing to map_chg. Now only update each flag once and for all, instead of keep any of them flipping which can be very hard to follow. With cow_from_owner merged into map_chg, we could remove quite a few such checks all over. Side benefit of such is that we can get rid of one more confusing flag, which is deferred_reserve. Cleanup the comments a bit too. E.g., MAP_NORESERVE may not need to check against spool limit, AFAIU, if it's on a shared mapping, and if the page cache folio has its inode's resv map available (in which case map_chg would have been set zero, hence the code should be correct, not the comment). There's one trivial detail that needs attention that this patch touched, which is this check right after vma_commit_reservation(): if (map_chg > map_commit) It changes to: if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) It should behave the same like before, because previously the only way to make "map_chg > map_commit" happen is map_chg=1 && map_commit=0. That's exactly the rewritten line. Meanwhile, either commit() or end() will need to be skipped if ENFORCE, to keep the old behavior. Even though it looks a lot changed, but no functional change expected. Link: https://lkml.kernel.org/r/20250107204002.2683356-5-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 110 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 77 insertions(+), 33 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e59b734b2c95..bac6ee9ede4d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3012,6 +3012,25 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) return ret; } +typedef enum { + /* + * For either 0/1: we checked the per-vma resv map, and one resv + * count either can be reused (0), or an extra needed (1). + */ + MAP_CHG_REUSE = 0, + MAP_CHG_NEEDED = 1, + /* + * Cannot use per-vma resv count can be used, hence a new resv + * count is enforced. + * + * NOTE: This is mostly identical to MAP_CHG_NEEDED, except + * that currently vma_needs_reservation() has an unwanted side + * effect to either use end() or commit() to complete the + * transaction. Hence it needs to differenciate from NEEDED. + */ + MAP_CHG_ENFORCED = 2, +} map_chg_state; + /* * NOTE! 
"cow_from_owner" represents a very hacky usage only used in CoW * faults of hugetlb private mappings on top of a non-page-cache folio (in @@ -3025,40 +3044,59 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; - long map_chg, map_commit; - long gbl_chg; + long retval, gbl_chg; + map_chg_state map_chg; int ret, idx; struct hugetlb_cgroup *h_cg = NULL; - bool deferred_reserve; gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; idx = hstate_index(h); - /* - * Examine the region/reserve map to determine if the process - * has a reservation for the page to be allocated. A return - * code of zero indicates a reservation exists (no change). - */ - map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); - if (map_chg < 0) - return ERR_PTR(-ENOMEM); + + /* Whether we need a separate per-vma reservation? */ + if (cow_from_owner) { + /* + * Special case! Since it's a CoW on top of a reserved + * page, the private resv map doesn't count. So it cannot + * consume the per-vma resv map even if it's reserved. + */ + map_chg = MAP_CHG_ENFORCED; + } else { + /* + * Examine the region/reserve map to determine if the process + * has a reservation for the page to be allocated. A return + * code of zero indicates a reservation exists (no change). + */ + retval = vma_needs_reservation(h, vma, addr); + if (retval < 0) + return ERR_PTR(-ENOMEM); + map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE; + } /* + * Whether we need a separate global reservation? + * * Processes that did not create the mapping will have no * reserves as indicated by the region/reserve map. Check * that the allocation will not exceed the subpool limit. - * Allocations for MAP_NORESERVE mappings also need to be - * checked against any subpool limit. + * Or if it can get one from the pool reservation directly. */ - if (map_chg || cow_from_owner) { + if (map_chg) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; + } else { + /* + * If we have the vma reservation ready, no need for extra + * global reservation. + */ + gbl_chg = 0; } - /* If this allocation is not consuming a reservation, charge it now. + /* + * If this allocation is not consuming a per-vma reservation, + * charge the hugetlb cgroup now. */ - deferred_reserve = map_chg || cow_from_owner; - if (deferred_reserve) { + if (map_chg) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); if (ret) @@ -3082,7 +3120,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!cow_from_owner && vma_has_reserves(vma, gbl_chg)) { + if (vma_has_reserves(vma, gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3095,7 +3133,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. 
*/ - if (deferred_reserve) { + if (map_chg) { hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), h_cg, folio); } @@ -3104,26 +3142,31 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, hugetlb_set_folio_subpool(folio, spool); - map_commit = vma_commit_reservation(h, vma, addr); - if (unlikely(map_chg > map_commit)) { + if (map_chg != MAP_CHG_ENFORCED) { + /* commit() is only needed if the map_chg is not enforced */ + retval = vma_commit_reservation(h, vma, addr); /* + * Check for possible race conditions. When it happens.. * The page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. * This indicates a race with hugetlb_reserve_pages. * Adjust for the subpool count incremented above AND - * in hugetlb_reserve_pages for the same page. Also, + * in hugetlb_reserve_pages for the same page. Also, * the reservation count added in hugetlb_reserve_pages * no longer applies. */ - long rsv_adjust; + if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { + long rsv_adjust; - rsv_adjust = hugepage_subpool_put_pages(spool, 1); - hugetlb_acct_memory(h, -rsv_adjust); - if (deferred_reserve) { - spin_lock_irq(&hugetlb_lock); - hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), - pages_per_huge_page(h), folio); - spin_unlock_irq(&hugetlb_lock); + rsv_adjust = hugepage_subpool_put_pages(spool, 1); + hugetlb_acct_memory(h, -rsv_adjust); + if (map_chg) { + spin_lock_irq(&hugetlb_lock); + hugetlb_cgroup_uncharge_folio_rsvd( + hstate_index(h), pages_per_huge_page(h), + folio); + spin_unlock_irq(&hugetlb_lock); + } } } @@ -3145,14 +3188,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_uncharge_cgroup_reservation: - if (deferred_reserve) + if (map_chg) hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (map_chg || cow_from_owner) + if (map_chg) hugepage_subpool_put_pages(spool, 1); out_end_reservation: - vma_end_reservation(h, vma, addr); + if (map_chg != MAP_CHG_ENFORCED) + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } -- cgit v1.2.3 From 51e1de00acdb75585c46654147c7c7eb0689a068 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:00 -0500 Subject: mm/hugetlb: simplify vma_has_reserves() vma_has_reserves() is a helper "trying" to know whether the vma should consume one reservation when allocating the hugetlb folio. However it's not clear on why we need such complexity, as such information is already represented in the "chg" variable. From alloc_hugetlb_folio() context, "chg" (or in the function's context, "gbl_chg") is defined as: - If gbl_chg=1, the allocation cannot reuse an existing reservation - If gbl_chg=0, the allocation should reuse an existing reservation Firstly, map_chg is defined as following, to cover all cases of hugetlb reservation scenarios (mostly, via vma_needs_reservation(), but cow_from_owner is an outlier): CONDITION HAS RESERVATION? ========= ================ - SHARED: always check against per-inode resv_map (ignore NONRESERVE) - If resv exists ==> YES [1] - If not ==> NO [2] - PRIVATE: complicated... - Request came from a CoW from owner resv map ==> NO [3] (when cow_from_owner==true) - If does not own a resv_map at all.. 
==> NO [4] (examples: VM_NORESERVE, private fork()) - If owns a resv_map, but resv donsn't exists ==> NO [5] - If owns a resv_map, and resv exists ==> YES [6] Further on, gbl_chg considered spool setup, so that is a decision based on all the context. If we look at vma_has_reserves(), it almost does check that has already been processed by map_chg accounting (I marked each return value to the case above): static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { if (vma->vm_flags & VM_MAYSHARE && chg == 0) return true; ==> [1] else return false; ==> [2] or [4] } if (vma->vm_flags & VM_MAYSHARE) { if (chg) return false; ==> [2] else return true; ==> [1] } if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { if (chg) return false; ==> [5] else return true; ==> [6] } return false; ==> [4] } It didn't check [3], but [3] case was actually already covered now by the "chg" / "gbl_chg" / "map_chg" calculations. In short, vma_has_reserves() doesn't provide anything more than return "!chg".. so just simplify all the things. There're a lot of comments describing truncation races, IIUC there should have no race as long as map_chg is properly done. Link: https://lkml.kernel.org/r/20250107204002.2683356-6-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 67 +++++++----------------------------------------------------- 1 file changed, 7 insertions(+), 60 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bac6ee9ede4d..efd8e9f9bf0e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1248,66 +1248,13 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) } /* Returns true if the VMA has associated reserve pages */ -static bool vma_has_reserves(struct vm_area_struct *vma, long chg) +static bool vma_has_reserves(long chg) { - if (vma->vm_flags & VM_NORESERVE) { - /* - * This address is already reserved by other process(chg == 0), - * so, we should decrement reserved count. Without decrementing, - * reserve count remains after releasing inode, because this - * allocated page will go into page cache and is regarded as - * coming from reserved pool in releasing step. Currently, we - * don't have any other solution to deal with this situation - * properly, so add work-around here. - */ - if (vma->vm_flags & VM_MAYSHARE && chg == 0) - return true; - else - return false; - } - - /* Shared mappings always use reserves */ - if (vma->vm_flags & VM_MAYSHARE) { - /* - * We know VM_NORESERVE is not set. Therefore, there SHOULD - * be a region map for all pages. The only situation where - * there is no region map is if a hole was punched via - * fallocate. In this case, there really are no reserves to - * use. This situation is indicated if chg != 0. - */ - if (chg) - return false; - else - return true; - } - /* - * Only the process that called mmap() has reserves for - * private mappings. + * Now "chg" has all the conditions considered for whether we + * should use an existing reservation. */ - if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { - /* - * Like the shared case above, a hole punch or truncate - * could have been performed on the private mapping. - * Examine the value of chg to determine if reserves - * actually exist or were previously consumed. - * Very Subtle - The value of chg comes from a previous - * call to vma_needs_reserves(). 
The reserve map for - * private mappings has different (opposite) semantics - * than that of shared mappings. vma_needs_reserves() - * has already taken this difference in semantics into - * account. Therefore, the meaning of chg is the same - * as in the shared case above. Code could easily be - * combined, but keeping it separate draws attention to - * subtle differences. - */ - if (chg) - return false; - else - return true; - } - - return false; + return chg == 0; } static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) @@ -1411,7 +1358,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, * have no page reserves. This check ensures that reservations are * not "stolen". The child may still get SIGKILLed */ - if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) + if (!vma_has_reserves(chg) && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); @@ -1429,7 +1376,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && vma_has_reserves(vma, chg)) { + if (folio && vma_has_reserves(chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3120,7 +3067,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (vma_has_reserves(vma, gbl_chg)) { + if (vma_has_reserves(gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } -- cgit v1.2.3 From 72d8f72631d278c7003defbe3fe88fe5332822f8 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:01 -0500 Subject: mm/hugetlb: drop vma_has_reserves() After the previous cleanup, vma_has_reserves() is mostly an empty helper except that it says "use reserve count" is inverted meaning from "needs a global reserve count", which is still true. To avoid confusions on having two inverted ways to ask the same question, always use the gbl_chg everywhere, and drop the function. When at it, rename "chg" to "gbl_chg" in dequeue_hugetlb_folio_vma(). It might be helpful for readers to see that the "chg" here is the global reserve count, not the vma resv count. Link: https://lkml.kernel.org/r/20250107204002.2683356-7-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index efd8e9f9bf0e..8e46798a9dfc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1247,16 +1247,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) hugetlb_dup_vma_private(vma); } -/* Returns true if the VMA has associated reserve pages */ -static bool vma_has_reserves(long chg) -{ - /* - * Now "chg" has all the conditions considered for whether we - * should use an existing reservation. 
- */ - return chg == 0; -} - static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) { int nid = folio_nid(folio); @@ -1345,7 +1335,7 @@ static unsigned long available_huge_pages(struct hstate *h) static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, long chg) + unsigned long address, long gbl_chg) { struct folio *folio = NULL; struct mempolicy *mpol; @@ -1354,11 +1344,10 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, int nid; /* - * A child process with MAP_PRIVATE mappings created by their parent - * have no page reserves. This check ensures that reservations are - * not "stolen". The child may still get SIGKILLed + * gbl_chg==1 means the allocation requires a new page that was not + * reserved before. Making sure there's at least one free page. */ - if (!vma_has_reserves(chg) && !available_huge_pages(h)) + if (gbl_chg && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); @@ -1376,7 +1365,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && vma_has_reserves(chg)) { + if (folio && !gbl_chg) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3067,7 +3056,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (vma_has_reserves(gbl_chg)) { + if (!gbl_chg) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } -- cgit v1.2.3 From f931af2e41ab406e7fc3063ba5a73b536779006e Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:02 -0500 Subject: mm/hugetlb: unify restore reserve accounting for new allocations Either hugetlb pages dequeued from hstate, or newly allocated from buddy, would require restore-reserve accounting to be managed properly. Merge the two paths on it. Add a small comment to make it slightly nicer. Link: https://lkml.kernel.org/r/20250107204002.2683356-8-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8e46798a9dfc..58c2c5498207 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1365,11 +1365,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && !gbl_chg) { - folio_set_hugetlb_restore_reserve(folio); - h->resv_huge_pages--; - } - mpol_cond_put(mpol); return folio; @@ -3056,15 +3051,20 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!gbl_chg) { - folio_set_hugetlb_restore_reserve(folio); - h->resv_huge_pages--; - } list_add(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); /* Fall through */ } + /* + * Either dequeued or buddy-allocated folio needs to add special + * mark to the folio when it consumes a global reservation. + */ + if (!gbl_chg) { + folio_set_hugetlb_restore_reserve(folio); + h->resv_huge_pages--; + } + hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. 
-- cgit v1.2.3 From 4c640f128074e0d4459ecf072595a44df5c2ae18 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 13 Jan 2025 14:16:07 +0100 Subject: mm/hugetlb: rename isolate_hugetlb() to folio_isolate_hugetlb() Let's make the function name match "folio_isolate_lru()", and add some kernel doc. Link: https://lkml.kernel.org/r/20250113131611.2554758-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Baolin Wang Cc: Muchun Song Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/gup.c | 2 +- mm/hugetlb.c | 23 ++++++++++++++++++++--- mm/mempolicy.c | 2 +- mm/migrate.c | 6 +++--- 5 files changed, 27 insertions(+), 10 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 49ec2362ce92..c95ad5cd7894 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -153,7 +153,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); -bool isolate_hugetlb(struct folio *folio, struct list_head *list); +bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); @@ -414,7 +414,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list) +static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { return false; } diff --git a/mm/gup.c b/mm/gup.c index 00a1269cbee0..2cc3a9d28e70 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2344,7 +2344,7 @@ static unsigned long collect_longterm_unpinnable_folios( continue; if (folio_test_hugetlb(folio)) { - isolate_hugetlb(folio, movable_folio_list); + folio_isolate_hugetlb(folio, movable_folio_list); continue; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 58c2c5498207..15a689964265 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2812,7 +2812,7 @@ retry: * Fail with -EBUSY if not possible. */ spin_unlock_irq(&hugetlb_lock); - isolated = isolate_hugetlb(old_folio, list); + isolated = folio_isolate_hugetlb(old_folio, list); ret = isolated ? 0 : -EBUSY; spin_lock_irq(&hugetlb_lock); goto free_new; @@ -2897,7 +2897,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) if (hstate_is_gigantic(h)) return -ENOMEM; - if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) + if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list)) ret = 0; else if (!folio_ref_count(folio)) ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); @@ -7421,7 +7421,24 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ -bool isolate_hugetlb(struct folio *folio, struct list_head *list) +/** + * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio + * @folio: the folio to isolate + * @list: the list to add the folio to on success + * + * Isolate an allocated (refcount > 0) hugetlb folio, marking it as + * isolated/non-migratable, and moving it from the active list to the + * given list. + * + * Isolation will fail if @folio is not an allocated hugetlb folio, or if + * it is already isolated/non-migratable. 
+ * + * On success, an additional folio reference is taken that must be dropped + * using folio_putback_active_hugetlb() to undo the isolation. + * + * Return: True if isolation worked, otherwise False. + */ +bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { bool ret = true; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f83b73236ffe..bbaadbeeb291 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -647,7 +647,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, */ if ((flags & MPOL_MF_MOVE_ALL) || (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) - if (!isolate_hugetlb(folio, qp->pagelist)) + if (!folio_isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: spin_unlock(ptl); diff --git a/mm/migrate.c b/mm/migrate.c index 32cc8e0b1cce..c3052877e844 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -128,7 +128,7 @@ static void putback_movable_folio(struct folio *folio) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_hugetlb(). + * and folio_isolate_hugetlb(). */ void putback_movable_pages(struct list_head *l) { @@ -169,7 +169,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list) bool isolated, lru; if (folio_test_hugetlb(folio)) - return isolate_hugetlb(folio, list); + return folio_isolate_hugetlb(folio, list); lru = !__folio_test_movable(folio); if (lru) @@ -2200,7 +2200,7 @@ static int __add_folio_for_migration(struct folio *folio, int node, return -EACCES; if (folio_test_hugetlb(folio)) { - if (isolate_hugetlb(folio, pagelist)) + if (folio_isolate_hugetlb(folio, pagelist)) return 1; } else if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, pagelist); -- cgit v1.2.3 From ba23f58de896842028b4b33b95530f08288396fe Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 13 Jan 2025 14:16:08 +0100 Subject: mm/migrate: don't call folio_putback_active_hugetlb() on dst hugetlb folio We replaced a simple put_page() by a putback_active_hugepage() call in commit 3aaa76e125c1 ("mm: migrate: hugetlb: putback destination hugepage to active list"), to set the "active" flag on the dst hugetlb folio. Nowadays, we decoupled the "active" list from the flag, by calling the flag "migratable". Calling "putback" on something that wasn't allocated is weird and not future proof, especially if we might reach that path when migration failed and we just want to free the freshly allocated hugetlb folio. Let's simply handle the migratable flag and the active list flag in move_hugetlb_state(), where we know that allocation succeeded and already handle the temporary flag; use a simple folio_put() to return our reference. Link: https://lkml.kernel.org/r/20250113131611.2554758-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Baolin Wang Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- mm/hugetlb.c | 10 ++++++++++ mm/migrate.c | 8 ++++---- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 15a689964265..4f95031eeeea 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7533,6 +7533,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re } spin_unlock_irq(&hugetlb_lock); } + + /* + * Our old folio is isolated and has "migratable" cleared until it + * is putback. As migration succeeded, set the new folio "migratable" + * and add it to the active list. 
+ */ + spin_lock_irq(&hugetlb_lock); + folio_set_hugetlb_migratable(new_folio); + list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist); + spin_unlock_irq(&hugetlb_lock); } static void hugetlb_unshare_pmds(struct vm_area_struct *vma, diff --git a/mm/migrate.c b/mm/migrate.c index c3052877e844..2a6da67a0eda 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1539,14 +1539,14 @@ out: list_move_tail(&src->lru, ret); /* - * If migration was not successful and there's a freeing callback, use - * it. Otherwise, put_page() will drop the reference grabbed during - * isolation. + * If migration was not successful and there's a freeing callback, + * return the folio to that special allocator. Otherwise, simply drop + * our additional reference. */ if (put_new_folio) put_new_folio(dst, private); else - folio_putback_active_hugetlb(dst); + folio_put(dst); return rc; } -- cgit v1.2.3 From b235448e8cab7eea17d164efc7bf55505985ba65 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 13 Jan 2025 14:16:09 +0100 Subject: mm/hugetlb: rename folio_putback_active_hugetlb() to folio_putback_hugetlb() Now that folio_putback_hugetlb() is only called on folios that were previously isolated through folio_isolate_hugetlb(), let's rename it to match folio_putback_lru(). Add some kernel doc to clarify how this function is supposed to be used. Link: https://lkml.kernel.org/r/20250113131611.2554758-5-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Baolin Wang Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 15 +++++++++++++-- mm/migrate.c | 6 +++--- 3 files changed, 18 insertions(+), 7 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index c95ad5cd7894..ec8c0ccc8f95 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -157,7 +157,7 @@ bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); -void folio_putback_active_hugetlb(struct folio *folio); +void folio_putback_hugetlb(struct folio *folio); void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; @@ -430,7 +430,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags, return 0; } -static inline void folio_putback_active_hugetlb(struct folio *folio) +static inline void folio_putback_hugetlb(struct folio *folio) { } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4f95031eeeea..308b0e3876b5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7434,7 +7434,7 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) * it is already isolated/non-migratable. * * On success, an additional folio reference is taken that must be dropped - * using folio_putback_active_hugetlb() to undo the isolation. + * using folio_putback_hugetlb() to undo the isolation. * * Return: True if isolation worked, otherwise False. 
*/ @@ -7486,7 +7486,18 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags, return ret; } -void folio_putback_active_hugetlb(struct folio *folio) +/** + * folio_putback_hugetlb - unisolate a hugetlb folio + * @folio: the isolated hugetlb folio + * + * Putback/un-isolate the hugetlb folio that was previous isolated using + * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it + * back onto the active list. + * + * Will drop the additional folio reference obtained through + * folio_isolate_hugetlb(). + */ +void folio_putback_hugetlb(struct folio *folio) { spin_lock_irq(&hugetlb_lock); folio_set_hugetlb_migratable(folio); diff --git a/mm/migrate.c b/mm/migrate.c index 2a6da67a0eda..61587121ccb2 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -137,7 +137,7 @@ void putback_movable_pages(struct list_head *l) list_for_each_entry_safe(folio, folio2, l, lru) { if (unlikely(folio_test_hugetlb(folio))) { - folio_putback_active_hugetlb(folio); + folio_putback_hugetlb(folio); continue; } list_del(&folio->lru); @@ -1451,7 +1451,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, if (folio_ref_count(src) == 1) { /* page was freed from under us. So we are done. */ - folio_putback_active_hugetlb(src); + folio_putback_hugetlb(src); return MIGRATEPAGE_SUCCESS; } @@ -1534,7 +1534,7 @@ out_unlock: folio_unlock(src); out: if (rc == MIGRATEPAGE_SUCCESS) - folio_putback_active_hugetlb(src); + folio_putback_hugetlb(src); else if (rc != -EAGAIN) list_move_tail(&src->lru, ret); -- cgit v1.2.3 From 89a41a0263293856678189981e5407375261c4ff Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 13 Jan 2025 14:16:11 +0100 Subject: mm/hugetlb: use folio->lru int demote_free_hugetlb_folios() We are demoting hugetlb folios to smaller hugetlb folios; let's avoid messing with pages where avoidable and handle it more similar to __split_huge_page_tail(). Link: https://lkml.kernel.org/r/20250113131611.2554758-7-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Sidhartha Kumar Cc: Baolin Wang Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 308b0e3876b5..87761b042ed0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3826,13 +3826,15 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { struct page *page = folio_page(folio, i); + /* Careful: see __split_huge_page_tail() */ + struct folio *new_folio = (struct folio *)page; - page->mapping = NULL; clear_compound_head(page); prep_compound_page(page, dst->order); - init_new_hugetlb_folio(dst, page_folio(page)); - list_add(&page->lru, &dst_list); + new_folio->mapping = NULL; + init_new_hugetlb_folio(dst, new_folio); + list_add(&new_folio->lru, &dst_list); } } -- cgit v1.2.3 From 1751f872cc97f992ed5c4c72c55588db1f0021e1 Mon Sep 17 00:00:00 2001 From: Joel Granados Date: Tue, 28 Jan 2025 13:48:37 +0100 Subject: treewide: const qualify ctl_tables where applicable Add the const qualifier to all the ctl_tables in the tree except for watchdog_hardlockup_sysctl, memory_allocation_profiling_sysctls, loadpin_sysctl_table and the ones calling register_net_sysctl (./net, drivers/inifiniband dirs). 
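Purely as an illustration of the pattern this treewide change applies, a const-qualified handler table behaves as in the standalone program below. The struct, table and handler names are made up stand-ins for ctl_table and proc_handler; only the placement of the const qualifier mirrors the real conversion.

#include <stdio.h>

/* Stand-in for struct ctl_table: a name plus a proc_handler-style pointer. */
struct fake_ctl_table {
	const char *procname;
	int (*proc_handler)(void);
};

static int example_handler(void)
{
	return 42;
}

/* const lets the array live in .rodata, so the handler pointer cannot be rewritten. */
static const struct fake_ctl_table example_table[] = {
	{ .procname = "example", .proc_handler = example_handler },
};

int main(void)
{
	/* example_table[0].proc_handler = NULL; */	/* rejected: object is read-only */
	printf("%s -> %d\n", example_table[0].procname,
	       example_table[0].proc_handler());
	return 0;
}

The tables excluded above keep the non-const form for the reasons given next.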
These are special cases as they use a registration function with a non-const qualified ctl_table argument or modify the arrays before passing them on to the registration function. Constifying ctl_table structs will prevent the modification of proc_handler function pointers as the arrays would reside in .rodata. This is made possible after commit 78eb4ea25cd5 ("sysctl: treewide: constify the ctl_table argument of proc_handlers") constified all the proc_handlers. Created this by running an spatch followed by a sed command: Spatch: virtual patch @ depends on !(file in "net") disable optional_qualifier @ identifier table_name != { watchdog_hardlockup_sysctl, iwcm_ctl_table, ucma_ctl_table, memory_allocation_profiling_sysctls, loadpin_sysctl_table }; @@ + const struct ctl_table table_name [] = { ... }; sed: sed --in-place \ -e "s/struct ctl_table .table = &uts_kern/const struct ctl_table *table = \&uts_kern/" \ kernel/utsname_sysctl.c Reviewed-by: Song Liu Acked-by: Steven Rostedt (Google) # for kernel/trace/ Reviewed-by: Martin K. Petersen # SCSI Reviewed-by: Darrick J. Wong # xfs Acked-by: Jani Nikula Acked-by: Corey Minyard Acked-by: Wei Liu Acked-by: Thomas Gleixner Reviewed-by: Bill O'Donnell Acked-by: Baoquan He Acked-by: Ashutosh Dixit Acked-by: Anna Schumaker Signed-off-by: Joel Granados --- arch/arm/kernel/isa.c | 2 +- arch/arm64/kernel/fpsimd.c | 4 ++-- arch/arm64/kernel/process.c | 2 +- arch/powerpc/kernel/idle.c | 2 +- arch/powerpc/platforms/pseries/mobility.c | 2 +- arch/riscv/kernel/process.c | 2 +- arch/riscv/kernel/vector.c | 2 +- arch/s390/appldata/appldata_base.c | 2 +- arch/s390/kernel/debug.c | 2 +- arch/s390/kernel/hiperdispatch.c | 2 +- arch/s390/kernel/topology.c | 2 +- arch/s390/mm/cmm.c | 2 +- arch/s390/mm/pgalloc.c | 2 +- arch/x86/entry/vdso/vdso32-setup.c | 2 +- arch/x86/kernel/cpu/bus_lock.c | 2 +- crypto/fips.c | 2 +- drivers/base/firmware_loader/fallback_table.c | 2 +- drivers/cdrom/cdrom.c | 2 +- drivers/char/hpet.c | 2 +- drivers/char/ipmi/ipmi_poweroff.c | 2 +- drivers/char/random.c | 2 +- drivers/gpu/drm/i915/i915_perf.c | 2 +- drivers/gpu/drm/xe/xe_observation.c | 2 +- drivers/hv/hv_common.c | 2 +- drivers/md/md.c | 2 +- drivers/misc/sgi-xp/xpc_main.c | 4 ++-- drivers/perf/arm_pmuv3.c | 2 +- drivers/perf/riscv_pmu_sbi.c | 2 +- drivers/scsi/scsi_sysctl.c | 2 +- drivers/scsi/sg.c | 2 +- drivers/tty/tty_io.c | 2 +- drivers/xen/balloon.c | 2 +- fs/aio.c | 2 +- fs/cachefiles/error_inject.c | 2 +- fs/coda/sysctl.c | 2 +- fs/coredump.c | 2 +- fs/dcache.c | 2 +- fs/devpts/inode.c | 2 +- fs/eventpoll.c | 2 +- fs/exec.c | 2 +- fs/file_table.c | 2 +- fs/fuse/sysctl.c | 2 +- fs/inode.c | 2 +- fs/lockd/svc.c | 2 +- fs/locks.c | 2 +- fs/namei.c | 2 +- fs/namespace.c | 2 +- fs/nfs/nfs4sysctl.c | 2 +- fs/nfs/sysctl.c | 2 +- fs/notify/dnotify/dnotify.c | 2 +- fs/notify/fanotify/fanotify_user.c | 2 +- fs/notify/inotify/inotify_user.c | 2 +- fs/ocfs2/stackglue.c | 2 +- fs/pipe.c | 2 +- fs/quota/dquot.c | 2 +- fs/sysctls.c | 2 +- fs/userfaultfd.c | 2 +- fs/verity/init.c | 2 +- fs/xfs/xfs_sysctl.c | 2 +- init/do_mounts_initrd.c | 2 +- io_uring/io_uring.c | 2 +- ipc/ipc_sysctl.c | 2 +- ipc/mq_sysctl.c | 2 +- kernel/acct.c | 2 +- kernel/bpf/syscall.c | 2 +- kernel/delayacct.c | 2 +- kernel/exit.c | 2 +- kernel/hung_task.c | 2 +- kernel/kexec_core.c | 2 +- kernel/kprobes.c | 2 +- kernel/latencytop.c | 2 +- kernel/locking/lockdep.c | 2 +- kernel/panic.c | 2 +- kernel/pid.c | 2 +- kernel/pid_namespace.c | 2 +- kernel/pid_sysctl.h | 2 +- kernel/printk/sysctl.c | 2 +- kernel/reboot.c | 2 
+- kernel/sched/autogroup.c | 2 +- kernel/sched/core.c | 2 +- kernel/sched/deadline.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/rt.c | 2 +- kernel/sched/topology.c | 2 +- kernel/seccomp.c | 2 +- kernel/signal.c | 2 +- kernel/stackleak.c | 2 +- kernel/sysctl-test.c | 6 +++--- kernel/sysctl.c | 4 ++-- kernel/time/timer.c | 2 +- kernel/trace/ftrace.c | 2 +- kernel/trace/trace_events_user.c | 2 +- kernel/umh.c | 2 +- kernel/utsname_sysctl.c | 4 ++-- kernel/watchdog.c | 2 +- lib/test_sysctl.c | 6 +++--- mm/compaction.c | 2 +- mm/hugetlb.c | 2 +- mm/hugetlb_vmemmap.c | 2 +- mm/memory-failure.c | 2 +- mm/oom_kill.c | 2 +- mm/page-writeback.c | 2 +- mm/page_alloc.c | 2 +- security/apparmor/lsm.c | 2 +- security/keys/sysctl.c | 2 +- security/yama/yama_lsm.c | 2 +- 106 files changed, 114 insertions(+), 114 deletions(-) (limited to 'mm/hugetlb.c') diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index 905b1b191546..db8be609fab2 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c @@ -16,7 +16,7 @@ static unsigned int isa_membase, isa_portbase, isa_portshift; -static struct ctl_table ctl_isa_vars[] = { +static const struct ctl_table ctl_isa_vars[] = { { .procname = "membase", .data = &isa_membase, diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 8c4c1a2186cc..2b601d88762d 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -562,7 +562,7 @@ static int vec_proc_do_default_vl(const struct ctl_table *table, int write, return 0; } -static struct ctl_table sve_default_vl_table[] = { +static const struct ctl_table sve_default_vl_table[] = { { .procname = "sve_default_vector_length", .mode = 0644, @@ -585,7 +585,7 @@ static int __init sve_sysctl_init(void) { return 0; } #endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ #if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL) -static struct ctl_table sme_default_vl_table[] = { +static const struct ctl_table sme_default_vl_table[] = { { .procname = "sme_default_vector_length", .mode = 0644, diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2968a33bb3bc..42faebb7b712 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -859,7 +859,7 @@ long get_tagged_addr_ctrl(struct task_struct *task) * disable it for tasks that already opted in to the relaxed ABI. */ -static struct ctl_table tagged_addr_sysctl_table[] = { +static const struct ctl_table tagged_addr_sysctl_table[] = { { .procname = "tagged_addr_disabled", .mode = 0644, diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 30b56c67fa61..e527cd3ef128 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -97,7 +97,7 @@ void power4_idle(void) /* * Register the sysctl to set/clear powersave_nap. 
*/ -static struct ctl_table powersave_nap_ctl_table[] = { +static const struct ctl_table powersave_nap_ctl_table[] = { { .procname = "powersave-nap", .data = &powersave_nap, diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 1798f0f14d58..62bd8e2d5d4c 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -53,7 +53,7 @@ struct update_props_workarea { static unsigned int nmi_wd_lpm_factor = 200; #ifdef CONFIG_SYSCTL -static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { +static const struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { { .procname = "nmi_wd_lpm_factor", .data = &nmi_wd_lpm_factor, diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 58b6482c2bf6..7891294abf49 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -364,7 +364,7 @@ static bool try_to_set_pmm(unsigned long value) * disable it for tasks that already opted in to the relaxed ABI. */ -static struct ctl_table tagged_addr_sysctl_table[] = { +static const struct ctl_table tagged_addr_sysctl_table[] = { { .procname = "tagged_addr_disabled", .mode = 0644, diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 821818886fab..d022b028ac3f 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -287,7 +287,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg) #ifdef CONFIG_SYSCTL -static struct ctl_table riscv_v_default_vstate_table[] = { +static const struct ctl_table riscv_v_default_vstate_table[] = { { .procname = "riscv_v_default_allow", .data = &riscv_v_implicit_uacc, diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 91a30e017d65..dd7ba7587dd5 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -52,7 +52,7 @@ static int appldata_interval_handler(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); static struct ctl_table_header *appldata_sysctl_header; -static struct ctl_table appldata_table[] = { +static const struct ctl_table appldata_table[] = { { .procname = "timer", .mode = S_IRUGO | S_IWUSR, diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ba6b7329a10e..ce038e9205f7 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1122,7 +1122,7 @@ static int s390dbf_procactive(const struct ctl_table *table, int write, return 0; } -static struct ctl_table s390dbf_table[] = { +static const struct ctl_table s390dbf_table[] = { { .procname = "debug_stoppable", .data = &debug_stoppable, diff --git a/arch/s390/kernel/hiperdispatch.c b/arch/s390/kernel/hiperdispatch.c index 2a99a216ab62..7857a7e8e56c 100644 --- a/arch/s390/kernel/hiperdispatch.c +++ b/arch/s390/kernel/hiperdispatch.c @@ -292,7 +292,7 @@ static int hiperdispatch_ctl_handler(const struct ctl_table *ctl, int write, return 0; } -static struct ctl_table hiperdispatch_ctl_table[] = { +static const struct ctl_table hiperdispatch_ctl_table[] = { { .procname = "hiperdispatch", .mode = 0644, diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 3808f942a433..211cc8382e4a 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -667,7 +667,7 @@ static int polarization_ctl_handler(const struct ctl_table *ctl, int write, return set_polarization(polarization); } -static struct ctl_table topology_ctl_table[] = { +static const struct ctl_table topology_ctl_table[] = { { .procname = 
"topology", .mode = 0644, diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 7bf0f691827b..39f44b6256e0 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -332,7 +332,7 @@ static int cmm_timeout_handler(const struct ctl_table *ctl, int write, return 0; } -static struct ctl_table cmm_table[] = { +static const struct ctl_table cmm_table[] = { { .procname = "cmm_pages", .mode = 0644, diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index d33f55b7ee98..cd2fef79ad2c 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -21,7 +21,7 @@ int page_table_allocate_pgste = 0; EXPORT_SYMBOL(page_table_allocate_pgste); -static struct ctl_table page_table_sysctl[] = { +static const struct ctl_table page_table_sysctl[] = { { .procname = "allocate_pgste", .data = &page_table_allocate_pgste, diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c index 76e4e74f35b5..f6d2d8aba643 100644 --- a/arch/x86/entry/vdso/vdso32-setup.c +++ b/arch/x86/entry/vdso/vdso32-setup.c @@ -57,7 +57,7 @@ __setup_param("vdso=", vdso_setup, vdso32_setup, 0); /* Register vsyscall32 into the ABI table */ #include -static struct ctl_table abi_table2[] = { +static const struct ctl_table abi_table2[] = { { .procname = "vsyscall32", .data = &vdso32_enabled, diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 704e9241b964..6cba85c79d42 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -49,7 +49,7 @@ static unsigned int sysctl_sld_mitigate = 1; static DEFINE_SEMAPHORE(buslock_sem, 1); #ifdef CONFIG_PROC_SYSCTL -static struct ctl_table sld_sysctls[] = { +static const struct ctl_table sld_sysctls[] = { { .procname = "split_lock_mitigate", .data = &sysctl_sld_mitigate, diff --git a/crypto/fips.c b/crypto/fips.c index a58e7750f532..2fa3a9ee61a1 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -41,7 +41,7 @@ __setup("fips=", fips_enable); static char fips_name[] = FIPS_MODULE_NAME; static char fips_version[] = FIPS_MODULE_VERSION; -static struct ctl_table crypto_sysctl_table[] = { +static const struct ctl_table crypto_sysctl_table[] = { { .procname = "fips_enabled", .data = &fips_enabled, diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index ddb70e29eb42..c8afc501a8a4 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -25,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = { EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE"); #ifdef CONFIG_SYSCTL -static struct ctl_table firmware_config_table[] = { +static const struct ctl_table firmware_config_table[] = { { .procname = "force_sysfs_fallback", .data = &fw_fallback_config.force_sysfs_fallback, diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 51745ed1bbab..b163e043c687 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3612,7 +3612,7 @@ static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write, } /* Place files in /proc/sys/dev/cdrom */ -static struct ctl_table cdrom_table[] = { +static const struct ctl_table cdrom_table[] = { { .procname = "info", .data = &cdrom_sysctl_settings.info, diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 48fe96ab4649..e110857824fc 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -724,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -static struct ctl_table hpet_table[] = { +static const 
struct ctl_table hpet_table[] = { { .procname = "max-user-freq", .data = &hpet_max_freq, diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 05f17e3e6207..e63c316d8aaa 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = { #ifdef CONFIG_PROC_FS #include -static struct ctl_table ipmi_table[] = { +static const struct ctl_table ipmi_table[] = { { .procname = "poweroff_powercycle", .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), diff --git a/drivers/char/random.c b/drivers/char/random.c index 23ee76bbb4aa..2581186fa61b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1665,7 +1665,7 @@ static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf, return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos); } -static struct ctl_table random_table[] = { +static const struct ctl_table random_table[] = { { .procname = "poolsize", .data = &sysctl_poolsize, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2406cda75b7b..5384d1bb4923 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -4802,7 +4802,7 @@ err_unlock: return ret; } -static struct ctl_table oa_table[] = { +static const struct ctl_table oa_table[] = { { .procname = "perf_stream_paranoid", .data = &i915_perf_stream_paranoid, diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c index 8ec1b84cbb9e..57cf01efc07f 100644 --- a/drivers/gpu/drm/xe/xe_observation.c +++ b/drivers/gpu/drm/xe/xe_observation.c @@ -56,7 +56,7 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi } } -static struct ctl_table observation_ctl_table[] = { +static const struct ctl_table observation_ctl_table[] = { { .procname = "observation_paranoid", .data = &xe_observation_paranoid, diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index af5d1dc451f6..f2e6f55d6ca6 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -141,7 +141,7 @@ static int sysctl_record_panic_msg = 1; * sysctl option to allow the user to control whether kmsg data should be * reported to Hyper-V on panic. 
*/ -static struct ctl_table hv_ctl_table[] = { +static const struct ctl_table hv_ctl_table[] = { { .procname = "hyperv_record_panic_msg", .data = &sysctl_record_panic_msg, diff --git a/drivers/md/md.c b/drivers/md/md.c index 866015b681af..22f7bd3b94d5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -294,7 +294,7 @@ void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev) static struct ctl_table_header *raid_table_header; -static struct ctl_table raid_table[] = { +static const struct ctl_table raid_table[] = { { .procname = "speed_limit_min", .data = &sysctl_speed_limit_min, diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 61b66e318488..7a3c34306de9 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -93,7 +93,7 @@ int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT; static int xpc_disengage_min_timelimit; /* = 0 */ static int xpc_disengage_max_timelimit = 120; -static struct ctl_table xpc_sys_xpc_hb[] = { +static const struct ctl_table xpc_sys_xpc_hb[] = { { .procname = "hb_interval", .data = &xpc_hb_interval, @@ -111,7 +111,7 @@ static struct ctl_table xpc_sys_xpc_hb[] = { .extra1 = &xpc_hb_check_min_interval, .extra2 = &xpc_hb_check_max_interval}, }; -static struct ctl_table xpc_sys_xpc[] = { +static const struct ctl_table xpc_sys_xpc[] = { { .procname = "disengage_timelimit", .data = &xpc_disengage_timelimit, diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index b5cc11abc962..0e360feb3432 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -1279,7 +1279,7 @@ static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int return 0; } -static struct ctl_table armv8_pmu_sysctl_table[] = { +static const struct ctl_table armv8_pmu_sysctl_table[] = { { .procname = "perf_user_access", .data = &sysctl_perf_user_access, diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 194c153e5d71..698de8ddf895 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -1317,7 +1317,7 @@ static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table, return 0; } -static struct ctl_table sbi_pmu_sysctl_table[] = { +static const struct ctl_table sbi_pmu_sysctl_table[] = { { .procname = "perf_user_access", .data = &sysctl_perf_user_access, diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 093774d77534..be4aef0f4f99 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -12,7 +12,7 @@ #include "scsi_priv.h" -static struct ctl_table scsi_table[] = { +static const struct ctl_table scsi_table[] = { { .procname = "logging_level", .data = &scsi_logging_level, .maxlen = sizeof(scsi_logging_level), diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 94127868bedf..effb7e768165 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1639,7 +1639,7 @@ MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); #ifdef CONFIG_SYSCTL #include -static struct ctl_table sg_sysctls[] = { +static const struct ctl_table sg_sysctls[] = { { .procname = "sg-big-buff", .data = &sg_big_buff, diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 50c0c23ae678..449dbd216460 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -3617,7 +3617,7 @@ void console_sysfs_notify(void) sysfs_notify(&consdev->kobj, NULL, "active"); } -static struct ctl_table tty_table[] = { +static const struct ctl_table tty_table[] = { { .procname = "legacy_tiocsti", 
.data = &tty_legacy_tiocsti, diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 528395133b4f..163f7f1d70f1 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -84,7 +84,7 @@ module_param(balloon_boot_timeout, uint, 0444); #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG static int xen_hotplug_unpopulated; -static struct ctl_table balloon_table[] = { +static const struct ctl_table balloon_table[] = { { .procname = "hotplug_unpopulated", .data = &xen_hotplug_unpopulated, diff --git a/fs/aio.c b/fs/aio.c index 50671640b588..7b976b564cfc 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -224,7 +224,7 @@ static unsigned long aio_nr; /* current system wide number of aio requests */ static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ /*----end sysctl variables---*/ #ifdef CONFIG_SYSCTL -static struct ctl_table aio_sysctls[] = { +static const struct ctl_table aio_sysctls[] = { { .procname = "aio-nr", .data = &aio_nr, diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c index 1715d5ca2b2d..e341ade47dd8 100644 --- a/fs/cachefiles/error_inject.c +++ b/fs/cachefiles/error_inject.c @@ -11,7 +11,7 @@ unsigned int cachefiles_error_injection_state; static struct ctl_table_header *cachefiles_sysctl; -static struct ctl_table cachefiles_sysctls[] = { +static const struct ctl_table cachefiles_sysctls[] = { { .procname = "error_injection", .data = &cachefiles_error_injection_state, diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c index 9f2d5743e2c8..0df46f09b6cc 100644 --- a/fs/coda/sysctl.c +++ b/fs/coda/sysctl.c @@ -14,7 +14,7 @@ static struct ctl_table_header *fs_table_header; -static struct ctl_table coda_table[] = { +static const struct ctl_table coda_table[] = { { .procname = "timeout", .data = &coda_timeout, diff --git a/fs/coredump.c b/fs/coredump.c index d48edb37bc35..591700e1b2ce 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -995,7 +995,7 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write, static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT; static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX; -static struct ctl_table coredump_sysctls[] = { +static const struct ctl_table coredump_sysctls[] = { { .procname = "core_uses_pid", .data = &core_uses_pid, diff --git a/fs/dcache.c b/fs/dcache.c index 1a01d7a6a7a9..1cd929f17eec 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -192,7 +192,7 @@ static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_dcache_sysctls[] = { +static const struct ctl_table fs_dcache_sysctls[] = { { .procname = "dentry-state", .data = &dentry_stat, diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b20e565b9c5e..1096ff8562fa 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -45,7 +45,7 @@ static int pty_limit_min; static int pty_limit_max = INT_MAX; static atomic_t pty_count = ATOMIC_INIT(0); -static struct ctl_table pty_table[] = { +static const struct ctl_table pty_table[] = { { .procname = "max", .maxlen = sizeof(int), diff --git a/fs/eventpoll.c b/fs/eventpoll.c index f9898e60dd8b..7c0980db77b3 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -318,7 +318,7 @@ static void unlist_file(struct epitems_head *head) static long long_zero; static long long_max = LONG_MAX; -static struct ctl_table epoll_table[] = { +static const struct ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = 
&max_user_watches, diff --git a/fs/exec.c b/fs/exec.c index a49839174472..506cd411f4ac 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -2159,7 +2159,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ return error; } -static struct ctl_table fs_exec_sysctls[] = { +static const struct ctl_table fs_exec_sysctls[] = { { .procname = "suid_dumpable", .data = &suid_dumpable, diff --git a/fs/file_table.c b/fs/file_table.c index a32171d2b83f..7f7c378c6e31 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -106,7 +106,7 @@ static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_stat_sysctls[] = { +static const struct ctl_table fs_stat_sysctls[] = { { .procname = "file-nr", .data = &files_stat, diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c index b272bb333005..63fb1e5bee30 100644 --- a/fs/fuse/sysctl.c +++ b/fs/fuse/sysctl.c @@ -13,7 +13,7 @@ static struct ctl_table_header *fuse_table_header; /* Bound by fuse_init_out max_pages, which is a u16 */ static unsigned int sysctl_fuse_max_pages_limit = 65535; -static struct ctl_table fuse_sysctl_table[] = { +static const struct ctl_table fuse_sysctl_table[] = { { .procname = "max_pages_limit", .data = &fuse_max_pages_limit, diff --git a/fs/inode.c b/fs/inode.c index 6b4c77268fc0..5587aabdaa5e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -184,7 +184,7 @@ static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table inodes_sysctls[] = { +static const struct ctl_table inodes_sysctls[] = { { .procname = "inode-nr", .data = &inodes_stat, diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 7ded57ec3a60..2c8eedc6c2cc 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -412,7 +412,7 @@ EXPORT_SYMBOL_GPL(lockd_down); * Sysctl parameters (same as module parameters, different interface). 
*/ -static struct ctl_table nlm_sysctls[] = { +static const struct ctl_table nlm_sysctls[] = { { .procname = "nlm_grace_period", .data = &nlm_grace_period, diff --git a/fs/locks.c b/fs/locks.c index 25afc8d9c9d1..1619cddfa7a4 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -97,7 +97,7 @@ static int leases_enable = 1; static int lease_break_time = 45; #ifdef CONFIG_SYSCTL -static struct ctl_table locks_sysctls[] = { +static const struct ctl_table locks_sysctls[] = { { .procname = "leases-enable", .data = &leases_enable, diff --git a/fs/namei.c b/fs/namei.c index e56c29a22d26..8c82afddd2ad 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1099,7 +1099,7 @@ static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; #ifdef CONFIG_SYSCTL -static struct ctl_table namei_sysctls[] = { +static const struct ctl_table namei_sysctls[] = { { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, diff --git a/fs/namespace.c b/fs/namespace.c index 4013fbac354a..a3ed3f2980cb 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -5985,7 +5985,7 @@ const struct proc_ns_operations mntns_operations = { }; #ifdef CONFIG_SYSCTL -static struct ctl_table fs_namespace_sysctls[] = { +static const struct ctl_table fs_namespace_sysctls[] = { { .procname = "mount-max", .data = &sysctl_mount_max, diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c index 886a7c4c60b3..d1a92d8f8ba4 100644 --- a/fs/nfs/nfs4sysctl.c +++ b/fs/nfs/nfs4sysctl.c @@ -17,7 +17,7 @@ static const int nfs_set_port_min; static const int nfs_set_port_max = 65535; static struct ctl_table_header *nfs4_callback_sysctl_table; -static struct ctl_table nfs4_cb_sysctls[] = { +static const struct ctl_table nfs4_cb_sysctls[] = { { .procname = "nfs_callback_tcpport", .data = &nfs_callback_set_tcpport, diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index e645be1a3381..f579df0e8d67 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -14,7 +14,7 @@ static struct ctl_table_header *nfs_callback_sysctl_table; -static struct ctl_table nfs_cb_sysctls[] = { +static const struct ctl_table nfs_cb_sysctls[] = { { .procname = "nfs_mountpoint_timeout", .data = &nfs_mountpoint_expiry_timeout, diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 6004dfdfdf0f..c4cdaf5fa7ed 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -20,7 +20,7 @@ static int dir_notify_enable __read_mostly = 1; #ifdef CONFIG_SYSCTL -static struct ctl_table dnotify_sysctls[] = { +static const struct ctl_table dnotify_sysctls[] = { { .procname = "dir-notify-enable", .data = &dir_notify_enable, diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 6ff94e312232..ba3e2d09eb44 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -58,7 +58,7 @@ static int fanotify_max_queued_events __read_mostly; static long ft_zero = 0; static long ft_int_max = INT_MAX; -static struct ctl_table fanotify_table[] = { +static const struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index e0c48956608a..b372fb2c56bd 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __ro_after_init; static long it_zero = 0; static long it_int_max = INT_MAX; -static struct ctl_table inotify_table[] = { +static const struct 
ctl_table inotify_table[] = { { .procname = "max_user_instances", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES], diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 20aa37b67cfb..ddd761cf44c8 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -650,7 +650,7 @@ error: * and easier to preserve the name. */ -static struct ctl_table ocfs2_nm_table[] = { +static const struct ctl_table ocfs2_nm_table[] = { { .procname = "hb_ctl_path", .data = ocfs2_hb_ctl_path, diff --git a/fs/pipe.c b/fs/pipe.c index 82fede0f2111..94b59045ab44 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1478,7 +1478,7 @@ static int proc_dopipe_max_size(const struct ctl_table *table, int write, do_proc_dopipe_max_size_conv, NULL); } -static struct ctl_table fs_pipe_sysctls[] = { +static const struct ctl_table fs_pipe_sysctls[] = { { .procname = "pipe-max-size", .data = &pipe_max_size, diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index f9578918cfb2..825c5c2e0962 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2926,7 +2926,7 @@ static int do_proc_dqstats(const struct ctl_table *table, int write, return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_dqstats_table[] = { +static const struct ctl_table fs_dqstats_table[] = { { .procname = "lookups", .data = &dqstats.stat[DQST_LOOKUPS], diff --git a/fs/sysctls.c b/fs/sysctls.c index 8dbde9a802fa..ad429dffeb4b 100644 --- a/fs/sysctls.c +++ b/fs/sysctls.c @@ -7,7 +7,7 @@ #include #include -static struct ctl_table fs_shared_sysctls[] = { +static const struct ctl_table fs_shared_sysctls[] = { { .procname = "overflowuid", .data = &fs_overflowuid, diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 7c0bd0b55f88..97c4d71115d8 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -36,7 +36,7 @@ static int sysctl_unprivileged_userfaultfd __read_mostly; #ifdef CONFIG_SYSCTL -static struct ctl_table vm_userfaultfd_table[] = { +static const struct ctl_table vm_userfaultfd_table[] = { { .procname = "unprivileged_userfaultfd", .data = &sysctl_unprivileged_userfaultfd, diff --git a/fs/verity/init.c b/fs/verity/init.c index f440f0e61e3e..6e8d33b50240 100644 --- a/fs/verity/init.c +++ b/fs/verity/init.c @@ -10,7 +10,7 @@ #include #ifdef CONFIG_SYSCTL -static struct ctl_table fsverity_sysctl_table[] = { +static const struct ctl_table fsverity_sysctl_table[] = { #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES { .procname = "require_signatures", diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index c84df23b494d..751dc74a3067 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c @@ -66,7 +66,7 @@ xfs_deprecated_dointvec_minmax( return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); } -static struct ctl_table xfs_table[] = { +static const struct ctl_table xfs_table[] = { { .procname = "irix_sgid_inherit", .data = &xfs_params.sgid_inherit.val, diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index f86ef92a6c46..f6867bad0d78 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -21,7 +21,7 @@ phys_addr_t phys_initrd_start __initdata; unsigned long phys_initrd_size __initdata; #ifdef CONFIG_SYSCTL -static struct ctl_table kern_do_mounts_initrd_table[] = { +static const struct ctl_table kern_do_mounts_initrd_table[] = { { .procname = "real-root-dev", .data = &real_root_dev, diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 7bfbc7c22367..5a0f8a5041d6 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -157,7 +157,7 @@ static int __read_mostly 
sysctl_io_uring_disabled; static int __read_mostly sysctl_io_uring_group = -1; #ifdef CONFIG_SYSCTL -static struct ctl_table kernel_io_uring_disabled_table[] = { +static const struct ctl_table kernel_io_uring_disabled_table[] = { { .procname = "io_uring_disabled", .data = &sysctl_io_uring_disabled, diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 54318e0b4557..15b17e86e198 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -73,7 +73,7 @@ int ipc_mni = IPCMNI; int ipc_mni_shift = IPCMNI_SHIFT; int ipc_min_cycle = RADIX_TREE_MAP_SIZE; -static struct ctl_table ipc_sysctls[] = { +static const struct ctl_table ipc_sysctls[] = { { .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c index b70dc2ff22d8..0dd12e1c9f53 100644 --- a/ipc/mq_sysctl.c +++ b/ipc/mq_sysctl.c @@ -20,7 +20,7 @@ static int msg_max_limit_max = HARD_MSGMAX; static int msg_maxsize_limit_min = MIN_MSGSIZEMAX; static int msg_maxsize_limit_max = HARD_MSGSIZEMAX; -static struct ctl_table mq_sysctls[] = { +static const struct ctl_table mq_sysctls[] = { { .procname = "queues_max", .data = &init_ipc_ns.mq_queues_max, diff --git a/kernel/acct.c b/kernel/acct.c index 179848ad33e9..31222e8cd534 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -76,7 +76,7 @@ static int acct_parm[3] = {4, 2, 30}; #define ACCT_TIMEOUT (acct_parm[2]) /* foo second timeout between checks */ #ifdef CONFIG_SYSCTL -static struct ctl_table kern_acct_table[] = { +static const struct ctl_table kern_acct_table[] = { { .procname = "acct", .data = &acct_parm, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0daf098e3207..c420edbfb7c8 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -6128,7 +6128,7 @@ static int bpf_unpriv_handler(const struct ctl_table *table, int write, return ret; } -static struct ctl_table bpf_syscall_table[] = { +static const struct ctl_table bpf_syscall_table[] = { { .procname = "unprivileged_bpf_disabled", .data = &sysctl_unprivileged_bpf_disabled, diff --git a/kernel/delayacct.c b/kernel/delayacct.c index b238eb8c6573..eb63a021ac04 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -64,7 +64,7 @@ static int sysctl_delayacct(const struct ctl_table *table, int write, void *buff return err; } -static struct ctl_table kern_delayacct_table[] = { +static const struct ctl_table kern_delayacct_table[] = { { .procname = "task_delayacct", .data = NULL, diff --git a/kernel/exit.c b/kernel/exit.c index 1dcddfe537ee..3485e5fc499e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -85,7 +85,7 @@ static unsigned int oops_limit = 10000; #ifdef CONFIG_SYSCTL -static struct ctl_table kern_exit_table[] = { +static const struct ctl_table kern_exit_table[] = { { .procname = "oops_limit", .data = &oops_limit, diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 953169893a95..04efa7a6e69b 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -274,7 +274,7 @@ static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int writ * and hung_task_check_interval_secs */ static const unsigned long hung_task_timeout_max = (LONG_MAX / HZ); -static struct ctl_table hung_task_sysctls[] = { +static const struct ctl_table hung_task_sysctls[] = { #ifdef CONFIG_SMP { .procname = "hung_task_all_cpu_backtrace", diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index b424a5c6ae87..c0bdc1686154 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -925,7 +925,7 @@ static int kexec_limit_handler(const struct ctl_table *table, int write, return 
proc_dointvec(&tmp, write, buffer, lenp, ppos); } -static struct ctl_table kexec_core_sysctls[] = { +static const struct ctl_table kexec_core_sysctls[] = { { .procname = "kexec_load_disabled", .data = &kexec_load_disabled, diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 030569210670..88aeac84e4c0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -946,7 +946,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table, return ret; } -static struct ctl_table kprobe_sysctls[] = { +static const struct ctl_table kprobe_sysctls[] = { { .procname = "kprobes-optimization", .data = &sysctl_kprobes_optimization, diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 77ee3ea8a573..d4281d1e13a6 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -77,7 +77,7 @@ static int sysctl_latencytop(const struct ctl_table *table, int write, void *buf return err; } -static struct ctl_table latencytop_sysctl[] = { +static const struct ctl_table latencytop_sysctl[] = { { .procname = "latencytop", .data = &latencytop_enabled, diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 29acd238dad7..4470680f0226 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644); #endif #ifdef CONFIG_SYSCTL -static struct ctl_table kern_lockdep_table[] = { +static const struct ctl_table kern_lockdep_table[] = { #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", diff --git a/kernel/panic.c b/kernel/panic.c index fbc59b3b64d0..d8635d5cecb2 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -84,7 +84,7 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list); EXPORT_SYMBOL(panic_notifier_list); #ifdef CONFIG_SYSCTL -static struct ctl_table kern_panic_table[] = { +static const struct ctl_table kern_panic_table[] = { #ifdef CONFIG_SMP { .procname = "oops_all_cpu_backtrace", diff --git a/kernel/pid.c b/kernel/pid.c index 3a10a7b6fcf8..924084713be8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -695,7 +695,7 @@ static struct ctl_table_root pid_table_root = { .set_ownership = pid_table_root_set_ownership, }; -static struct ctl_table pid_table[] = { +static const struct ctl_table pid_table[] = { { .procname = "pid_max", .data = &init_pid_ns.pid_max, diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index f1ffa032fc32..8f6cfec87555 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -303,7 +303,7 @@ static int pid_ns_ctl_handler(const struct ctl_table *table, int write, return ret; } -static struct ctl_table pid_ns_ctl_table[] = { +static const struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), diff --git a/kernel/pid_sysctl.h b/kernel/pid_sysctl.h index 18ecaef6be41..5d8f981de7c5 100644 --- a/kernel/pid_sysctl.h +++ b/kernel/pid_sysctl.h @@ -31,7 +31,7 @@ static int pid_mfd_noexec_dointvec_minmax(const struct ctl_table *table, return err; } -static struct ctl_table pid_ns_ctl_table_vm[] = { +static const struct ctl_table pid_ns_ctl_table_vm[] = { { .procname = "memfd_noexec", .data = &init_pid_ns.memfd_noexec_scope, diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c index f5072dc85f7a..da77f3f5c1fe 100644 --- a/kernel/printk/sysctl.c +++ b/kernel/printk/sysctl.c @@ -20,7 +20,7 @@ static int proc_dointvec_minmax_sysadmin(const struct ctl_table *table, int writ return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table printk_sysctls[] = { +static const struct ctl_table printk_sysctls[] = { { .procname = 
"printk", .data = &console_loglevel, diff --git a/kernel/reboot.c b/kernel/reboot.c index a701000bab34..b5a8569e5d81 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -1287,7 +1287,7 @@ static struct attribute *reboot_attrs[] = { }; #ifdef CONFIG_SYSCTL -static struct ctl_table kern_reboot_table[] = { +static const struct ctl_table kern_reboot_table[] = { { .procname = "poweroff_cmd", .data = &poweroff_cmd, diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index db68a964e34e..83d46b9b8ec8 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -9,7 +9,7 @@ static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; #ifdef CONFIG_SYSCTL -static struct ctl_table sched_autogroup_sysctls[] = { +static const struct ctl_table sched_autogroup_sysctls[] = { { .procname = "sched_autogroup_enabled", .data = &sysctl_sched_autogroup_enabled, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9142a0394d46..165c90ba64ea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4654,7 +4654,7 @@ static int sysctl_schedstats(const struct ctl_table *table, int write, void *buf #endif /* CONFIG_SCHEDSTATS */ #ifdef CONFIG_SYSCTL -static struct ctl_table sched_core_sysctls[] = { +static const struct ctl_table sched_core_sysctls[] = { #ifdef CONFIG_SCHEDSTATS { .procname = "sched_schedstats", diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 62192ac79c30..38e4537790af 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -26,7 +26,7 @@ static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */ static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */ #ifdef CONFIG_SYSCTL -static struct ctl_table sched_dl_sysctls[] = { +static const struct ctl_table sched_dl_sysctls[] = { { .procname = "sched_deadline_period_max_us", .data = &sysctl_sched_dl_period_max, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1e78caa21436..ce2e94ccad0c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -133,7 +133,7 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536; #endif #ifdef CONFIG_SYSCTL -static struct ctl_table sched_fair_sysctls[] = { +static const struct ctl_table sched_fair_sysctls[] = { #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index bd66a46b06ac..4b8e33c615b1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -26,7 +26,7 @@ static int sched_rt_handler(const struct ctl_table *table, int write, void *buff size_t *lenp, loff_t *ppos); static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); -static struct ctl_table sched_rt_sysctls[] = { +static const struct ctl_table sched_rt_sysctls[] = { { .procname = "sched_rt_period_us", .data = &sysctl_sched_rt_period, diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index da33ec9e94ab..c49aea8c1025 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -312,7 +312,7 @@ static int sched_energy_aware_handler(const struct ctl_table *table, int write, return ret; } -static struct ctl_table sched_energy_aware_sysctls[] = { +static const struct ctl_table sched_energy_aware_sysctls[] = { { .procname = "sched_energy_aware", .data = &sysctl_sched_energy_aware, diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 385d48293a5f..f59381c4a2ff 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -2450,7 +2450,7 @@ static int 
seccomp_actions_logged_handler(const struct ctl_table *ro_table, int return ret; } -static struct ctl_table seccomp_sysctl_table[] = { +static const struct ctl_table seccomp_sysctl_table[] = { { .procname = "actions_avail", .data = (void *) &seccomp_actions_avail, diff --git a/kernel/signal.c b/kernel/signal.c index a2afd54303f0..875e97f6205a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -4950,7 +4950,7 @@ static inline void siginfo_buildtime_checks(void) } #if defined(CONFIG_SYSCTL) -static struct ctl_table signal_debug_table[] = { +static const struct ctl_table signal_debug_table[] = { #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE { .procname = "exception-trace", diff --git a/kernel/stackleak.c b/kernel/stackleak.c index 0f4804f28c61..bb65321761b4 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -45,7 +45,7 @@ static int stack_erasing_sysctl(const struct ctl_table *table, int write, str_enabled_disabled(state)); return ret; } -static struct ctl_table stackleak_sysctls[] = { +static const struct ctl_table stackleak_sysctls[] = { { .procname = "stack_erasing", .data = NULL, diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c index 3ac98bb7fb82..eb2842bd0557 100644 --- a/kernel/sysctl-test.c +++ b/kernel/sysctl-test.c @@ -374,7 +374,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value( struct kunit *test) { unsigned char data = 0; - struct ctl_table table_foo[] = { + const struct ctl_table table_foo[] = { { .procname = "foo", .data = &data, @@ -386,7 +386,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value( }, }; - struct ctl_table table_bar[] = { + const struct ctl_table table_bar[] = { { .procname = "bar", .data = &data, @@ -398,7 +398,7 @@ static void sysctl_test_register_sysctl_sz_invalid_extra_value( }, }; - struct ctl_table table_qux[] = { + const struct ctl_table table_qux[] = { { .procname = "qux", .data = &data, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7ae7a4136855..cb57da499ebb 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1609,7 +1609,7 @@ int proc_do_static_key(const struct ctl_table *table, int write, return ret; } -static struct ctl_table kern_table[] = { +static const struct ctl_table kern_table[] = { { .procname = "panic", .data = &panic_timeout, @@ -2021,7 +2021,7 @@ static struct ctl_table kern_table[] = { #endif }; -static struct ctl_table vm_table[] = { +static const struct ctl_table vm_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 40706cb36920..c8f776dc6ee0 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -301,7 +301,7 @@ static int timer_migration_handler(const struct ctl_table *table, int write, return ret; } -static struct ctl_table timer_sysctl[] = { +static const struct ctl_table timer_sysctl[] = { { .procname = "timer_migration", .data = &sysctl_timer_migration, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f79eb9386c7f..728ecda6e8d4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -8780,7 +8780,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write, return 0; } -static struct ctl_table ftrace_sysctls[] = { +static const struct ctl_table ftrace_sysctls[] = { { .procname = "ftrace_enabled", .data = &ftrace_enabled, diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 17bcad8f79de..97325fbd6283 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -2899,7 +2899,7 @@ static int 
set_max_user_events_sysctl(const struct ctl_table *table, int write, return ret; } -static struct ctl_table user_event_sysctls[] = { +static const struct ctl_table user_event_sysctls[] = { { .procname = "user_events_max", .data = &max_user_events, diff --git a/kernel/umh.c b/kernel/umh.c index be9234270777..b4da45a3a7cf 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -544,7 +544,7 @@ static int proc_cap_handler(const struct ctl_table *table, int write, return 0; } -static struct ctl_table usermodehelper_table[] = { +static const struct ctl_table usermodehelper_table[] = { { .procname = "bset", .data = &usermodehelper_bset, diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index 7282f61a8650..bfbaaecb1dd4 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -75,7 +75,7 @@ static DEFINE_CTL_TABLE_POLL(hostname_poll); static DEFINE_CTL_TABLE_POLL(domainname_poll); // Note: update 'enum uts_proc' to match any changes to this table -static struct ctl_table uts_kern_table[] = { +static const struct ctl_table uts_kern_table[] = { { .procname = "arch", .data = init_uts_ns.name.machine, @@ -129,7 +129,7 @@ static struct ctl_table uts_kern_table[] = { */ void uts_proc_notify(enum uts_proc proc) { - struct ctl_table *table = &uts_kern_table[proc]; + const struct ctl_table *table = &uts_kern_table[proc]; proc_sys_poll_notify(table->poll); } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 177abb7d0d4e..b2da7de39d06 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -1094,7 +1094,7 @@ static int proc_watchdog_cpumask(const struct ctl_table *table, int write, static const int sixty = 60; -static struct ctl_table watchdog_sysctls[] = { +static const struct ctl_table watchdog_sysctls[] = { { .procname = "watchdog", .data = &watchdog_user_enabled, diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index b6696fa1d426..4249e0cc8aaf 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -71,7 +71,7 @@ static struct test_sysctl_data test_data = { }; /* These are all under /proc/sys/debug/test_sysctl/ */ -static struct ctl_table test_table[] = { +static const struct ctl_table test_table[] = { { .procname = "int_0001", .data = &test_data.int_0001, @@ -177,7 +177,7 @@ static int test_sysctl_setup_node_tests(void) } /* Used to test that unregister actually removes the directory */ -static struct ctl_table test_table_unregister[] = { +static const struct ctl_table test_table_unregister[] = { { .procname = "unregister_error", .data = &test_data.int_0001, @@ -220,7 +220,7 @@ static int test_sysctl_run_register_mount_point(void) return 0; } -static struct ctl_table test_table_empty[] = { }; +static const struct ctl_table test_table_empty[] = { }; static int test_sysctl_run_register_empty(void) { diff --git a/mm/compaction.c b/mm/compaction.c index 73e80b2fb22e..bcc0df0066dc 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -3272,7 +3272,7 @@ static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table, return ret; } -static struct ctl_table vm_compaction[] = { +static const struct ctl_table vm_compaction[] = { { .procname = "compact_memory", .data = &sysctl_compact_memory, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 87761b042ed0..3b25b69aa94f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4867,7 +4867,7 @@ out: return ret; } -static struct ctl_table hugetlb_table[] = { +static const struct ctl_table hugetlb_table[] = { { .procname = "nr_hugepages", .data = NULL, diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 
57b7f591eee8..7735972add01 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -693,7 +693,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l free_vmemmap_page_list(&vmemmap_pages); } -static struct ctl_table hugetlb_vmemmap_sysctls[] = { +static const struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", .data = &vmemmap_optimize_enabled, diff --git a/mm/memory-failure.c b/mm/memory-failure.c index a7b8ccd29b6f..995a15eb67e2 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -124,7 +124,7 @@ const struct attribute_group memory_failure_attr_group = { .attrs = memory_failure_attr, }; -static struct ctl_table memory_failure_table[] = { +static const struct ctl_table memory_failure_table[] = { { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 044ebab2c941..1cf121ad7085 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -705,7 +705,7 @@ static void queue_oom_reaper(struct task_struct *tsk) } #ifdef CONFIG_SYSCTL -static struct ctl_table vm_oom_kill_table[] = { +static const struct ctl_table vm_oom_kill_table[] = { { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 4f5970723cf2..eb55ece39c56 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2298,7 +2298,7 @@ static int page_writeback_cpu_online(unsigned int cpu) /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE; -static struct ctl_table vm_page_writeback_sysctls[] = { +static const struct ctl_table vm_page_writeback_sysctls[] = { { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6e469c7ef9a4..579789600a3c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6166,7 +6166,7 @@ out: return ret; } -static struct ctl_table page_alloc_sysctl_table[] = { +static const struct ctl_table page_alloc_sysctl_table[] = { { .procname = "min_free_kbytes", .data = &min_free_kbytes, diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 1edc12862a7d..9b6c2f157f83 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -2038,7 +2038,7 @@ static int apparmor_dointvec(const struct ctl_table *table, int write, return proc_dointvec(table, write, buffer, lenp, ppos); } -static struct ctl_table apparmor_sysctl_table[] = { +static const struct ctl_table apparmor_sysctl_table[] = { #ifdef CONFIG_USER_NS { .procname = "unprivileged_userns_apparmor_policy", diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index 91f000eef3ad..cde08c478f32 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -9,7 +9,7 @@ #include #include "internal.h" -static struct ctl_table key_sysctls[] = { +static const struct ctl_table key_sysctls[] = { { .procname = "maxkeys", .data = &key_quota_maxkeys, diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 1a2d02fee09b..1971710620c1 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -452,7 +452,7 @@ static int yama_dointvec_minmax(const struct ctl_table *table, int write, static int max_scope = YAMA_SCOPE_NO_ATTACH; -static struct ctl_table yama_sysctl_table[] = { +static const struct ctl_table yama_sysctl_table[] = { { .procname = "ptrace_scope", .data = &ptrace_scope, -- cgit v1.2.3 From 76e961157e078bc5d3cd2df08317e00b00a829eb Mon Sep 17 00:00:00 
2001 From: "Ritesh Harjani (IBM)" Date: Sat, 11 Jan 2025 16:36:55 +0530 Subject: mm/hugetlb: fix hugepage allocation for interleaved memory nodes gather_bootmem_prealloc() assumes the start nid as 0 and size as num_node_state(N_MEMORY). That means in case if memory attached numa nodes are interleaved, then gather_bootmem_prealloc_parallel() will fail to scan few of these nodes. Since memory attached numa nodes can be interleaved in any fashion, hence ensure that the current code checks for all numa node ids (.size = nr_node_ids). Let's still keep max_threads as N_MEMORY, so that it can distributes all nr_node_ids among the these many no. threads. e.g. qemu cmdline ======================== numa_cmd="-numa node,nodeid=1,memdev=mem1,cpus=2-3 -numa node,nodeid=0,cpus=0-1 -numa dist,src=0,dst=1,val=20" mem_cmd="-object memory-backend-ram,id=mem1,size=16G" w/o this patch for cmdline (default_hugepagesz=1GB hugepagesz=1GB hugepages=2): ========================== ~ # cat /proc/meminfo |grep -i huge AnonHugePages: 0 kB ShmemHugePages: 0 kB FileHugePages: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 1048576 kB Hugetlb: 0 kB with this patch for cmdline (default_hugepagesz=1GB hugepagesz=1GB hugepages=2): =========================== ~ # cat /proc/meminfo |grep -i huge AnonHugePages: 0 kB ShmemHugePages: 0 kB FileHugePages: 0 kB HugePages_Total: 2 HugePages_Free: 2 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 1048576 kB Hugetlb: 2097152 kB Link: https://lkml.kernel.org/r/f8d8dad3a5471d284f54185f65d575a6aaab692b.1736592534.git.ritesh.list@gmail.com Fixes: b78b27d02930 ("hugetlb: parallelize 1G hugetlb initialization") Signed-off-by: Ritesh Harjani (IBM) Reported-by: Pavithra Prakash Suggested-by: Muchun Song Tested-by: Sourabh Jain Reviewed-by: Luiz Capitulino Acked-by: David Rientjes Cc: Donet Tom Cc: Gang Li Cc: Daniel Jordan Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3b25b69aa94f..65068671e460 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3309,7 +3309,7 @@ static void __init gather_bootmem_prealloc(void) .thread_fn = gather_bootmem_prealloc_parallel, .fn_arg = NULL, .start = 0, - .size = num_node_state(N_MEMORY), + .size = nr_node_ids, .align = 1, .min_chunk = 1, .max_threads = num_node_state(N_MEMORY), -- cgit v1.2.3 From 6d7bc938adca9024a6b51cf55d9b0542b653b69c Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Mon, 10 Feb 2025 22:48:56 -0500 Subject: mm: hugetlb: avoid fallback for specific node allocation of 1G pages When using the HugeTLB kernel command-line to allocate 1G pages from a specific node, such as: default_hugepagesz=1G hugepages=1:1 If node 1 happens to not have enough memory for the requested number of 1G pages, the allocation falls back to other nodes. A quick way to reproduce this is by creating a KVM guest with a memory-less node and trying to allocate 1 1G page from it. Instead of failing, the allocation will fallback to other nodes. This defeats the purpose of node specific allocation. Also, specific node allocation for 2M pages don't have this behavior: the allocation will just fail for the pages it can't satisfy. This issue happens because HugeTLB calls memblock_alloc_try_nid_raw() for 1G boot-time allocation as this function falls back to other nodes if the allocation can't be satisfied. 
Use memblock_alloc_exact_nid_raw() instead, which ensures that the allocation will only be satisfied from the specified node. Link: https://lkml.kernel.org/r/20250211034856.629371-1-luizcap@redhat.com Fixes: b5389086ad7b ("hugetlbfs: extend the definition of hugepages parameter to support node allocation") Signed-off-by: Luiz Capitulino Acked-by: Oscar Salvador Acked-by: David Hildenbrand Cc: "Mike Rapoport (IBM)" Cc: Muchun Song Cc: Zhenguo Yao Cc: Frank van der Linden Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/hugetlb.c') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 65068671e460..163190e89ea1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3145,7 +3145,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) /* do node specific alloc */ if (nid != NUMA_NO_NODE) { - m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), + m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); if (!m) return 0; -- cgit v1.2.3
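
As a reading aid for the last commit above, here is a minimal, hypothetical sketch of the node-specific boot-time allocation path it establishes. The helper name alloc_bootmem_gigantic_sketch() does not exist in the kernel tree; only the two memblock calls and their arguments are taken from the hunk shown, and the rest of __alloc_bootmem_huge_page() is omitted.

/*
 * Hypothetical helper, for illustration only (not in the kernel tree).
 * It isolates the allocator choice made by the patch above; the real
 * code lives in __alloc_bootmem_huge_page() in mm/hugetlb.c.
 */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/hugetlb.h>

static void * __init alloc_bootmem_gigantic_sketch(struct hstate *h, int nid)
{
	if (nid != NUMA_NO_NODE) {
		/*
		 * Node-specific request (e.g. "hugepages=1:1"): allocate
		 * only from 'nid' and return NULL instead of falling back
		 * to another node, so the failure is reported against the
		 * requested node.
		 */
		return memblock_alloc_exact_nid_raw(huge_page_size(h),
						    huge_page_size(h), 0,
						    MEMBLOCK_ALLOC_ACCESSIBLE,
						    nid);
	}

	/* No node given: any-node allocation with fallback is acceptable. */
	return memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
					  0, MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

The only difference the one-line change relies on is the fallback policy: memblock_alloc_try_nid_raw() may satisfy the request from another node, while memblock_alloc_exact_nid_raw() returns NULL if the requested node cannot provide the memory.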