author		Peter Xu <peterx@redhat.com>			2022-10-24 22:33:36 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2022-11-09 04:37:21 +0300
commit		b12fdbf15f92b6cf5fecdd8a1855afe8809e5c58
tree		65cccea20e27eb0909024e5a5eb0e516f4fbdc94
parent		26215b7ee923b9251f7bb12c4e5f09dc465d35f2
Revert "mm/uffd: fix warning without PTE_MARKER_UFFD_WP compiled in"
With " mm/uffd: Fix vma check on userfault for wp" to fix the
registration, we'll be safe to remove the macro hacks now.
Link: https://lkml.kernel.org/r/20221024193336.1233616-3-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
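
[Editorial note, not part of the commit message: the #ifdef CONFIG_PTE_MARKER_UFFD_WP guards being reverted were only needed because uffd-wp could previously be registered on file-backed VMAs even when pte markers were compiled out. The companion patch named above rejects such registrations up front, so the zap/protect paths below can never reach the marker-installing code without marker support. A minimal sketch of that registration-time check, simplified from vma_can_userfault() in include/linux/userfaultfd_k.h (the real helper also handles VM_UFFD_MINOR and shmem/hugetlbfs details; this is not a verbatim copy of the patch):]

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * Without pte markers, uffd-wp cannot keep the wr-protect
	 * state across pte zaps on file-backed memory, so only allow
	 * wp registration on anonymous vmas.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	       vma_is_shmem(vma);
}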
 mm/hugetlb.c  | 4 ----
 mm/memory.c   | 2 --
 mm/mprotect.c | 2 --
 3 files changed, 0 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d11e92117d4a..fc8908d715d6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5114,7 +5114,6 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		 * unmapped and its refcount is dropped, so just clear pte here.
 		 */
 		if (unlikely(!pte_present(pte))) {
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
 			/*
 			 * If the pte was wr-protected by uffd-wp in any of the
 			 * swap forms, meanwhile the caller does not want to
@@ -5126,7 +5125,6 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 				set_huge_pte_at(mm, address, ptep,
 						make_pte_marker(PTE_MARKER_UFFD_WP));
 			else
-#endif
 				huge_pte_clear(mm, address, ptep, sz);
 			spin_unlock(ptl);
 			continue;
@@ -5155,13 +5153,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
 			set_page_dirty(page);
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
 		/* Leave a uffd-wp pte marker if needed */
 		if (huge_pte_uffd_wp(pte) &&
 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
 			set_huge_pte_at(mm, address, ptep,
 					make_pte_marker(PTE_MARKER_UFFD_WP));
-#endif
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
 		page_remove_rmap(page, vma, true);
 
diff --git a/mm/memory.c b/mm/memory.c
index f88c351aecd4..81cc75e71888 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1393,12 +1393,10 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
 			      unsigned long addr, pte_t *pte,
 			      struct zap_details *details, pte_t pteval)
 {
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
 	if (zap_drop_file_uffd_wp(details))
 		return;
 
 	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
-#endif
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 99762403cc8f..8d770855b591 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -267,7 +267,6 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
 		} else {
 			/* It must be an none page, or what else?.. */
 			WARN_ON_ONCE(!pte_none(oldpte));
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
 			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
 				/*
 				 * For file-backed mem, we need to be able to
@@ -279,7 +278,6 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
 					   make_pte_marker(PTE_MARKER_UFFD_WP));
 				pages++;
 			}
-#endif
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
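
[Editorial note: for readers tracing the make_pte_marker(PTE_MARKER_UFFD_WP) calls above, a pte marker is a non-present swap-format pte whose offset bits carry flags, which is how the uffd-wp state survives the pte being zapped. A minimal sketch of the helpers, simplified from include/linux/swapops.h when pte markers are compiled in; see that header for the real definitions:]

/*
 * A marker is a swap pte of type SWP_PTE_MARKER whose offset field
 * carries flag bits such as PTE_MARKER_UFFD_WP, so the wr-protect
 * state survives huge_pte_clear() or a pte zap.
 */
typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP	BIT(0)

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}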