author    Linus Torvalds <torvalds@linux-foundation.org>  2020-10-13 01:02:38 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-13 01:02:38 +0300
commit    c1b4ec85ee40cc7a9f7b48bea9013094f2d88203 (patch)
tree      bb6d86d5553ed678ae50023f31c40a07be215607 /arch
parent    b85cac574592b843c4be93c83303feeee0c4dc25 (diff)
parent    7a27ef5e83089090f3a4073a9157c862ef00acfc (diff)
Merge tag 'x86-mm-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:
 "Do not sync vmalloc/ioremap mappings on x86-64 kernels.

  Hopefully now without the bugs!"

* tag 'x86-mm-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/64: Update comment in preallocate_vmalloc_pages()
  x86/mm/64: Do not sync vmalloc/ioremap mappings
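These updates opt x86-64 out of the generic kernel page-table sync path. As a hedged sketch of that path (paraphrased from memory of the generic mm code, not part of this merge): page-table walkers record which levels they modified in a pgtbl_mod_mask and only call the per-architecture hook when the architecture requested it via ARCH_PAGE_TABLE_SYNC_MASK. sync_mask_example() below is a hypothetical wrapper invented for illustration only.

/*
 * Hedged sketch of the generic sync pattern (paraphrased, not part of
 * this merge); sync_mask_example() is a hypothetical wrapper invented
 * for illustration only.
 */
static void sync_mask_example(unsigned long start, unsigned long end,
			      pgtbl_mod_mask mask)
{
	/*
	 * 'mask' carries bits such as PGTBL_P4D_MODIFIED and
	 * PGTBL_PGD_MODIFIED, set by the page-table walker.  The per-arch
	 * hook runs only when the architecture opted in by defining
	 * ARCH_PAGE_TABLE_SYNC_MASK to include those bits.
	 */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}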
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/include/asm/pgtable_64_types.h |  2 --
 -rw-r--r--  arch/x86/mm/init_64.c                   | 20 ++++++++++----------
 2 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 8f63efb2a2cc..52e5f5f2240d 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;
 
 #define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))
 
-#define ARCH_PAGE_TABLE_SYNC_MASK	(pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
-
 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
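Deleting this definition is what actually switches the sync off on x86-64: when an architecture does not provide ARCH_PAGE_TABLE_SYNC_MASK, the generic header falls back to an empty mask, so the test shown in the sketch above never fires. A hedged sketch of that fallback (from memory, not part of this diff):

/*
 * Hedged sketch of the generic fallback (paraphrased, not part of this
 * diff): with no arch-provided mask, "mask & ARCH_PAGE_TABLE_SYNC_MASK"
 * is always zero and arch_sync_kernel_mappings() is never invoked.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif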
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a4ac13cc3fdc..b5a3fa4033d3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -217,11 +217,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
 		sync_global_pgds_l4(start, end);
 }
 
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
-	sync_global_pgds(start, end);
-}
-
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -1257,14 +1252,19 @@ static void __init preallocate_vmalloc_pages(void)
 		if (!p4d)
 			goto failed;
 
-		/*
-		 * With 5-level paging the P4D level is not folded. So the PGDs
-		 * are now populated and there is no need to walk down to the
-		 * PUD level.
-		 */
 		if (pgtable_l5_enabled())
 			continue;
 
+		/*
+		 * The goal here is to allocate all possibly required
+		 * hardware page tables pointed to by the top hardware
+		 * level.
+		 *
+		 * On 4-level systems, the P4D layer is folded away and
+		 * the above code does no preallocation. Below, go down
+		 * to the pud _software_ level to ensure the second
+		 * hardware level is allocated on 4-level systems too.
+		 */
 		lvl = "pud";
 		pud = pud_alloc(&init_mm, p4d, addr);
 		if (!pud)
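For readers without the rest of the file, this hunk sits inside the loop of preallocate_vmalloc_pages(). The sketch below reconstructs the shape of that loop under stated assumptions: it is condensed, the real loop bounds and failure path differ in detail, and preallocate_vmalloc_pages_sketch() is a stand-in name, not the kernel function itself.

/*
 * Simplified sketch of the loop preallocate_vmalloc_pages() runs over
 * the vmalloc area (not a verbatim copy of arch/x86/mm/init_64.c; the
 * real loop bounds and failure path differ in detail).  One top-level
 * entry is allocated per PGDIR_SIZE stride so that later vmalloc
 * faults never need to sync PGD/P4D entries into other page tables.
 */
static void __init preallocate_vmalloc_pages_sketch(void)
{
	unsigned long addr;

	for (addr = VMALLOC_START; addr <= VMALLOC_END;
	     addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		/* With 5-level paging this populates the PGD entry. */
		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			goto failed;

		if (pgtable_l5_enabled())
			continue;

		/*
		 * With 4-level paging the P4D is folded into the PGD, so
		 * allocate one level further down (the PUD) to populate
		 * the second hardware level as well.
		 */
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			goto failed;
	}

	return;

failed:
	/* The entries must exist before they are copied into process page tables. */
	panic("Failed to preallocate page tables for the vmalloc area\n");
}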