author      Joerg Roedel <jroedel@suse.de>    2020-07-21 12:59:51 +0300
committer   Ingo Molnar <mingo@kernel.org>    2020-07-27 13:32:29 +0300
commit      6eb82f9940267d3af260989d077a2833f588beae (patch)
tree        bbeb2b40c522f162f51e8739da46cbc973973589 /arch/x86
parent      92ed301919932f777713b9172e525674157e983d (diff)
x86/mm: Pre-allocate P4D/PUD pages for vmalloc area
Pre-allocate the page-table pages for the vmalloc area at the level
which needs synchronization on x86-64: P4D with 5-level paging and
PUD with 4-level paging.
Doing this at boot makes sure that no synchronization of that area is
necessary at runtime. The synchronization takes the pgd_lock and
iterates over all page tables in the system, so it can take quite a
long time and is better avoided.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/20200721095953.6218-2-joro@8bytes.org
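The synchronization being avoided here is the walk over every page table in the system under pgd_lock that propagates a missing vmalloc entry into each task's PGD. Below is a minimal userspace model (not kernel code; the array names and sizes are made up for illustration) of what the pre-allocation buys: once every top-level copy points at the same pre-allocated lower-level page, a later vmalloc mapping is a single store that all copies see, so no per-process fixup is needed.

/*
 * Minimal userspace model of the sharing set up by this patch.
 * "top[]" stands in for each process's PGD slot covering vmalloc,
 * "shared" stands in for one pre-allocated P4D/PUD page.
 */
#include <stdio.h>

#define NPROC	4		/* pretend page tables of four processes */

static long shared[8];		/* models one pre-allocated lower-level page */
static long *top[NPROC];	/* models each process's top-level entry     */

int main(void)
{
	int i;

	/* boot time: every page-table copy gets the same lower level */
	for (i = 0; i < NPROC; i++)
		top[i] = shared;

	/* runtime: a new vmalloc mapping is one store, visible to all */
	shared[3] = 0xabcd;

	for (i = 0; i < NPROC; i++)
		printf("process %d sees entry 3 = %#lx\n", i, top[i][3]);

	return 0;
}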
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/init_64.c | 52
1 file changed, 52 insertions, 0 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dbae185511cd..e76bdb001460 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1238,6 +1238,56 @@ static void __init register_page_bootmem_info(void)
 #endif
 }
 
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
+ * Only the level which needs to be synchronized between all page-tables is
+ * allocated because the synchronization can be expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_none(*p4d)) {
+			/* Can only happen with 5-level paging */
+			p4d = p4d_alloc(&init_mm, pgd, addr);
+			if (!p4d) {
+				lvl = "p4d";
+				goto failed;
+			}
+		}
+
+		if (pgtable_l5_enabled())
+			continue;
+
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud)) {
+			/* Ends up here only with 4-level paging */
+			pud = pud_alloc(&init_mm, p4d, addr);
+			if (!pud) {
+				lvl = "pud";
+				goto failed;
+			}
+		}
+	}
+
+	return;
+
+failed:
+
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+}
+
 void __init mem_init(void)
 {
 	pci_iommu_alloc();
@@ -1261,6 +1311,8 @@ void __init mem_init(void)
 	if (get_gate_vma(&init_mm))
 		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE,
 				KCORE_USER);
 
+	preallocate_vmalloc_pages();
+
 	mem_init_print_info(NULL);
 }
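For a sense of the loop's cost, the standalone program below reproduces its stride of one step per PGD entry, using the 4-level VMALLOC_START/VMALLOC_END and PGDIR_SIZE values documented in Documentation/x86/x86_64/mm.rst; those constants and the 4 KiB page size are assumptions for illustration, not values taken from this patch.

/*
 * Rough cost estimate for preallocate_vmalloc_pages(), computed in
 * userspace with assumed 4-level-paging constants.
 */
#include <stdio.h>

#define VMALLOC_START	0xffffc90000000000ULL	/* assumed 4-level layout */
#define VMALLOC_END	0xffffe8ffffffffffULL
#define PGDIR_SIZE	(1ULL << 39)		/* 512 GiB per PGD entry  */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long long addr;
	unsigned int entries = 0;

	/* same stride as the kernel loop: one step per PGD entry */
	for (addr = VMALLOC_START; addr <= VMALLOC_END;
	     addr = ALIGN(addr + 1, PGDIR_SIZE))
		entries++;

	printf("PGD entries covering vmalloc: %u\n", entries);
	printf("PUD pages pre-allocated:      %u (%u KiB at 4 KiB/page)\n",
	       entries, entries * 4);
	return 0;
}

With those assumed numbers the vmalloc area spans 64 PGD entries, so the boot-time cost is 64 PUD pages, roughly 256 KiB, paid once instead of synchronizing all page tables at runtime.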