author		Alexandre Ghiti <alexghiti@rivosinc.com>	2023-12-14 16:29:35 +0300
committer	Palmer Dabbelt <palmer@rivosinc.com>		2024-01-11 19:01:47 +0300
commit		ff172d4818ad32dba433dae189e36684e43c5c74 (patch)
tree		67be4620ee9278501e038660b6b2b737447e6b1d /arch/riscv
parent		d3e591a38c98d448ae84eba1f89388c55382cb0e (diff)
download	linux-ff172d4818ad32dba433dae189e36684e43c5c74.tar.xz
riscv: Use hugepage mappings for vmemmap
This allows better TLB utilization and should therefore be more performant.
Before:
---[ vmemmap start ]---
0xffff8d8002000000-0xffff8d8012000000 0x000000046ec00000 256M PTE . .. .. D A G . . W R V
---[ vmemmap end ]---
After:
---[ vmemmap start ]---
0xffff8d8002000000-0xffff8d8012000000 0x000000046ec00000 256M PMD . .. .. D A G . . W R V
---[ vmemmap end ]---
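For scale (a back-of-the-envelope check, not part of the patch): the same 256M vmemmap window needs 65536 leaf entries at 4K PTE granularity but only 128 at 2M PMD granularity, so far fewer TLB entries cover it. A minimal stand-alone sketch of that arithmetic (hypothetical user-space program, constants mirror the page sizes above rather than any kernel code):

/*
 * Counts how many leaf mappings cover the 256M region from the dump
 * above at 4K (PTE) vs 2M (PMD) granularity.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 256ULL << 20;	/* 256M region */
	unsigned long long pte  = 4ULL << 10;	/* 4K base page */
	unsigned long long pmd  = 2ULL << 20;	/* 2M hugepage */

	printf("PTE leaves: %llu\n", size / pte);	/* 65536 */
	printf("PMD leaves: %llu\n", size / pmd);	/* 128   */
	return 0;
}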
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20231214132935.212864-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Diffstat (limited to 'arch/riscv')
-rw-r--r--	arch/riscv/mm/init.c	21
1 file changed, 20 insertions, 1 deletion
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index a65937336cdc..f533dd667a83 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1387,10 +1387,29 @@ void __init misc_mem_init(void)
 }

 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+			       unsigned long addr, unsigned long next)
+{
+	pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+				unsigned long addr, unsigned long next)
+{
+	vmemmap_verify((pte_t *)pmdp, node, addr, next);
+	return 1;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			       struct vmem_altmap *altmap)
 {
-	return vmemmap_populate_basepages(start, end, node, NULL);
+	/*
+	 * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
+	 * can't use hugepage mappings for 2-level page table because in case of
+	 * memory hotplug, we are not able to update all the page tables with
+	 * the new PMDs.
+	 */
+	return vmemmap_populate_hugepages(start, end, node, NULL);
 }
 #endif
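For context, the two new hooks are the arch callbacks driven by the generic vmemmap_populate_hugepages() helper in mm/sparse-vmemmap.c. A simplified sketch of that helper's control flow (paraphrased, not the verbatim kernel source; error handling and altmap corner cases trimmed) looks roughly like this:

/*
 * Sketch: walk the range in PMD-sized steps, install a 2M leaf via the
 * arch's vmemmap_set_pmd() when a contiguous block can be allocated,
 * skip steps an existing hugepage already covers (vmemmap_check_pmd()),
 * and fall back to base pages otherwise.
 */
int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		next = pmd_addr_end(addr, end);

		/* Allocate/find the upper levels covering this address. */
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			/* Try to back this 2M step with one contiguous block. */
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);

			if (p) {
				/* New riscv hook: install a PMD leaf mapping. */
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next)) {
			/* Already covered by a hugepage; nothing to do. */
			continue;
		}

		/* No 2M block available: fall back to 4K mappings. */
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}

	return 0;
}

With that helper doing the walk, the riscv vmemmap_populate() in the diff only has to delegate the whole [start, end) range to it, which is why the arch-side change is limited to the two small hooks above.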