Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/Makefile          |   3
-rw-r--r--  arch/riscv/mm/cacheflush.c      |   8
-rw-r--r--  arch/riscv/mm/dma-noncoherent.c |   2
-rw-r--r--  arch/riscv/mm/fault.c           |  78
-rw-r--r--  arch/riscv/mm/hugetlbpage.c     |  40
-rw-r--r--  arch/riscv/mm/init.c            | 118
6 files changed, 191 insertions(+), 58 deletions(-)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b85e9e82f082..9c454f90fd3d 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,8 +13,7 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
-obj-y += extable.o
-obj-$(CONFIG_MMU) += fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += pgtable.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index fca532ddf3ec..fbc59b3f69f2 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -104,9 +104,9 @@ EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
-static void cbo_get_block_size(struct device_node *node,
- const char *name, u32 *block_size,
- unsigned long *first_hartid)
+static void __init cbo_get_block_size(struct device_node *node,
+ const char *name, u32 *block_size,
+ unsigned long *first_hartid)
{
unsigned long hartid;
u32 val;
@@ -126,7 +126,7 @@ static void cbo_get_block_size(struct device_node *node,
}
}
-void riscv_init_cbo_blocksizes(void)
+void __init riscv_init_cbo_blocksizes(void)
{
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index d919efab6eba..d51a75864e53 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -10,7 +10,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
-static bool noncoherent_supported;
+static bool noncoherent_supported __ro_after_init;
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 8685f85a7474..6ea2cce4cc17 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -84,13 +84,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
BUG();
}
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void
+bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
{
/*
* Something tried to access memory that isn't in our memory map.
* Fix it, but check if it's kernel or user first.
*/
- mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -100,6 +100,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
no_context(regs, addr);
}
+static inline void
+bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
+ unsigned long addr)
+{
+ mmap_read_unlock(mm);
+
+ bad_area_nosemaphore(regs, code, addr);
+}
+
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
pgd_t *pgd, *pgd_k;
@@ -238,24 +247,12 @@ void handle_page_fault(struct pt_regs *regs)
* only copy the information from the master page table,
* nothing more.
*/
- if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
+ if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
+ unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
vmalloc_fault(regs, code, addr);
return;
}
-#ifdef CONFIG_64BIT
- /*
- * Modules in 64bit kernels lie in their own virtual region which is not
- * in the vmalloc region, but dealing with page faults in this region
- * or the vmalloc region amounts to doing the same thing: checking that
- * the mapping exists in init_mm.pgd and updating user page table, so
- * just use vmalloc_fault.
- */
- if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
- vmalloc_fault(regs, code, addr);
- return;
- }
-#endif
/* Enable interrupts if they were enabled in the parent context. */
if (!regs_irqs_disabled(regs))
local_irq_enable();
@@ -286,24 +283,41 @@ void handle_page_fault(struct pt_regs *regs)
flags |= FAULT_FLAG_WRITE;
else if (cause == EXC_INST_PAGE_FAULT)
flags |= FAULT_FLAG_INSTRUCTION;
-retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, addr);
- if (unlikely(!vma)) {
- tsk->thread.bad_cause = cause;
- bad_area(regs, mm, code, addr);
- return;
+#ifdef CONFIG_PER_VMA_LOCK
+ if (!(flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, addr);
+ if (!vma)
+ goto lock_mmap;
+
+ if (unlikely(access_error(cause, vma))) {
+ vma_end_read(vma);
+ goto lock_mmap;
}
- if (likely(vma->vm_start <= addr))
- goto good_area;
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- tsk->thread.bad_cause = cause;
- bad_area(regs, mm, code, addr);
+
+ fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, addr);
return;
}
- if (unlikely(expand_stack(vma, addr))) {
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
+
+retry:
+ vma = lock_mm_and_find_vma(mm, addr, regs);
+ if (unlikely(!vma)) {
tsk->thread.bad_cause = cause;
- bad_area(regs, mm, code, addr);
+ bad_area_nosemaphore(regs, code, addr);
return;
}
@@ -311,7 +325,6 @@ retry:
* Ok, we have a good vm_area for this memory access, so
* we can handle it.
*/
-good_area:
code = SEGV_ACCERR;
if (unlikely(access_error(cause, vma))) {
@@ -355,6 +368,9 @@ good_area:
mmap_read_unlock(mm);
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
if (unlikely(fault & VM_FAULT_ERROR)) {
tsk->thread.bad_cause = cause;
mm_fault_error(regs, addr, fault);
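
Because the -/+ interleaving above makes the final control flow hard to read, here is a reconstruction of how the locking section of handle_page_fault() looks once this hunk is applied. It is not a compilable excerpt (the surrounding declarations and the later fault accounting are elided) and the comments are editorial; every call shown is taken from the hunk itself.

#ifdef CONFIG_PER_VMA_LOCK
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;				/* only user faults try the per-VMA lock path */

	vma = lock_vma_under_rcu(mm, addr);		/* RCU lookup + per-VMA read lock */
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_error(cause, vma))) {
		vma_end_read(vma);			/* fall back and report via the mmap_lock path */
		goto lock_mmap;
	}

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;				/* resolved without ever taking mmap_lock */
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);	/* takes mmap_lock, finds the VMA, expands the stack if needed */
	if (unlikely(!vma)) {
		tsk->thread.bad_cause = cause;
		bad_area_nosemaphore(regs, code, addr);	/* lock is not held when the lookup fails */
		return;
	}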
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index a163a3e0f0d4..96225a8533ad 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -3,6 +3,30 @@
#include <linux/err.h>
#ifdef CONFIG_RISCV_ISA_SVNAPOT
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ unsigned long pte_num;
+ int i;
+ pte_t orig_pte = ptep_get(ptep);
+
+ if (!pte_present(orig_pte) || !pte_napot(orig_pte))
+ return orig_pte;
+
+ pte_num = napot_pte_num(napot_cont_order(orig_pte));
+
+ for (i = 0; i < pte_num; i++, ptep++) {
+ pte_t pte = ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+
+ return orig_pte;
+}
+
pte_t *huge_pte_alloc(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
@@ -43,13 +67,17 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
for_each_napot_order(order) {
if (napot_cont_size(order) == sz) {
- pte = pte_alloc_map(mm, pmd, addr & napot_cont_mask(order));
+ pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
break;
}
}
out:
- WARN_ON_ONCE(pte && pte_present(*pte) && !pte_huge(*pte));
+ if (pte) {
+ pte_t pteval = ptep_get_lockless(pte);
+
+ WARN_ON_ONCE(pte_present(pteval) && !pte_huge(pteval));
+ }
return pte;
}
@@ -90,7 +118,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
for_each_napot_order(order) {
if (napot_cont_size(order) == sz) {
- pte = pte_offset_kernel(pmd, addr & napot_cont_mask(order));
+ pte = pte_offset_huge(pmd, addr & napot_cont_mask(order));
break;
}
}
@@ -218,6 +246,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
unsigned long order;
+ pte_t orig_pte;
int i, pte_num;
if (!pte_napot(pte)) {
@@ -228,9 +257,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
order = napot_cont_order(pte);
pte_num = napot_pte_num(order);
ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
+ orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
+
+ orig_pte = pte_wrprotect(orig_pte);
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
- ptep_set_wrprotect(mm, addr, ptep);
+ set_pte_at(mm, addr, ptep, orig_pte);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
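
A quick aside on what the new huge_ptep_get() above is doing: a Svnapot hugepage is represented by napot_pte_num(order) consecutive base PTEs, and hardware may set the accessed/dirty bits on any member of that group, so a single read has to fold the whole group. The self-contained model below reproduces just that folding in userspace; the flag bits, the pte_model_t type and the 16-entry group (64 KiB with 4 KiB base pages) are illustrative stand-ins, not the kernel's definitions.

/*
 * Userspace model of the dirty/young folding done by huge_ptep_get().
 * The PTE layout and helpers are hypothetical stand-ins.
 */
#include <stdio.h>

#define PTE_DIRTY (1u << 0)	/* stand-in for the hardware D bit */
#define PTE_YOUNG (1u << 1)	/* stand-in for the hardware A bit */

typedef unsigned int pte_model_t;

static pte_model_t fold_napot_group(const pte_model_t *group, unsigned int pte_num)
{
	pte_model_t folded = group[0];

	for (unsigned int i = 0; i < pte_num; i++) {
		if (group[i] & PTE_DIRTY)
			folded |= PTE_DIRTY;
		if (group[i] & PTE_YOUNG)
			folded |= PTE_YOUNG;
	}
	return folded;
}

int main(void)
{
	pte_model_t group[16] = { 0 };		/* 16 x 4 KiB = one 64 KiB NAPOT page */

	group[7]  = PTE_YOUNG;			/* accessed via one member ... */
	group[12] = PTE_DIRTY | PTE_YOUNG;	/* ... dirtied via another */

	printf("folded flags: %#x\n", fold_napot_group(group, 16));	/* prints 0x3 */
	return 0;
}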
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 747e5b1ef02d..9ce504737d18 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -23,6 +23,7 @@
#ifdef CONFIG_RELOCATABLE
#include <linux/elf.h>
#endif
+#include <linux/kfence.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -266,7 +267,6 @@ static void __init setup_bootmem(void)
dma_contiguous_reserve(dma32_phys_limit);
if (IS_ENABLED(CONFIG_64BIT))
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
- memblock_allow_resize();
}
#ifdef CONFIG_MMU
@@ -293,7 +293,7 @@ static const pgprot_t protection_map[16] = {
[VM_EXEC] = PAGE_EXEC,
[VM_EXEC | VM_READ] = PAGE_READ_EXEC,
[VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
- [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READ,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
@@ -356,7 +356,7 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+ BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr)));
return __pa(vaddr);
}
@@ -439,7 +439,7 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+ BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr)));
return __pa(vaddr);
}
@@ -659,18 +659,19 @@ void __init create_pgd_mapping(pgd_t *pgdp,
create_pgd_next_mapping(nextp, va, pa, sz, prot);
}
-static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
+static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+ phys_addr_t size)
{
- if (!(base & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
+ if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
return PGDIR_SIZE;
- if (!(base & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+ if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE;
- if (!(base & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+ if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
return PUD_SIZE;
- if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+ if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
return PMD_SIZE;
return PAGE_SIZE;
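
The rewritten best_map_size() now also checks the virtual address: a huge leaf mapping is only usable when the physical and virtual addresses are aligned to the same block boundary, otherwise pa and va would disagree in the low bits inside the block. The stand-alone model below reproduces the check for the two block sizes fixed by the 4 KiB-base-page layout (PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB); best_map_size_model() and the addresses in main() are illustrative, not kernel code.

/* Userspace model of the alignment rule enforced by best_map_size(). */
#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)
#define PUD_SIZE (1UL * 1024 * 1024 * 1024)

static uint64_t best_map_size_model(uint64_t pa, uint64_t va, uint64_t size)
{
	/* A leaf mapping of block size S is only legal when pa and va are
	 * both S-aligned and at least S bytes remain to be mapped. */
	if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
		return PUD_SIZE;
	if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
		return PMD_SIZE;
	return 4096;
}

int main(void)
{
	/* pa is 1 GiB aligned, but va is only 2 MiB aligned: checking pa
	 * alone (as the old code did) would wrongly pick PUD_SIZE here. */
	uint64_t pa = 0x80000000UL;
	uint64_t va = 0xffffffd800200000UL;

	printf("map size: %#lx\n",
	       (unsigned long)best_map_size_model(pa, va, 2 * PUD_SIZE));	/* 0x200000 */
	return 0;
}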
@@ -922,9 +923,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
uintptr_t dtb_pa)
{
+#ifndef CONFIG_BUILTIN_DTB
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
-#ifndef CONFIG_BUILTIN_DTB
/* Make sure the fdt fixmap address is always aligned on PMD size */
BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
@@ -1167,14 +1168,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
}
static void __init create_linear_mapping_range(phys_addr_t start,
- phys_addr_t end)
+ phys_addr_t end,
+ uintptr_t fixed_map_size)
{
phys_addr_t pa;
uintptr_t va, map_size;
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
- map_size = best_map_size(pa, end - pa);
+ map_size = fixed_map_size ? fixed_map_size :
+ best_map_size(pa, va, end - pa);
create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
pgprot_from_va(va));
@@ -1184,6 +1187,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
static void __init create_linear_mapping_page_table(void)
{
phys_addr_t start, end;
+ phys_addr_t kfence_pool __maybe_unused;
u64 i;
#ifdef CONFIG_STRICT_KERNEL_RWX
@@ -1197,6 +1201,19 @@ static void __init create_linear_mapping_page_table(void)
memblock_mark_nomap(krodata_start, krodata_size);
#endif
+#ifdef CONFIG_KFENCE
+ /*
+ * kfence pool must be backed by PAGE_SIZE mappings, so allocate it
+ * before we setup the linear mapping so that we avoid using hugepages
+ * for this region.
+ */
+ kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ BUG_ON(!kfence_pool);
+
+ memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ __kfence_pool = __va(kfence_pool);
+#endif
+
/* Map all memory banks in the linear mapping */
for_each_mem_range(i, &start, &end) {
if (start >= end)
@@ -1207,17 +1224,25 @@ static void __init create_linear_mapping_page_table(void)
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
- create_linear_mapping_range(start, end);
+ create_linear_mapping_range(start, end, 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
- create_linear_mapping_range(ktext_start, ktext_start + ktext_size);
+ create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
create_linear_mapping_range(krodata_start,
- krodata_start + krodata_size);
+ krodata_start + krodata_size, 0);
memblock_clear_nomap(ktext_start, ktext_size);
memblock_clear_nomap(krodata_start, krodata_size);
#endif
+
+#ifdef CONFIG_KFENCE
+ create_linear_mapping_range(kfence_pool,
+ kfence_pool + KFENCE_POOL_SIZE,
+ PAGE_SIZE);
+
+ memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+#endif
}
static void __init setup_vm_final(void)
@@ -1321,7 +1346,7 @@ static void __init reserve_crashkernel(void)
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
search_start,
- min(search_end, (unsigned long) SZ_4G));
+ min(search_end, (unsigned long)(SZ_4G - 1)));
if (crash_base == 0) {
/* Try again without restricting region to 32bit addressible memory */
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
@@ -1344,6 +1369,9 @@ void __init paging_init(void)
{
setup_bootmem();
setup_vm_final();
+
+ /* Depend on that Linear Mapping is ready */
+ memblock_allow_resize();
}
void __init misc_mem_init(void)
@@ -1363,3 +1391,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+/*
+ * Pre-allocates page-table pages for a specific area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
+ const char *area)
+{
+ unsigned long addr;
+ const char *lvl;
+
+ for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ lvl = "p4d";
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d)
+ goto failed;
+
+ if (pgtable_l5_enabled)
+ continue;
+
+ lvl = "pud";
+ pud = pud_alloc(&init_mm, p4d, addr);
+ if (!pud)
+ goto failed;
+
+ if (pgtable_l4_enabled)
+ continue;
+
+ lvl = "pmd";
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ goto failed;
+ }
+ return;
+
+failed:
+ /*
+ * The pages have to be there now or they will be missing in
+ * process page-tables later.
+ */
+ panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
+}
+
+void __init pgtable_cache_init(void)
+{
+ preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
+ if (IS_ENABLED(CONFIG_MODULES))
+ preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+}
+#endif
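
The new pgtable_cache_init() walks the vmalloc and modules regions in PGDIR_SIZE steps so that every shared top-level entry covering them already exists in init_mm before process page tables are created; its (CONFIG_MMU && CONFIG_64BIT) guard mirrors the fault.c hunk above, which stops routing those regions through vmalloc_fault() on 64-bit MMU kernels. The small userspace model below only counts how many top-level entries such a walk visits; PGDIR_SIZE_MODEL (the Sv39 value of 1 GiB) and the region in main() are illustrative assumptions, not the kernel's values.

/* Userspace model of the PGDIR_SIZE-stride walk in preallocate_pgd_pages_range(). */
#include <stdint.h>
#include <stdio.h>

#define PGDIR_SIZE_MODEL (1UL << 30)	/* 1 GiB per top-level entry (Sv39 value) */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Count the top-level entries visited by a [start, end) walk using the same
 * ALIGN(addr + 1, PGDIR_SIZE) stride as the kernel function above. */
static unsigned long count_pgd_entries(uint64_t start, uint64_t end)
{
	unsigned long n = 0;

	for (uint64_t addr = start; addr < end && addr >= start;
	     addr = ALIGN_UP(addr + 1, PGDIR_SIZE_MODEL))
		n++;
	return n;
}

int main(void)
{
	/* Hypothetical 64 GiB vmalloc-like region starting mid-entry. */
	uint64_t start = 0xffffffc800000000UL + 0x200000UL;
	uint64_t end   = start + (64UL << 30);

	printf("top-level entries touched: %lu\n", count_pgd_entries(start, end));	/* 65 */
	return 0;
}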