Diffstat (limited to 'arch/riscv')
38 files changed, 284 insertions, 184 deletions
diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi index b724fb6d9689..b8063ba6d6d7 100644 --- a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi +++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi @@ -309,7 +309,7 @@ 1024 1024 1024 1024>; snps,priority = <0 1 2 3 4 5 6 7>; snps,dma-masters = <2>; - snps,data-width = <4>; + snps,data-width = <2>; status = "disabled"; }; diff --git a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h index 256de17f5261..ae49c908e7fb 100644 --- a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h +++ b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h @@ -89,7 +89,7 @@ #define GPOUT_SYS_SDIO1_DATA1 59 #define GPOUT_SYS_SDIO1_DATA2 60 #define GPOUT_SYS_SDIO1_DATA3 61 -#define GPOUT_SYS_SDIO1_DATA4 63 +#define GPOUT_SYS_SDIO1_DATA4 62 #define GPOUT_SYS_SDIO1_DATA5 63 #define GPOUT_SYS_SDIO1_DATA6 64 #define GPOUT_SYS_SDIO1_DATA7 65 diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig index ad58dad9a580..c67095a3d669 100644 --- a/arch/riscv/crypto/Kconfig +++ b/arch/riscv/crypto/Kconfig @@ -22,7 +22,6 @@ config CRYPTO_CHACHA_RISCV64 tristate "Ciphers: ChaCha" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO select CRYPTO_SKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC help Length-preserving ciphers: ChaCha20 stream cipher algorithm diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index f0da9d7b39c3..bc6c77ba837d 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,5 +1,9 @@ ifdef CONFIG_RELOCATABLE -KBUILD_CFLAGS += -fno-pie +# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel +# doesn't have a GOT setup at that point. So instead just use medany: it's +# usually position-independent, so it should be good enough for the errata +# handling. +KBUILD_CFLAGS += -fno-pie -mcmodel=medany endif ifdef CONFIG_RISCV_ALTERNATIVE_EARLY diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 721ec275ce57..231d777d936c 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -115,24 +115,19 @@ \old_c .endm -#define _ALTERNATIVE_CFG(old_c, ...) \ - ALTERNATIVE_CFG old_c - -#define _ALTERNATIVE_CFG_2(old_c, ...) \ - ALTERNATIVE_CFG old_c +#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c +#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c #else /* !__ASSEMBLY__ */ -#define __ALTERNATIVE_CFG(old_c) \ - old_c "\n" +#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n" +#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n" -#define _ALTERNATIVE_CFG(old_c, ...) \ - __ALTERNATIVE_CFG(old_c) +#endif /* __ASSEMBLY__ */ -#define _ALTERNATIVE_CFG_2(old_c, ...) \ - __ALTERNATIVE_CFG(old_c) +#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c) +#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c) -#endif /* __ASSEMBLY__ */ #endif /* CONFIG_RISCV_ALTERNATIVE */ /* diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 8de73f91bfa3..b59ffeb668d6 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -34,11 +34,6 @@ static inline void flush_dcache_page(struct page *page) flush_dcache_folio(page_folio(page)); } -/* - * RISC-V doesn't have an instruction to flush parts of the instruction cache, - * so instead we just flush the whole thing. 
- */ -#define flush_icache_range(start, end) flush_icache_all() #define flush_icache_user_page(vma, pg, addr, len) \ do { \ if (vma->vm_flags & VM_EXEC) \ @@ -78,6 +73,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ +/* + * RISC-V doesn't have an instruction to flush parts of the instruction cache, + * so instead we just flush the whole thing. + */ +#define flush_icache_range flush_icache_range +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ + flush_icache_all(); +} + extern unsigned int riscv_cbom_block_size; extern unsigned int riscv_cboz_block_size; void riscv_init_cbo_blocksizes(void); diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index ebbce134917c..6efa95ad033a 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -169,7 +169,7 @@ break; \ case 4: \ __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \ - __ret, __ptr, (long), __old, __new); \ + __ret, __ptr, (long)(int)(long), __old, __new); \ break; \ case 8: \ __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 2cddd79ff21b..f253c8dae878 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -92,7 +92,7 @@ struct dyn_arch_ftrace { #define make_call_t0(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_t0(offset); \ call[1] = to_jalr_t0(offset); \ } while (0) @@ -108,7 +108,7 @@ do { \ #define make_call_ra(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_ra(offset); \ call[1] = to_jalr_ra(offset); \ } while (0) diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h index fc8130f995c1..6907c456ac8c 100644 --- a/arch/riscv/include/asm/futex.h +++ b/arch/riscv/include/asm/futex.h @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) - : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval) : "memory"); __disable_user_access(); diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index faf3624d8057..446126497768 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); + unsigned long addr, pte_t *ptep, + unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 46677daf708b..cc11c4544cff 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -19,16 +19,9 @@ #ifndef __ASSEMBLY__ +void arch_kgdb_breakpoint(void); extern unsigned long kgdb_compiled_break; -static inline void arch_kgdb_breakpoint(void) -{ - asm(".global kgdb_compiled_break\n" - ".option norvc\n" - "kgdb_compiled_break: ebreak\n" - ".option rvc\n"); -} - #endif /* !__ASSEMBLY__ */ #define DBG_REG_ZERO "zero" diff --git 
a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index febf820d5058..e8beadc2bffd 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -26,12 +26,9 @@ * When not using MMU this corresponds to the first free page in * physical memory (aligned on a page boundary). */ -#ifdef CONFIG_64BIT #ifdef CONFIG_MMU +#ifdef CONFIG_64BIT #define PAGE_OFFSET kernel_map.page_offset -#else -#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) -#endif /* * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so * define the PAGE_OFFSET value for SV48 and SV39. @@ -41,6 +38,9 @@ #else #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #endif /* CONFIG_64BIT */ +#else +#define PAGE_OFFSET ((unsigned long)phys_ram_base) +#endif /* CONFIG_MMU */ #ifndef __ASSEMBLY__ @@ -97,11 +97,7 @@ typedef struct page *pgtable_t; #define MIN_MEMBLOCK_ADDR 0 #endif -#ifdef CONFIG_MMU #define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base)) -#else -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) -#endif /* CONFIG_MMU */ struct kernel_mapping { unsigned long page_offset; diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index c0866ada5bbc..03881122506a 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -12,7 +12,7 @@ #include <asm/pgtable-bits.h> #ifndef CONFIG_MMU -#define KERNEL_LINK_ADDR PAGE_OFFSET +#define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL) #define KERN_VIRT_SIZE (UL(-1)) #else @@ -916,7 +916,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) */ #ifdef CONFIG_64BIT #define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) -#define TASK_SIZE_MAX LONG_MAX #ifdef CONFIG_COMPAT #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 121fff429dce..eceabf59ae48 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { args[0] = regs->orig_a0; - args++; - memcpy(args, ®s->a1, 5 * sizeof(args[0])); + args[1] = regs->a1; + args[2] = regs->a2; + args[3] = regs->a3; + args[4] = regs->a4; + args[5] = regs->a5; } static inline int syscall_get_arch(struct task_struct *task) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 69dc8aaab3fb..6b3b5255c889 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) endif -CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) -CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,) +CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init) +CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init) ifdef CONFIG_KEXEC_CORE AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 2d40736fc37c..26b085dbdd07 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu) if (!np) return -ENOENT; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, 
"i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); prev = np; @@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu) break; if (level <= levels) break; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, "i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); levels = level; } diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c index e6fbaaf54956..87d655944803 100644 --- a/arch/riscv/kernel/cpu_ops_sbi.c +++ b/arch/riscv/kernel/cpu_ops_sbi.c @@ -18,10 +18,10 @@ const struct cpu_operations cpu_ops_sbi; /* * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can - * be invoked from multiple threads in parallel. Define a per cpu data + * be invoked from multiple threads in parallel. Define an array of boot data * to handle that. */ -static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); +static struct sbi_hart_boot_data boot_data[NR_CPUS]; static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr, unsigned long priv) @@ -67,7 +67,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) unsigned long boot_addr = __pa_symbol(secondary_start_sbi); unsigned long hartid = cpuid_to_hartid_map(cpuid); unsigned long hsm_data; - struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid); + struct sbi_hart_boot_data *bdata = &boot_data[cpuid]; /* Make sure tidle is updated */ smp_mb(); diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c..308430af3e8f 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -454,7 +454,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa, if (bit < RISCV_ISA_EXT_BASE) *this_hwcap |= isa2hwcap[bit]; } - } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); + } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX)); } static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 3c37661801f9..e783a72d051f 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, case R_RISCV_ALIGN: case R_RISCV_RELAX: break; + case R_RISCV_64: + *(u64 *)loc = val; + break; default: pr_err("Unknown rela relocation: %d\n", r_type); return -ENOEXEC; diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c index 2e0266ae6bd7..9f3db3503dab 100644 --- a/arch/riscv/kernel/kgdb.c +++ b/arch/riscv/kernel/kgdb.c @@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->epc = pc; } +noinline void arch_kgdb_breakpoint(void) +{ + asm(".global kgdb_compiled_break\n" + "kgdb_compiled_break: ebreak\n"); +} + void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, char *remcom_out_buffer) { diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c index e264e59e596e..91d0b355ceef 100644 --- 
a/arch/riscv/kernel/module-sections.c +++ b/arch/riscv/kernel/module-sections.c @@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx) static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { - unsigned int type, i; - - for (i = 0; i < num; i++) { - type = ELF_RISCV_R_TYPE(relas[i].r_info); - if (type == R_RISCV_CALL_PLT) { + for (int i = 0; i < num; i++) { + switch (ELF_R_TYPE(relas[i].r_info)) { + case R_RISCV_CALL_PLT: + case R_RISCV_PLT32: if (!duplicate_rela(relas, i)) (*plts)++; - } else if (type == R_RISCV_GOT_HI20) { + break; + case R_RISCV_GOT_HI20: if (!duplicate_rela(relas, i)) (*gots)++; + break; } } } diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 47d0ebeec93c..7f6147c18033 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me, kfree(bucket_iter); } - kfree(*relocation_hashtable); + kvfree(*relocation_hashtable); } static int add_relocation_to_accumulate(struct module *me, int type, @@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations, hashtable_size <<= should_double_size; - *relocation_hashtable = kmalloc_array(hashtable_size, - sizeof(**relocation_hashtable), - GFP_KERNEL); + /* Number of relocations may be large, so kvmalloc it */ + *relocation_hashtable = kvmalloc_array(hashtable_size, + sizeof(**relocation_hashtable), + GFP_KERNEL); if (!*relocation_hashtable) return 0; @@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } j++; - if (j > sechdrs[relsec].sh_size / sizeof(*rel)) + if (j == num_relocations) j = 0; } while (j_idx != j); diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c index 4b3dc8beaf77..cc15f7ca6cc1 100644 --- a/arch/riscv/kernel/probes/uprobes.c +++ b/arch/riscv/kernel/probes/uprobes.c @@ -167,6 +167,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, /* Initialize the slot */ void *kaddr = kmap_atomic(page); void *dst = kaddr + (vaddr & ~PAGE_MASK); + unsigned long start = (unsigned long)dst; memcpy(dst, src, len); @@ -176,13 +177,6 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, *(uprobe_opcode_t *)dst = __BUG_INSN_32; } + flush_icache_range(start, start + len); kunmap_atomic(kaddr); - - /* - * We probably need flush_icache_user_page() but it needs vma. - * This should work on most of architectures by default. If - * architecture needs to do something different it can define - * its own version of the function. 
- */ - flush_dcache_page(page); } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 2b3c152d3c91..194bda6d74ce 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", }; static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; #endif +static int num_standard_resources; +static struct resource *standard_resources; + static int __init add_resource(struct resource *parent, struct resource *res) { @@ -139,7 +142,7 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int num_resources = 0, res_idx = 0; + int num_resources = 0, res_idx = 0, non_resv_res = 0; int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ @@ -195,6 +198,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { res = &mem_res[res_idx--]; + non_resv_res++; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -212,6 +216,9 @@ static void __init init_resources(void) goto error; } + num_standard_resources = non_resv_res; + standard_resources = &mem_res[res_idx + 1]; + /* Clean-up any unused pre-allocated resources */ if (res_idx >= 0) memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res)); @@ -223,6 +230,33 @@ static void __init init_resources(void) memblock_free(mem_res, mem_res_sz); } +static int __init reserve_memblock_reserved_regions(void) +{ + u64 i, j; + + for (i = 0; i < num_standard_resources; i++) { + struct resource *mem = &standard_resources[i]; + phys_addr_t r_start, r_end, mem_size = resource_size(mem); + + if (!memblock_is_region_reserved(mem->start, mem_size)) + continue; + + for_each_reserved_mem_range(j, &r_start, &r_end) { + resource_size_t start, end; + + start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); + end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); + + if (start > mem->end || end < mem->start) + continue; + + reserve_region_with_split(mem, start, end, "Reserved"); + } + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); static void __init parse_dtb(void) { @@ -288,8 +322,8 @@ void __init setup_arch(char **cmdline_p) riscv_init_cbo_blocksizes(); riscv_fill_hwcap(); - init_rt_signal_env(); apply_boot_alternatives(); + init_rt_signal_env(); if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && riscv_isa_extension_available(NULL, ZICBOM)) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index dcd282419456..c3c517b9eee5 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all) if (cal_all || riscv_v_vstate_query(task_pt_regs(current))) total_context_size += riscv_v_sc_size; } - /* - * Preserved a __riscv_ctx_hdr for END signal context header if an - * extension uses __riscv_extra_ext_header - */ - if (total_context_size) - total_context_size += sizeof(struct __riscv_ctx_hdr); frame_size += total_context_size; diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 8ff8e8b36524..80230de167de 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -6,6 +6,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/irqflags.h> #include <linux/randomize_kstack.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \ { \ if 
(user_mode(regs)) { \ irqentry_enter_from_user_mode(regs); \ + local_irq_enable(); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ + local_irq_disable(); \ irqentry_exit_to_user_mode(regs); \ } else { \ irqentry_state_t state = irqentry_nmi_enter(regs); \ @@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); - local_irq_enable(); handled = riscv_v_first_use_handler(regs); - - local_irq_disable(); - if (!handled) do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, "Oops - illegal instruction"); + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); @@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re DO_ERROR_INFO(do_trap_load_fault, SIGSEGV, SEGV_ACCERR, "load access fault"); -asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) +enum misaligned_access_type { + MISALIGNED_STORE, + MISALIGNED_LOAD, +}; +static const struct { + const char *type_str; + int (*handler)(struct pt_regs *regs); +} misaligned_handler[] = { + [MISALIGNED_STORE] = { + .type_str = "Oops - store (or AMO) address misaligned", + .handler = handle_misaligned_store, + }, + [MISALIGNED_LOAD] = { + .type_str = "Oops - load address misaligned", + .handler = handle_misaligned_load, + }, +}; + +static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type) { + irqentry_state_t state; + if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); + local_irq_enable(); + } else { + state = irqentry_nmi_enter(regs); + } - if (handle_misaligned_load(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - load address misaligned"); + if (misaligned_handler[type].handler(regs)) + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, + misaligned_handler[type].type_str); + if (user_mode(regs)) { + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { - irqentry_state_t state = irqentry_nmi_enter(regs); - - if (handle_misaligned_load(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - load address misaligned"); - irqentry_nmi_exit(regs, state); } } -asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) +asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) { - if (user_mode(regs)) { - irqentry_enter_from_user_mode(regs); - - if (handle_misaligned_store(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - store (or AMO) address misaligned"); - - irqentry_exit_to_user_mode(regs); - } else { - irqentry_state_t state = irqentry_nmi_enter(regs); - - if (handle_misaligned_store(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - store (or AMO) address misaligned"); + do_trap_misaligned(regs, MISALIGNED_LOAD); +} - irqentry_nmi_exit(regs, state); - } +asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) +{ + do_trap_misaligned(regs, MISALIGNED_STORE); } + DO_ERROR_INFO(do_trap_store_fault, SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault"); DO_ERROR_INFO(do_trap_ecall_s, @@ -298,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs) { if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); + local_irq_enable(); handle_break(regs); + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = 
irqentry_nmi_enter(regs); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 9a80a12f6b48..4128aa5e0c76 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -87,6 +87,13 @@ #define INSN_MATCH_C_FSWSP 0xe002 #define INSN_MASK_C_FSWSP 0xe003 +#define INSN_MATCH_C_LHU 0x8400 +#define INSN_MASK_C_LHU 0xfc43 +#define INSN_MATCH_C_LH 0x8440 +#define INSN_MASK_C_LH 0xfc43 +#define INSN_MATCH_C_SH 0x8c00 +#define INSN_MASK_C_SH 0xfc43 + #define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4) #if defined(CONFIG_64BIT) @@ -405,6 +412,13 @@ int handle_misaligned_load(struct pt_regs *regs) fp = 1; len = 4; #endif + } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) { + len = 2; + insn = RVC_RS2S(insn) << SH_RD; + } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) { + len = 2; + shift = 8 * (sizeof(ulong) - len); + insn = RVC_RS2S(insn) << SH_RD; } else { regs->epc = epc; return -1; @@ -422,7 +436,7 @@ int handle_misaligned_load(struct pt_regs *regs) } if (!fp) - SET_RD(insn, regs, val.data_ulong << shift >> shift); + SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift); else if (len == 8) set_f64_rd(insn, regs, val.data_u64); else @@ -504,6 +518,9 @@ int handle_misaligned_store(struct pt_regs *regs) len = 4; val.data_ulong = GET_F32_RS2C(insn, regs); #endif + } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) { + len = 2; + val.data_ulong = GET_RS2S(insn, regs); } else { regs->epc = epc; return -1; diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index cbe2a179331d..99e51f775539 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -31,7 +31,7 @@ SECTIONS *(.data .data.* .gnu.linkonce.d.*) *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) - } + } :text .note : { *(.note.*) } :text :note diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 682b3feee451..a30fb2fb8a2b 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -309,7 +309,7 @@ static int __init riscv_v_sysctl_init(void) static int __init riscv_v_sysctl_init(void) { return 0; } #endif /* ! CONFIG_SYSCTL */ -static int riscv_v_init(void) +static int __init riscv_v_init(void) { return riscv_v_sysctl_init(); } diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 2707a51b082c..78ac3216a54d 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba .type = etype, .size = sizeof(struct perf_event_attr), .pinned = true, + .disabled = true, /* * It should never reach here if the platform doesn't support the sscofpmf * extension as mode filtering won't work without it. 
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 6e704ed86a83..635c67ed3665 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -139,9 +139,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, struct kvm_vcpu *tmp; kvm_for_each_vcpu(i, tmp, vcpu->kvm) { - spin_lock(&vcpu->arch.mp_state_lock); + spin_lock(&tmp->arch.mp_state_lock); WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); - spin_unlock(&vcpu->arch.mp_state_lock); + spin_unlock(&tmp->arch.mp_state_lock); } kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c index dce667f4b6ab..3070bb31745d 100644 --- a/arch/riscv/kvm/vcpu_sbi_hsm.c +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c @@ -9,6 +9,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/kvm_host.h> +#include <linux/wordpart.h> #include <asm/sbi.h> #include <asm/kvm_vcpu_sbi.h> @@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); if (!target_vcpu) return SBI_ERR_INVALID_PARAM; - if (!kvm_riscv_vcpu_stopped(target_vcpu)) - return SBI_HSM_STATE_STARTED; - else if (vcpu->stat.generic.blocking) + if (kvm_riscv_vcpu_stopped(target_vcpu)) + return SBI_HSM_STATE_STOPPED; + else if (target_vcpu->stat.generic.blocking) return SBI_HSM_STATE_SUSPENDED; else - return SBI_HSM_STATE_STOPPED; + return SBI_HSM_STATE_STARTED; } static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, @@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, } return 0; case SBI_EXT_HSM_HART_SUSPEND: - switch (cp->a0) { + switch (lower_32_bits(cp->a0)) { case SBI_HSM_SUSPEND_RET_DEFAULT: kvm_riscv_vcpu_wfi(vcpu); break; diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 9c2ab3dfa93a..b17fad091bab 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, u64 next_cycle; if (cp->a6 != SBI_EXT_TIME_SET_TIMER) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_context *cp = &vcpu->arch.guest_context; unsigned long hmask = cp->a0; unsigned long hbase = cp->a1; + unsigned long hart_bit = 0, sentmask = 0; if (cp->a6 != SBI_EXT_IPI_SEND_IPI) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, if (hbase != -1UL) { if (tmp->vcpu_id < hbase) continue; - if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) + hart_bit = tmp->vcpu_id - hbase; + if (hart_bit >= __riscv_xlen) + goto done; + if (!(hmask & (1UL << hart_bit))) continue; } ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); if (ret < 0) break; + sentmask |= 1UL << hart_bit; kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD); } +done: + if (hbase != -1UL && (hmask ^ sentmask)) + retdata->err_val = SBI_ERR_INVALID_PARAM; + return ret; } @@ -94,7 +103,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: - if (cp->a2 == 0 && cp->a3 == 0) + if ((cp->a2 == 0 && cp->a3 == 
0) || cp->a3 == -1UL) kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask); else kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask, @@ -102,7 +111,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: - if (cp->a2 == 0 && cp->a3 == 0) + if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL) kvm_riscv_hfence_vvma_asid_all(vcpu->kvm, hbase, hmask, cp->a4); else @@ -118,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID: /* * Until nested virtualization is implemented, the - * SBI HFENCE calls should be treated as NOPs + * SBI HFENCE calls should return not supported + * hence fallthrough. */ - break; default: retdata->err_val = SBI_ERR_NOT_SUPPORTED; } diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index b81672729887..b2e4b81763f8 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -24,7 +24,20 @@ void flush_icache_all(void) if (num_online_cpus() < 2) return; - else if (riscv_use_sbi_for_rfence()) + + /* + * Make sure all previous writes to the D$ are ordered before making + * the IPI. The RISC-V spec states that a hart must execute a data fence + * before triggering a remote fence.i in order to make the modification + * visable for remote harts. + * + * IPIs on RISC-V are triggered by MMIO writes to either CLINT or + * S-IMSIC, so the fence ensures previous data writes "happen before" + * the MMIO. + */ + RISCV_FENCE(w, o); + + if (riscv_use_sbi_for_rfence()) sbi_remote_fence_i(NULL); else on_each_cpu(ipi_remote_fence_i, NULL, 1); diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index 42314f093922..375dd96bb4a0 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - unsigned long pte_num) + unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); - unsigned long i; - - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); - - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - - return orig_pte; + return pte; } static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm, flush_tlb_range(&vma, saddr, addr); } +static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize) +{ + unsigned long hugepage_shift; + + if (sz >= PGDIR_SIZE) + hugepage_shift = PGDIR_SHIFT; + else if (sz >= P4D_SIZE) + hugepage_shift = P4D_SHIFT; + else if (sz >= PUD_SIZE) + hugepage_shift = PUD_SHIFT; + else if (sz >= PMD_SIZE) + hugepage_shift = PMD_SHIFT; + else + hugepage_shift = PAGE_SHIFT; + + *pgsize = 1 << hugepage_shift; + + return sz >> hugepage_shift; +} + /* * When dealing with NAPOT mappings, the privileged specification indicates that * "if an update needs to be made, the OS generally should first mark all of 
the @@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm, pte_t pte, unsigned long sz) { - unsigned long hugepage_shift, pgsize; + size_t pgsize; int i, pte_num; - if (sz >= PGDIR_SIZE) - hugepage_shift = PGDIR_SHIFT; - else if (sz >= P4D_SIZE) - hugepage_shift = P4D_SHIFT; - else if (sz >= PUD_SIZE) - hugepage_shift = PUD_SHIFT; - else if (sz >= PMD_SIZE) - hugepage_shift = PMD_SHIFT; - else - hugepage_shift = PAGE_SHIFT; - - pte_num = sz >> hugepage_shift; - pgsize = 1 << hugepage_shift; + pte_num = num_contig_ptes_from_size(sz, &pgsize); if (!pte_present(pte)) { for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) @@ -293,15 +304,16 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t orig_pte = ptep_get(ptep); int pte_num; if (!pte_napot(orig_pte)) return ptep_get_and_clear(mm, addr, ptep); - pte_num = napot_pte_num(napot_cont_order(orig_pte)); + pte_num = num_contig_ptes_from_size(sz, &pgsize); return get_clear_contig(mm, addr, ptep, pte_num); } @@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t pte = ptep_get(ptep); int i, pte_num; @@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm, return; } - pte_num = napot_pte_num(napot_cont_order(pte)); - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) + pte_num = num_contig_ptes_from_size(sz, &pgsize); + + for (i = 0; i < pte_num; i++, addr += pgsize, ptep++) pte_clear(mm, addr, ptep); } diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 9d5f657a251b..1289cc6d3700 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -6,7 +6,6 @@ #include <linux/efi.h> #include <linux/init.h> #include <linux/debugfs.h> -#include <linux/memory_hotplug.h> #include <linux/seq_file.h> #include <linux/ptdump.h> @@ -371,9 +370,7 @@ bool ptdump_check_wx(void) static int ptdump_show(struct seq_file *m, void *v) { - get_online_mems(); ptdump_walk(m, m->private); - put_online_mems(); return 0; } diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 9b6e86ce3867..bb77607c87aa 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -4,6 +4,7 @@ #include <linux/smp.h> #include <linux/sched.h> #include <linux/hugetlb.h> +#include <linux/mmu_notifier.h> #include <asm/sbi.h> #include <asm/mmu_context.h> @@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info) local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); } -static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid, +static inline unsigned long get_mm_asid(struct mm_struct *mm) +{ + return mm ? 
cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID; +} + +static void __flush_tlb_range(struct mm_struct *mm, + const struct cpumask *cmask, unsigned long start, unsigned long size, unsigned long stride) { + unsigned long asid = get_mm_asid(mm); unsigned int cpu; if (cpumask_empty(cmask)) @@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid, } put_cpu(); -} -static inline unsigned long get_mm_asid(struct mm_struct *mm) -{ - return cntx2asid(atomic_long_read(&mm->context.id)); + if (mm) + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size); } void flush_tlb_mm(struct mm_struct *mm) { - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm), - 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); + __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); } void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int page_size) { - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm), - start, end - start, page_size); + __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE); } @@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } } - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), start, end - start, stride_size); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID, + __flush_tlb_range(NULL, cpu_online_mask, start, end - start, PAGE_SIZE); } @@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE); } #endif @@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, unsigned long uaddr) { + unsigned long start = uaddr & PAGE_MASK; + cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE); } void arch_flush_tlb_batched_pending(struct mm_struct *mm) @@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm) void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) { - __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0, - FLUSH_TLB_MAX_SIZE, PAGE_SIZE); + __flush_tlb_range(NULL, &batch->cpumask, + 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); cpumask_clear(&batch->cpumask); } diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S index 0e6ca6d5ae4b..c5db2f072c34 100644 --- a/arch/riscv/purgatory/entry.S +++ b/arch/riscv/purgatory/entry.S @@ -12,6 +12,7 @@ .text +.align 2 SYM_CODE_START(purgatory_start) lla sp, .Lstack |
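Several hunks above (the cmpxchg.h "(long)(int)" cast chain, the futex.h "(long)(int)oldval" operand, and the arithmetic-shift change in handle_misaligned_load()) share one theme: on RV64, a 32-bit value loaded with lr.w or lw arrives sign-extended in a 64-bit register, so the value it is compared against must be sign-extended the same way. Below is a minimal standalone sketch of that behaviour, assuming an LP64 target; it is illustrative only and not part of the diff or the kernel sources.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration (not kernel code): RV64's lr.w/lw sign-extend the
 * loaded 32-bit word into a 64-bit register, so the expected value must be
 * sign-extended the same way before a 64-bit comparison, otherwise any value
 * with bit 31 set can never match.  Assumes an LP64 target (64-bit long).
 */
int main(void)
{
	uint32_t word = 0x80000000u;              /* bit 31 set */
	long loaded = (long)(int32_t)word;        /* what lr.w/lw produce in a register */

	long zero_extended = (long)word;          /* plain (long) cast: 0x0000000080000000 */
	long sign_extended = (long)(int)word;     /* sign-extending cast: 0xffffffff80000000 */

	printf("loaded:        0x%lx\n", (unsigned long)loaded);
	printf("zero-extended: 0x%lx  (comparison fails)\n", (unsigned long)zero_extended);
	printf("sign-extended: 0x%lx  (comparison matches)\n", (unsigned long)sign_extended);

	return !(loaded == sign_extended && loaded != zero_extended);
}

The same reasoning applies to the misaligned-load hunk: casting to (long) before the right shift makes the shift arithmetic, so sub-word loads are sign-extended into the destination register just as the hardware instruction would do.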