From 1b57747e978f920fb2affd1952ed913276019115 Mon Sep 17 00:00:00 2001
From: Samuel Holland
Date: Wed, 14 Aug 2024 01:10:54 -0700
Subject: riscv: Enable cbo.zero only when all harts support Zicboz

Currently, we enable cbo.zero for usermode on each hart that supports
the Zicboz extension. This means that the [ms]envcfg CSR value may
differ between harts. Other features, such as pointer masking and CFI,
require setting [ms]envcfg bits on a per-thread basis. The combination
of these two adds quite some complexity and overhead to context
switching, as we would need to maintain two separate masks for the
per-hart and per-thread bits.

Andrew Jones, who originally added Zicboz support, writes[1][2]:

  I've approached Zicboz the same way I would approach all extensions,
  which is to be per-hart. I'm not currently aware of a platform that
  is / will be composed of harts where some have Zicboz and others
  don't, but there's nothing stopping a platform like that from being
  built.

  So, how about we add code that confirms Zicboz is on all harts. If
  any hart does not have it, then we complain loudly and disable it on
  all the other harts. If it was just a hardware description bug, then
  it'll get fixed. If there's actually a platform which doesn't have
  Zicboz on all harts, then, when the issue is reported, we can decide
  to not support it, support it with defconfig, or support it under a
  Kconfig guard which must be enabled by the user.

Let's follow his suggested solution and require the extension to be
available on all harts, so the envcfg CSR value does not need to change
when a thread migrates between harts. Since we are doing this for all
extensions with fields in envcfg, the CSR itself only needs to be
saved/restored when it is present on all harts.

This should not be a regression as no known hardware has asymmetric
Zicboz support, but if anyone reports seeing the warning, we will
re-evaluate our solution.
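To illustrate the user-visible effect (this sketch is not part of the patch), a
minimal usermode routine that zeroes one cache block with cbo.zero; it assumes
a toolchain that accepts the cbo.zero mnemonic and that the block size and the
extension's presence were already queried via the RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE
and RISCV_HWPROBE_EXT_ZICBOZ hwprobe interfaces:

	/*
	 * Illustrative userspace sketch: zero one Zicboz cache block.
	 * This only works once the kernel has set ENVCFG_CBZE for usermode;
	 * with CBZE clear, cbo.zero raises an illegal-instruction exception.
	 */
	static inline void zicboz_zero_block(void *block)
	{
		/* block must be aligned to the reported Zicboz block size */
		asm volatile("cbo.zero (%0)" : : "r"(block) : "memory");
	}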
Link: https://lore.kernel.org/linux-riscv/20240322-168f191eeb8479b2ea169a5e@orel/ [1] Link: https://lore.kernel.org/linux-riscv/20240323-28943722feb57a41fb0ff488@orel/ [2] Reviewed-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-2-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpufeature.c | 7 ++++++- arch/riscv/kernel/suspend.c | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c..e560a253e99b 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -28,6 +28,8 @@ #define NUM_ALPHA_EXTS ('z' - 'a' + 1) +static bool any_cpu_has_zicboz; + unsigned long elf_hwcap __read_mostly; /* Host ISA bitmap */ @@ -98,6 +100,7 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); return -EINVAL; } + any_cpu_has_zicboz = true; return 0; } @@ -919,8 +922,10 @@ unsigned long riscv_get_elf_hwcap(void) void riscv_user_isa_enable(void) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) csr_set(CSR_ENVCFG, ENVCFG_CBZE); + else if (any_cpu_has_zicboz) + pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index c8cec0cc5833..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -14,7 +14,7 @@ void suspend_save_csrs(struct suspend_context *context) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) context->envcfg = csr_read(CSR_ENVCFG); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); @@ -37,7 +37,7 @@ void suspend_save_csrs(struct suspend_context *context) void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, 0); - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) csr_write(CSR_ENVCFG, context->envcfg); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); -- cgit v1.2.3 From 5fc7355f01376e69964bb21b685025b042c37acc Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 01:10:55 -0700 Subject: riscv: Add support for per-thread envcfg CSR values Some bits in the [ms]envcfg CSR, such as the CFI state and pointer masking mode, need to be controlled on a per-thread basis. Support this by keeping a copy of the CSR value in struct thread_struct and writing it during context switches. It is safe to discard the old CSR value during the context switch because the CSR is modified only by software, so the CSR will remain in sync with the copy in thread_struct. Use ALTERNATIVE directly instead of riscv_has_extension_unlikely() to minimize branchiness in the context switching code. Since thread_struct is copied during fork(), setting the value for the init task sets the default value for all other threads. 
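As a reading aid for the per-thread model (a hypothetical helper, not added by
this patch), updating an envcfg bit now means touching the thread's cached
value and, for the running task, the live CSR; __switch_to() keeps every other
thread consistent on its next context switch:

	/*
	 * Hypothetical sketch: set an envcfg bit for one thread. The cached
	 * value in thread_struct is authoritative; the CSR is written
	 * immediately only when the task is currently running, since
	 * __switch_to() restores thread.envcfg on the next switch anyway.
	 */
	static void thread_envcfg_set_bits(struct task_struct *tsk, unsigned long bits)
	{
		tsk->thread.envcfg |= bits;
		if (tsk == current)
			csr_set(CSR_ENVCFG, bits);
	}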
Reviewed-by: Andrew Jones Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-3-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/processor.h | 1 + arch/riscv/include/asm/switch_to.h | 8 ++++++++ arch/riscv/kernel/cpufeature.c | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index efa1b3519b23..c1a492508835 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -102,6 +102,7 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; + unsigned long envcfg; u32 riscv_v_flags; u32 vstate_ctrl; struct __riscv_v_ext_state vstate; diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 7594df37cc9f..9685cd85e57c 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -70,6 +70,13 @@ static __always_inline bool has_fpu(void) { return false; } #define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void __switch_to_envcfg(struct task_struct *next) +{ + asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", + 0, RISCV_ISA_EXT_XLINUXENVCFG, 1) + :: "r" (next->thread.envcfg) : "memory"); +} + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); @@ -103,6 +110,7 @@ do { \ __switch_to_vector(__prev, __next); \ if (switch_to_should_flush_icache(__next)) \ local_flush_icache_all(); \ + __switch_to_envcfg(__next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index e560a253e99b..27bafc5dd62d 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -923,7 +923,7 @@ unsigned long riscv_get_elf_hwcap(void) void riscv_user_isa_enable(void) { if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) - csr_set(CSR_ENVCFG, ENVCFG_CBZE); + current->thread.envcfg |= ENVCFG_CBZE; else if (any_cpu_has_zicboz) pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); } -- cgit v1.2.3 From 368546ebe7e74cb6e18f17768533ab7077392a8c Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 01:10:56 -0700 Subject: riscv: Call riscv_user_isa_enable() only on the boot hart Now that the [ms]envcfg CSR value is maintained per thread, not per hart, riscv_user_isa_enable() only needs to be called once during boot, to set the value for the init task. This also allows it to be marked as __init. Reviewed-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-4-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 2 +- arch/riscv/kernel/cpufeature.c | 4 ++-- arch/riscv/kernel/smpboot.c | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..ce9a995730c1 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -31,7 +31,7 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); /* Per-cpu ISA extensions. 
*/ extern struct riscv_isainfo hart_isa[NR_CPUS]; -void riscv_user_isa_enable(void); +void __init riscv_user_isa_enable(void); #define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ .name = #_name, \ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 27bafc5dd62d..b3a057c36996 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -920,12 +920,12 @@ unsigned long riscv_get_elf_hwcap(void) return hwcap; } -void riscv_user_isa_enable(void) +void __init riscv_user_isa_enable(void) { if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) current->thread.envcfg |= ENVCFG_CBZE; else if (any_cpu_has_zicboz) - pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); + pr_warn("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 0f8f1c95ac38..e36d20205bd7 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -233,8 +233,6 @@ asmlinkage __visible void smp_callin(void) numa_add_cpu(curr_cpuid); set_cpu_online(curr_cpuid, true); - riscv_user_isa_enable(); - /* * Remote cache and TLB flushes are ignored while the CPU is offline, * so flush them both right now just in case. -- cgit v1.2.3 From 8d20a739f17a2de9e269db72330f5655d6545dd4 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:18 -0700 Subject: RISC-V: Check scalar unaligned access on all CPUs Originally, the check_unaligned_access_emulated_all_cpus function only checked the boot hart. This fixes the function to check all harts. Fixes: 71c54b3d169d ("riscv: report misaligned accesses emulation to hwprobe") Signed-off-by: Jesse Taube Reviewed-by: Charlie Jenkins Reviewed-by: Evan Green Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-1-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 2 ++ arch/riscv/kernel/traps_misaligned.c | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..dfa5cdddd367 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -60,6 +61,7 @@ void riscv_user_isa_enable(void); #if defined(CONFIG_RISCV_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); +void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); DECLARE_PER_CPU(long, misaligned_access_speed); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d4fd8af7aaf5..d076dde5ad20 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -526,11 +526,11 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } -static bool check_unaligned_access_emulated(int cpu) +void check_unaligned_access_emulated(struct work_struct *work __always_unused) { + int cpu = smp_processor_id(); long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); unsigned long tmp_var, tmp_val; - bool misaligned_emu_detected; *mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; @@ -538,19 +538,16 @@ static bool check_unaligned_access_emulated(int cpu) " "REG_L" %[tmp], 1(%[ptr])\n" : [tmp] "=r" 
(tmp_val) : [ptr] "r" (&tmp_var) : "memory"); - misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED); /* * If unaligned_ctl is already set, this means that we detected that all * CPUS uses emulated misaligned access at boot time. If that changed * when hotplugging the new cpu, this is something we don't handle. */ - if (unlikely(unaligned_ctl && !misaligned_emu_detected)) { + if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) { pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n"); while (true) cpu_relax(); } - - return misaligned_emu_detected; } bool check_unaligned_access_emulated_all_cpus(void) @@ -562,8 +559,11 @@ bool check_unaligned_access_emulated_all_cpus(void) * accesses emulated since tasks requesting such control can run on any * CPU. */ + schedule_on_each_cpu(check_unaligned_access_emulated); + for_each_online_cpu(cpu) - if (!check_unaligned_access_emulated(cpu)) + if (per_cpu(misaligned_access_speed, cpu) + != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED) return false; unaligned_ctl = true; -- cgit v1.2.3 From 9c528b5f7927b857b40f3c46afbc869827af3c94 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:19 -0700 Subject: RISC-V: Scalar unaligned access emulated on hotplug CPUs The check_unaligned_access_emulated() function should have been called during CPU hotplug to ensure that if all CPUs had emulated unaligned accesses, the new CPU also does. This patch adds the call to check_unaligned_access_emulated() in the hotplug path. Fixes: 55e0bf49a0d0 ("RISC-V: Probe misaligned access speed in parallel") Signed-off-by: Jesse Taube Reviewed-by: Evan Green Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-2-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/unaligned_access_speed.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 160628a2116d..f3508cc54f91 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -191,6 +191,7 @@ static int riscv_online_cpu(unsigned int cpu) if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) goto exit; + check_unaligned_access_emulated(NULL); buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { pr_warn("Allocation failure, not measuring misaligned performance\n"); -- cgit v1.2.3 From c05a62c92516d7679c819f8a5177cf84c8668954 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:20 -0700 Subject: RISC-V: Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED to allow for the addition of RISCV_VECTOR_MISALIGNED in a later patch. 
Signed-off-by: Jesse Taube Reviewed-by: Conor Dooley Reviewed-by: Charlie Jenkins Reviewed-by: Evan Green Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-3-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 6 +++--- arch/riscv/include/asm/cpufeature.h | 2 +- arch/riscv/include/asm/entry-common.h | 2 +- arch/riscv/kernel/Makefile | 4 ++-- arch/riscv/kernel/fpu.S | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 22dc5ea4196c..fab7c6bc1729 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -784,7 +784,7 @@ config THREAD_SIZE_ORDER Specify the Pages of thread stack size (from 4KB to 64KB), which also affects irq stack size, which is equal to thread stack size. -config RISCV_MISALIGNED +config RISCV_SCALAR_MISALIGNED bool select SYSCTL_ARCH_UNALIGN_ALLOW help @@ -801,7 +801,7 @@ choice config RISCV_PROBE_UNALIGNED_ACCESS bool "Probe for hardware unaligned access support" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help During boot, the kernel will run a series of tests to determine the speed of unaligned accesses. This probing will dynamically determine @@ -812,7 +812,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS config RISCV_EMULATED_UNALIGNED_ACCESS bool "Emulate unaligned access where system support is missing" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help If unaligned memory accesses trap into the kernel as they are not supported by the system, the kernel will emulate the unaligned diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index dfa5cdddd367..ccc6cf141c20 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -59,7 +59,7 @@ void riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) -#if defined(CONFIG_RISCV_MISALIGNED) +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 2293e535f865..0a4e3544c877 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -25,7 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); -#ifdef CONFIG_RISCV_MISALIGNED +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); #else diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7f88cc4931f5..45624c5ea86c 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -68,8 +68,8 @@ obj-y += probes/ obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o -obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += traps_misaligned.o +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += unaligned_access_speed.o obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index 327cf527dd7e..f74f6b60e347 100644 --- 
a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -170,7 +170,7 @@ SYM_FUNC_END(__fstate_restore) __access_func(f31) -#ifdef CONFIG_RISCV_MISALIGNED +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED /* * Disable compressed instructions set to keep a constant offset between FP @@ -224,4 +224,4 @@ SYM_FUNC_START(get_f64_reg) fp_access_epilogue SYM_FUNC_END(get_f64_reg) -#endif /* CONFIG_RISCV_MISALIGNED */ +#endif /* CONFIG_RISCV_SCALAR_MISALIGNED */ -- cgit v1.2.3 From d1703dc7bc8ec7adb91f5ceaf1556ff1ed212858 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:21 -0700 Subject: RISC-V: Detect unaligned vector accesses supported Run an unaligned vector access to test if the system supports vector unaligned access. Add the result to a new key in hwprobe. This is useful for usermode to know if vector misaligned accesses are supported and if they are faster or slower than equivalent byte accesses. Signed-off-by: Jesse Taube Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-4-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 36 +++++++++ arch/riscv/include/asm/cpufeature.h | 8 +- arch/riscv/include/asm/entry-common.h | 11 --- arch/riscv/include/asm/hwprobe.h | 2 +- arch/riscv/include/asm/vector.h | 2 + arch/riscv/include/uapi/asm/hwprobe.h | 5 ++ arch/riscv/kernel/Makefile | 4 +- arch/riscv/kernel/sys_hwprobe.c | 35 ++++++++ arch/riscv/kernel/traps_misaligned.c | 125 ++++++++++++++++++++++++++++- arch/riscv/kernel/unaligned_access_speed.c | 22 ++--- arch/riscv/kernel/vector.c | 2 +- 11 files changed, 222 insertions(+), 30 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index fab7c6bc1729..05f698a88977 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -784,12 +784,26 @@ config THREAD_SIZE_ORDER Specify the Pages of thread stack size (from 4KB to 64KB), which also affects irq stack size, which is equal to thread stack size. +config RISCV_MISALIGNED + bool + help + Embed support for detecting and emulating misaligned + scalar or vector loads and stores. + config RISCV_SCALAR_MISALIGNED bool + select RISCV_MISALIGNED select SYSCTL_ARCH_UNALIGN_ALLOW help Embed support for emulating misaligned loads and stores. +config RISCV_VECTOR_MISALIGNED + bool + select RISCV_MISALIGNED + depends on RISCV_ISA_V + help + Enable detecting support for vector misaligned loads and stores. + choice prompt "Unaligned Accesses Support" default RISCV_PROBE_UNALIGNED_ACCESS @@ -841,6 +855,28 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS endchoice +choice + prompt "Vector unaligned Accesses Support" + depends on RISCV_ISA_V + default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + help + This determines the level of support for vector unaligned accesses. This + information is used by the kernel to perform optimizations. It is also + exposed to user space via the hwprobe syscall. The hardware will be + probed at boot by default. + +config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + bool "Probe speed of vector unaligned accesses" + select RISCV_VECTOR_MISALIGNED + depends on RISCV_ISA_V + help + During boot, the kernel will run a series of tests to determine the + speed of vector unaligned accesses if they are supported. This probing + will dynamically determine the speed of vector unaligned accesses on + the underlying system if they are supported. 
+ +endchoice + source "arch/riscv/Kconfig.vendor" endmenu # "Platform type" diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index ccc6cf141c20..85bf1bce51e6 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -59,8 +59,8 @@ void riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) -#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); @@ -72,6 +72,12 @@ static inline bool unaligned_ctl_available(void) } #endif +bool check_vector_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_VECTOR_MISALIGNED) +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused); +DECLARE_PER_CPU(long, vector_misaligned_access); +#endif + #if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key); diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 0a4e3544c877..7b32d2b08bb6 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -25,18 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); -#ifdef CONFIG_RISCV_SCALAR_MISALIGNED int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); -#else -static inline int handle_misaligned_load(struct pt_regs *regs) -{ - return -1; -} -static inline int handle_misaligned_store(struct pt_regs *regs) -{ - return -1; -} -#endif #endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index ffb9484531af..1ce1df6d0ff3 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include -#define RISCV_HWPROBE_MAX_KEY 9 +#define RISCV_HWPROBE_MAX_KEY 10 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index be7d309cca8a..c7c023afbacd 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -21,6 +21,7 @@ extern unsigned long riscv_v_vsize; int riscv_v_setup_vsize(void); +bool insn_is_vector(u32 insn_buf); bool riscv_v_first_use_handler(struct pt_regs *regs); void kernel_vector_begin(void); void kernel_vector_end(void); @@ -268,6 +269,7 @@ struct pt_regs; static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; } static __always_inline bool has_vector(void) { return false; } +static __always_inline bool insn_is_vector(u32 insn_buf) { return false; } static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; } diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 1e153cda57db..34c88c15322c 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -88,6 +88,11 @@ struct riscv_hwprobe { #define 
RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 #define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 #define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ /* Flags */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 45624c5ea86c..7f88cc4931f5 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -68,8 +68,8 @@ obj-y += probes/ obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += traps_misaligned.o -obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += unaligned_access_speed.o +obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o +obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index cea0ca2bf2a2..6441baada36b 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -201,6 +201,37 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus) } #endif +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + int cpu; + u64 perf = -1ULL; + + /* Return if supported or not even if speed wasn't probed */ + for_each_cpu(cpu, cpus) { + int this_perf = per_cpu(vector_misaligned_access, cpu); + + if (perf == -1ULL) + perf = this_perf; + + if (perf != this_perf) { + perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + break; + } + } + + if (perf == -1ULL) + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + return perf; +} +#else +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; +} +#endif + static void hwprobe_one_pair(struct riscv_hwprobe *pair, const struct cpumask *cpus) { @@ -229,6 +260,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair, pair->value = hwprobe_misaligned(cpus); break; + case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF: + pair->value = hwprobe_vec_misaligned(cpus); + break; + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: pair->value = 0; if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d076dde5ad20..ef59ecfc64cb 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -16,6 +16,7 @@ #include #include #include +#include #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -322,12 +323,37 @@ union reg_data { u64 data_u64; }; -static bool unaligned_ctl __read_mostly; - /* sysctl hooks */ int unaligned_enabled __read_mostly = 1; /* Enabled by default */ -int handle_misaligned_load(struct pt_regs *regs) +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (get_insn(regs, epc, &insn)) + return -1; + + /* Only return 0 when in check_vector_unaligned_access_emulated */ + if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) { + *this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + regs->epc = epc + INSN_LEN(insn); + return 0; + } + + /* If vector instruction we don't emulate it yet */ 
+ regs->epc = epc; + return -1; +} +#else +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + return -1; +} +#endif + +static int handle_scalar_misaligned_load(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -435,7 +461,7 @@ int handle_misaligned_load(struct pt_regs *regs) return 0; } -int handle_misaligned_store(struct pt_regs *regs) +static int handle_scalar_misaligned_store(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -526,6 +552,91 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } +int handle_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) { + if (get_insn(regs, epc, &insn)) + return -1; + + if (insn_is_vector(insn)) + return handle_vector_misaligned_load(regs); + } + + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_load(regs); + + return -1; +} + +int handle_misaligned_store(struct pt_regs *regs) +{ + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_store(regs); + + return -1; +} + +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused) +{ + long *mas_ptr = this_cpu_ptr(&vector_misaligned_access); + unsigned long tmp_var; + + *mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + kernel_vector_begin(); + /* + * In pre-13.0.0 versions of GCC, vector registers cannot appear in + * the clobber list. This inline asm clobbers v0, but since we do not + * currently build the kernel with V enabled, the v0 clobber arg is not + * needed (as the compiler will not emit vector code itself). If the kernel + * is changed to build with V enabled, the clobber arg will need to be + * added here. 
+ */ + __asm__ __volatile__ ( + ".balign 4\n\t" + ".option push\n\t" + ".option arch, +zve32x\n\t" + " vsetivli zero, 1, e16, m1, ta, ma\n\t" // Vectors of 16b + " vle16.v v0, (%[ptr])\n\t" // Load bytes + ".option pop\n\t" + : : [ptr] "r" ((u8 *)&tmp_var + 1)); + kernel_vector_end(); +} + +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + int cpu; + + if (!has_vector()) { + for_each_online_cpu(cpu) + per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + return false; + } + + schedule_on_each_cpu(check_vector_unaligned_access_emulated); + + for_each_online_cpu(cpu) + if (per_cpu(vector_misaligned_access, cpu) + == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return false; + + return true; +} +#else +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif + +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED + +static bool unaligned_ctl __read_mostly; + void check_unaligned_access_emulated(struct work_struct *work __always_unused) { int cpu = smp_processor_id(); @@ -574,3 +685,9 @@ bool unaligned_ctl_available(void) { return unaligned_ctl; } +#else +bool check_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index f3508cc54f91..0b8b5e17453a 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -19,7 +19,8 @@ #define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE) #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) -DEFINE_PER_CPU(long, misaligned_access_speed); +DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; +DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS static cpumask_t fast_misaligned_access; @@ -260,23 +261,24 @@ out: kfree(bufs); return 0; } +#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ +static int check_unaligned_access_speed_all_cpus(void) +{ + return 0; +} +#endif static int check_unaligned_access_all_cpus(void) { - bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + bool all_cpus_emulated; + + all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + check_vector_unaligned_access_emulated_all_cpus(); if (!all_cpus_emulated) return check_unaligned_access_speed_all_cpus(); return 0; } -#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ -static int check_unaligned_access_all_cpus(void) -{ - check_unaligned_access_emulated_all_cpus(); - - return 0; -} -#endif arch_initcall(check_unaligned_access_all_cpus); diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 682b3feee451..821818886fab 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -66,7 +66,7 @@ void __init riscv_v_setup_ctx_cache(void) #endif } -static bool insn_is_vector(u32 insn_buf) +bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; u32 width, csr; -- cgit v1.2.3 From e7c9d66e313bc0f7cb185c4972c3c9383a0da70f Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:22 -0700 Subject: RISC-V: Report vector unaligned access speed hwprobe Detect if vector misaligned accesses are faster or slower than equivalent vector byte accesses. This is useful for usermode to know whether vector byte accesses or vector misaligned accesses have a better bandwidth for operations like memcpy. 
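For completeness (not part of the patch), a userspace sketch of consuming the
new key; it assumes the riscv_hwprobe syscall is reachable through
syscall(__NR_riscv_hwprobe, ...) and that the constants come from the updated
uapi <asm/hwprobe.h>:

	/*
	 * Userspace sketch: choose a copy strategy based on the new
	 * RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF key. Illustrative only.
	 */
	#include <asm/hwprobe.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int vector_misaligned_is_fast(void)
	{
		struct riscv_hwprobe pair = {
			.key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
		};

		/* pairs, pair_count, cpusetsize, cpus, flags */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 0;

		return pair.value == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
	}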
Signed-off-by: Jesse Taube Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-5-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 18 ++++ arch/riscv/kernel/Makefile | 3 +- arch/riscv/kernel/copy-unaligned.h | 5 + arch/riscv/kernel/sys_hwprobe.c | 6 ++ arch/riscv/kernel/unaligned_access_speed.c | 141 ++++++++++++++++++++++++++++- arch/riscv/kernel/vec-copy-unaligned.S | 58 ++++++++++++ 6 files changed, 228 insertions(+), 3 deletions(-) create mode 100644 arch/riscv/kernel/vec-copy-unaligned.S (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 05f698a88977..d46394873afc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -875,6 +875,24 @@ config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS will dynamically determine the speed of vector unaligned accesses on the underlying system if they are supported. +config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports slow vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports slow vector unaligned memory accesses. The + kernel and userspace programs may not be able to run at all on systems + that do not support unaligned memory accesses. + +config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports fast vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports fast vector unaligned memory accesses. When + enabled, this option improves the performance of the kernel on such + systems. However, the kernel and userspace programs will run much more + slowly, or will not be able to run at all, on systems that do not + support efficient unaligned memory accesses. + endchoice source "arch/riscv/Kconfig.vendor" diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7f88cc4931f5..30db92672ada 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -70,7 +70,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o -obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS) += vec-copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_FPU) += kernel_mode_fpu.o diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h index e3d70d35b708..85d4d11450cb 100644 --- a/arch/riscv/kernel/copy-unaligned.h +++ b/arch/riscv/kernel/copy-unaligned.h @@ -10,4 +10,9 @@ void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size); void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size); +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +void __riscv_copy_vec_words_unaligned(void *dst, const void *src, size_t size); +void __riscv_copy_vec_bytes_unaligned(void *dst, const void *src, size_t size); +#endif + #endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index 6441baada36b..6673278e84d5 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -228,6 +228,12 @@ static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) #else static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) { + if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + if 
(IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; } #endif diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 0b8b5e17453a..91f189cf1611 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -6,11 +6,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include "copy-unaligned.h" @@ -268,12 +270,147 @@ static int check_unaligned_access_speed_all_cpus(void) } #endif +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +static void check_vector_unaligned_access(struct work_struct *work __always_unused) +{ + int cpu = smp_processor_id(); + u64 start_cycles, end_cycles; + u64 word_cycles; + u64 byte_cycles; + int ratio; + unsigned long start_jiffies, now; + struct page *page; + void *dst; + void *src; + long speed = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; + + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return; + + page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); + if (!page) { + pr_warn("Allocation failure, not measuring vector misaligned performance\n"); + return; + } + + /* Make an unaligned destination buffer. */ + dst = (void *)((unsigned long)page_address(page) | 0x1); + /* Unalign src as well, but differently (off by 1 + 2 = 3). */ + src = dst + (MISALIGNED_BUFFER_SIZE / 2); + src += 2; + word_cycles = -1ULL; + + /* Do a warmup. */ + kernel_vector_begin(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + /* + * For a fixed amount of time, repeatedly try the function, and take + * the best time in cycles as the measurement. + */ + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < word_cycles) + word_cycles = end_cycles - start_cycles; + } + + byte_cycles = -1ULL; + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < byte_cycles) + byte_cycles = end_cycles - start_cycles; + } + + kernel_vector_end(); + + /* Don't divide by zero. */ + if (!word_cycles || !byte_cycles) { + pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n", + cpu); + + return; + } + + if (word_cycles < byte_cycles) + speed = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + ratio = div_u64((byte_cycles * 100), word_cycles); + pr_info("cpu%d: Ratio of vector byte access time to vector unaligned word access is %d.%02d, unaligned accesses are %s\n", + cpu, + ratio / 100, + ratio % 100, + (speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? 
"fast" : "slow"); + + per_cpu(vector_misaligned_access, cpu) = speed; +} + +static int riscv_online_cpu_vec(unsigned int cpu) +{ + if (!has_vector()) + return 0; + + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) + return 0; + + check_vector_unaligned_access_emulated(NULL); + check_vector_unaligned_access(NULL); + return 0; +} + +/* Measure unaligned access speed on all CPUs present at boot in parallel. */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +{ + schedule_on_each_cpu(check_vector_unaligned_access); + + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. + */ + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu_vec, NULL); + + return 0; +} +#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +{ + return 0; +} +#endif + static int check_unaligned_access_all_cpus(void) { - bool all_cpus_emulated; + bool all_cpus_emulated, all_cpus_vec_unsupported; all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); - check_vector_unaligned_access_emulated_all_cpus(); + all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus(); + + if (!all_cpus_vec_unsupported && + IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { + kthread_run(vec_check_unaligned_access_speed_all_cpus, + NULL, "vec_check_unaligned_access_speed_all_cpus"); + } if (!all_cpus_emulated) return check_unaligned_access_speed_all_cpus(); diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S new file mode 100644 index 000000000000..d16f19f1b3b6 --- /dev/null +++ b/arch/riscv/kernel/vec-copy-unaligned.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 Rivos Inc. */ + +#include +#include +#include + + .text + +#define WORD_EEW 32 + +#define WORD_SEW CONCATENATE(e, WORD_EEW) +#define VEC_L CONCATENATE(vle, WORD_EEW).v +#define VEC_S CONCATENATE(vle, WORD_EEW).v + +/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using word loads and stores. */ +/* Note: The size is truncated to a multiple of WORD_EEW */ +SYM_FUNC_START(__riscv_copy_vec_words_unaligned) + andi a4, a2, ~(WORD_EEW-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, WORD_SEW, m8, ta, ma + VEC_L v0, (a1) + VEC_S v0, (a0) + addi a0, a0, WORD_EEW + addi a1, a1, WORD_EEW + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_words_unaligned) + +/* void __riscv_copy_vec_bytes_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using only byte accesses. 
*/ +/* Note: The size is truncated to a multiple of 8 */ +SYM_FUNC_START(__riscv_copy_vec_bytes_unaligned) + andi a4, a2, ~(8-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, e8, m8, ta, ma + vle8.v v0, (a1) + vse8.v v0, (a0) + addi a0, a0, 8 + addi a1, a1, 8 + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_bytes_unaligned) -- cgit v1.2.3 From c6898d66fd198cd458734da40e414361f295de6a Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Wed, 16 Oct 2024 10:36:25 +0200 Subject: riscv: Check that vdso does not contain any dynamic relocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like other architectures, use the common cmd_vdso_check to make sure of that. Reviewed-by: Björn Töpel Tested-by: Vladimir Isaev Signed-off-by: Alexandre Ghiti Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20241016083625.136311-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 3f1c4b2d0b06..9a1b555e8733 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_check) LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ --build-id=sha1 --hash-style=both --eh-frame-hdr @@ -65,7 +65,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script # Make sure only to export the intended __vdso_xxx symbol offsets. -quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ +quiet_cmd_vdsold_and_check = VDSOLD $@ + cmd_vdsold_and_check = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ - rm $@.tmp + rm $@.tmp && \ + $(cmd_vdso_check) -- cgit v1.2.3 From 2e6f6ea452aa9fa9f150520fdecf6bda31954db4 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:43 -0700 Subject: riscv: Add ISA extension parsing for pointer masking The RISC-V Pointer Masking specification defines three extensions: Smmpm, Smnpm, and Ssnpm. Add support for parsing each of them. The specific extension which provides pointer masking support to userspace (Supm) depends on the kernel's privilege mode, so provide a macro to abstract this selection. Smmpm implies the existence of the mseccfg CSR. As it is the only user of this CSR so far, there is no need for an Xlinuxmseccfg extension. 
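A short sketch of what the RISCV_ISA_EXT_SUPM alias buys callers (illustrative,
not taken from the patch): code that only cares about userspace pointer masking
can stay agnostic of the kernel's privilege mode:

	/*
	 * Illustrative only: RISCV_ISA_EXT_SUPM resolves to Smnpm on M-mode
	 * kernels and to Ssnpm otherwise, so one check covers both cases.
	 */
	static bool supm_available(void)
	{
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM);
	}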
Reviewed-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-3-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hwcap.h | 5 +++++ arch/riscv/kernel/cpufeature.c | 3 +++ 2 files changed, 8 insertions(+) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 46d9de54179e..8608883da453 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -93,6 +93,9 @@ #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_SMMPM 87 +#define RISCV_ISA_EXT_SMNPM 88 +#define RISCV_ISA_EXT_SSNPM 89 #define RISCV_ISA_EXT_XLINUXENVCFG 127 @@ -101,8 +104,10 @@ #ifdef CONFIG_RISCV_M_MODE #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM #else #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM #endif #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index b3a057c36996..94596bca464e 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -377,9 +377,12 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), + __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), + __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), + __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), -- cgit v1.2.3 From 09d6775f503b393d0457c7126aa43208e1724004 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:45 -0700 Subject: riscv: Add support for userspace pointer masking RISC-V supports pointer masking with a variable number of tag bits (which is called "PMLEN" in the specification) and which is configured at the next higher privilege level. Wire up the PR_SET_TAGGED_ADDR_CTRL and PR_GET_TAGGED_ADDR_CTRL prctls so userspace can request a lower bound on the number of tag bits and determine the actual number of tag bits. As with arm64's PR_TAGGED_ADDR_ENABLE, the pointer masking configuration is thread-scoped, inherited on clone() and fork() and cleared on execve(). 
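For reference (not part of the patch), a userspace sketch of the new interface;
PR_PMLEN_SHIFT and PR_PMLEN_MASK are the prctl constants introduced by this
patch:

	/*
	 * Userspace sketch: request at least min_tag_bits of pointer masking
	 * and return the number of tag bits actually granted, or -1 if the
	 * request could not be satisfied.
	 */
	#include <linux/prctl.h>
	#include <sys/prctl.h>

	static long enable_pointer_masking(unsigned long min_tag_bits)
	{
		unsigned long ctrl;

		if (prctl(PR_SET_TAGGED_ADDR_CTRL,
			  min_tag_bits << PR_PMLEN_SHIFT, 0, 0, 0))
			return -1;

		ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
		return (ctrl & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT;
	}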
Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-5-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/uabi.rst | 12 +++++ arch/riscv/Kconfig | 11 +++++ arch/riscv/include/asm/processor.h | 8 ++++ arch/riscv/include/asm/switch_to.h | 11 +++++ arch/riscv/kernel/process.c | 91 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 5 ++- 6 files changed, 137 insertions(+), 1 deletion(-) (limited to 'arch/riscv/kernel') diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst index 2b420bab0527..ddb8359a46ed 100644 --- a/Documentation/arch/riscv/uabi.rst +++ b/Documentation/arch/riscv/uabi.rst @@ -68,3 +68,15 @@ Misaligned accesses Misaligned scalar accesses are supported in userspace, but they may perform poorly. Misaligned vector accesses are only supported if the Zicclsm extension is supported. + +Pointer masking +--------------- + +Support for pointer masking in userspace (the Supm extension) is provided via +the ``PR_SET_TAGGED_ADDR_CTRL`` and ``PR_GET_TAGGED_ADDR_CTRL`` ``prctl()`` +operations. Pointer masking is disabled by default. To enable it, userspace +must call ``PR_SET_TAGGED_ADDR_CTRL`` with the ``PR_PMLEN`` field set to the +number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted +as a lower bound; if the kernel is unable to satisfy the request, the +``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits +is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation. diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 22dc5ea4196c..0ef449465378 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -531,6 +531,17 @@ config RISCV_ISA_C If you don't know what to do here, say Y. +config RISCV_ISA_SUPM + bool "Supm extension for userspace pointer masking" + depends on 64BIT + default y + help + Add support for pointer masking in userspace (Supm) when the + underlying hardware extension (Smnpm or Ssnpm) is detected at boot. + + If this option is disabled, userspace will be unable to use + the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API. 
+ config RISCV_ISA_SVNAPOT bool "Svnapot extension support for supervisor mode NAPOT pages" depends on 64BIT && MMU diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index c1a492508835..5f56eb9d114a 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -178,6 +178,14 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); #define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2) extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread); +#ifdef CONFIG_RISCV_ISA_SUPM +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); +long get_tagged_addr_ctrl(struct task_struct *task); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 9685cd85e57c..94e33216b2d9 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -70,6 +70,17 @@ static __always_inline bool has_fpu(void) { return false; } #define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void envcfg_update_bits(struct task_struct *task, + unsigned long mask, unsigned long val) +{ + unsigned long envcfg; + + envcfg = (task->thread.envcfg & ~mask) | val; + task->thread.envcfg = envcfg; + if (task == current) + csr_write(CSR_ENVCFG, envcfg); +} + static inline void __switch_to_envcfg(struct task_struct *next) { asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e3142d8a6e28..200d2ed64dfe 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -7,6 +7,7 @@ * Copyright (C) 2017 SiFive */ +#include #include #include #include @@ -180,6 +181,10 @@ void flush_thread(void) memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE); #endif +#ifdef CONFIG_RISCV_ISA_SUPM + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0); +#endif } void arch_release_task_struct(struct task_struct *tsk) @@ -242,3 +247,89 @@ void __init arch_task_cache_init(void) { riscv_v_setup_ctx_cache(); } + +#ifdef CONFIG_RISCV_ISA_SUPM +enum { + PMLEN_0 = 0, + PMLEN_7 = 7, + PMLEN_16 = 16, +}; + +static bool have_user_pmlen_7; +static bool have_user_pmlen_16; + +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) +{ + unsigned long valid_mask = PR_PMLEN_MASK; + struct thread_info *ti = task_thread_info(task); + unsigned long pmm; + u8 pmlen; + + if (is_compat_thread(ti)) + return -EINVAL; + + if (arg & ~valid_mask) + return -EINVAL; + + /* + * Prefer the smallest PMLEN that satisfies the user's request, + * in case choosing a larger PMLEN has a performance impact. 
+ */ + pmlen = FIELD_GET(PR_PMLEN_MASK, arg); + if (pmlen == PMLEN_0) + pmm = ENVCFG_PMM_PMLEN_0; + else if (pmlen <= PMLEN_7 && have_user_pmlen_7) + pmm = ENVCFG_PMM_PMLEN_7; + else if (pmlen <= PMLEN_16 && have_user_pmlen_16) + pmm = ENVCFG_PMM_PMLEN_16; + else + return -EINVAL; + + envcfg_update_bits(task, ENVCFG_PMM, pmm); + + return 0; +} + +long get_tagged_addr_ctrl(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + long ret = 0; + + if (is_compat_thread(ti)) + return -EINVAL; + + switch (task->thread.envcfg & ENVCFG_PMM) { + case ENVCFG_PMM_PMLEN_7: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7); + break; + case ENVCFG_PMM_PMLEN_16: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16); + break; + } + + return ret; +} + +static bool try_to_set_pmm(unsigned long value) +{ + csr_set(CSR_ENVCFG, value); + return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; +} + +static int __init tagged_addr_init(void) +{ + if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + return 0; + + /* + * envcfg.PMM is a WARL field. Detect which values are supported. + * Assume the supported PMLEN values are the same on all harts. + */ + csr_clear(CSR_ENVCFG, ENVCFG_PMM); + have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); + have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + + return 0; +} +core_initcall(tagged_addr_init); +#endif /* CONFIG_RISCV_ISA_SUPM */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 35791791a879..cefd656ebf43 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -230,7 +230,7 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) -/* Tagged user address controls for arm64 */ +/* Tagged user address controls for arm64 and RISC-V */ #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) @@ -244,6 +244,9 @@ struct prctl_mm_map { # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Unused; kept only for source compatibility */ # define PR_MTE_TCF_SHIFT 1 +/* RISC-V pointer masking tag length */ +# define PR_PMLEN_SHIFT 24 +# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 -- cgit v1.2.3 From 2e1743085887ba3f553c2bb472a75a3ff744b242 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:46 -0700 Subject: riscv: Add support for the tagged address ABI When pointer masking is enabled for userspace, the kernel can accept tagged pointers as arguments to some system calls. Allow this by untagging the pointers in access_ok() and the uaccess routines. The uaccess routines must peform untagging in software because U-mode and S-mode have entirely separate pointer masking configurations. In fact, hardware may not even implement pointer masking for S-mode. Since the number of tag bits is variable, untagged_addr_remote() needs to know what PMLEN to use for the remote mm. Therefore, the pointer masking mode must be the same for all threads sharing an mm. Enforce this with a lock flag in the mm context, as x86 does for LAM. The flag gets reset in init_new_context() during fork(), as the new mm is no longer multithreaded. 
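To make the untagging rule concrete (illustrative, not from the patch): with
the tagged address ABI enabled via PR_TAGGED_ADDR_ENABLE and a nonzero PMLEN,
the kernel discards the top PMLEN bits of user pointers in the uaccess paths
(see __untagged_addr_remote() in the diff below), so userspace may stash a tag
there:

	/*
	 * Userspace sketch: place a small tag (which must fit in pmlen bits)
	 * in the top bits of a pointer. The kernel's uaccess routines strip
	 * it again, so e.g. read(fd, tag_pointer(buf, 5, 7), len) behaves
	 * like read(fd, buf, len).
	 */
	#include <stdint.h>

	static void *tag_pointer(void *ptr, uint64_t tag, unsigned int pmlen)
	{
		uint64_t mask = ~0ULL << (64 - pmlen);	/* tag field: top pmlen bits */
		uintptr_t addr = (uintptr_t)ptr & ~mask;

		return (void *)(addr | (tag << (64 - pmlen)));
	}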
Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-6-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/uabi.rst | 4 ++ arch/riscv/include/asm/mmu.h | 7 ++++ arch/riscv/include/asm/mmu_context.h | 13 +++++++ arch/riscv/include/asm/uaccess.h | 43 ++++++++++++++++++--- arch/riscv/kernel/process.c | 73 +++++++++++++++++++++++++++++++++--- 5 files changed, 130 insertions(+), 10 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst index ddb8359a46ed..243e40062e34 100644 --- a/Documentation/arch/riscv/uabi.rst +++ b/Documentation/arch/riscv/uabi.rst @@ -80,3 +80,7 @@ number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted as a lower bound; if the kernel is unable to satisfy the request, the ``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation. + +Additionally, when pointer masking is enabled (``PR_PMLEN`` is greater than 0), +a tagged address ABI is supported, with the same interface and behavior as +documented for AArch64 (Documentation/arch/arm64/tagged-address-abi.rst). diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index c9e03e9da3dc..1cc90465d75b 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -25,9 +25,16 @@ typedef struct { #ifdef CONFIG_BINFMT_ELF_FDPIC unsigned long exec_fdpic_loadmap; unsigned long interp_fdpic_loadmap; +#endif + unsigned long flags; +#ifdef CONFIG_RISCV_ISA_SUPM + u8 pmlen; #endif } mm_context_t; +/* Lock the pointer masking mode because this mm is multithreaded */ +#define MM_CONTEXT_LOCK_PMLEN 0 + #define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK) #define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 7030837adc1a..8c4bc49a3a0f 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { +#ifdef CONFIG_RISCV_ISA_SUPM + next->context.pmlen = 0; +#endif switch_mm(prev, next, NULL); } @@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_MMU atomic_long_set(&mm->context.id, 0); #endif + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags); return 0; } DECLARE_STATIC_KEY_FALSE(use_asid_allocator); +#ifdef CONFIG_RISCV_ISA_SUPM +#define mm_untag_mask mm_untag_mask +static inline unsigned long mm_untag_mask(struct mm_struct *mm) +{ + return -1UL >> mm->context.pmlen; +} +#endif + #include #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 72ec1d9bd3f3..fee56b0c8058 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -9,8 +9,41 @@ #define _ASM_RISCV_UACCESS_H #include +#include #include /* for TASK_SIZE */ +#ifdef CONFIG_RISCV_ISA_SUPM +static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr) +{ + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) { + u8 pmlen = mm->context.pmlen; + + /* Virtual addresses are sign-extended; physical addresses are zero-extended. 
*/ + if (IS_ENABLED(CONFIG_MMU)) + return (long)(addr << pmlen) >> pmlen; + else + return (addr << pmlen) >> pmlen; + } + + return addr; +} + +#define untagged_addr(addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \ +}) + +#define untagged_addr_remote(mm, addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + mmap_assert_locked(mm); \ + (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \ +}) + +#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size)) +#else +#define untagged_addr(addr) (addr) +#endif + /* * User space memory access functions */ @@ -130,7 +163,7 @@ do { \ */ #define __get_user(x, ptr) \ ({ \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ long __gu_err = 0; \ \ __chk_user_ptr(__gu_ptr); \ @@ -246,7 +279,7 @@ do { \ */ #define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ __typeof__(*__gu_ptr) __val = (x); \ long __pu_err = 0; \ \ @@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_from_user(to, untagged_addr(from), n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_to_user(untagged_addr(to), from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return access_ok(to, n) ? - __clear_user(to, n) : n; + __clear_user(untagged_addr(to), n) : n; } #define __get_kernel_nofault(dst, src, type, err_label) \ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 200d2ed64dfe..58b6482c2bf6 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -213,6 +213,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) unsigned long tls = args->tls; struct pt_regs *childregs = task_pt_regs(p); + /* Ensure all threads in this mm have the same pointer masking mode. */ + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM)) + set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags); + memset(&p->thread.s, 0, sizeof(p->thread.s)); /* p->thread holds context to be restored by __switch_to() */ @@ -258,10 +262,16 @@ enum { static bool have_user_pmlen_7; static bool have_user_pmlen_16; +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. + */ +static unsigned int tagged_addr_disabled; + long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) { - unsigned long valid_mask = PR_PMLEN_MASK; + unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE; struct thread_info *ti = task_thread_info(task); + struct mm_struct *mm = task->mm; unsigned long pmm; u8 pmlen; @@ -276,16 +286,41 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) * in case choosing a larger PMLEN has a performance impact. 
*/ pmlen = FIELD_GET(PR_PMLEN_MASK, arg); - if (pmlen == PMLEN_0) + if (pmlen == PMLEN_0) { pmm = ENVCFG_PMM_PMLEN_0; - else if (pmlen <= PMLEN_7 && have_user_pmlen_7) + } else if (pmlen <= PMLEN_7 && have_user_pmlen_7) { + pmlen = PMLEN_7; pmm = ENVCFG_PMM_PMLEN_7; - else if (pmlen <= PMLEN_16 && have_user_pmlen_16) + } else if (pmlen <= PMLEN_16 && have_user_pmlen_16) { + pmlen = PMLEN_16; pmm = ENVCFG_PMM_PMLEN_16; - else + } else { return -EINVAL; + } + + /* + * Do not allow the enabling of the tagged address ABI if globally + * disabled via sysctl abi.tagged_addr_disabled, if pointer masking + * is disabled for userspace. + */ + if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen)) + return -EINVAL; + + if (!(arg & PR_TAGGED_ADDR_ENABLE)) + pmlen = PMLEN_0; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) { + mmap_write_unlock(mm); + return -EBUSY; + } envcfg_update_bits(task, ENVCFG_PMM, pmm); + mm->context.pmlen = pmlen; + + mmap_write_unlock(mm); return 0; } @@ -298,6 +333,10 @@ long get_tagged_addr_ctrl(struct task_struct *task) if (is_compat_thread(ti)) return -EINVAL; + /* + * The mm context's pmlen is set only when the tagged address ABI is + * enabled, so the effective PMLEN must be extracted from envcfg.PMM. + */ switch (task->thread.envcfg & ENVCFG_PMM) { case ENVCFG_PMM_PMLEN_7: ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7); @@ -307,6 +346,9 @@ long get_tagged_addr_ctrl(struct task_struct *task) break; } + if (task->mm->context.pmlen) + ret |= PR_TAGGED_ADDR_ENABLE; + return ret; } @@ -316,6 +358,24 @@ static bool try_to_set_pmm(unsigned long value) return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; } +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr_disabled", + .mode = 0644, + .data = &tagged_addr_disabled, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +}; + static int __init tagged_addr_init(void) { if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) @@ -329,6 +389,9 @@ static int __init tagged_addr_init(void) have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; } core_initcall(tagged_addr_init); -- cgit v1.2.3 From 78844482a1c939a972681842f8ee2a8ddb202441 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:47 -0700 Subject: riscv: Allow ptrace control of the tagged address ABI This allows a tracer to control the ABI of the tracee, as on arm64. 
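For illustration only, not part of this patch: a sketch of how a tracer might read and write the tracee's tagged address control through the new regset, assuming the tracee is already attached and stopped under ptrace and that the headers carry the NT_RISCV_TAGGED_ADDR_CTRL note type added below. Error handling is elided.

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_RISCV_TAGGED_ADDR_CTRL
# define NT_RISCV_TAGGED_ADDR_CTRL 0x902
#endif

static long read_tagged_addr_ctrl(pid_t pid)
{
	long ctrl = 0;
	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };

	/* Same encoding as the prctl() value: PR_TAGGED_ADDR_ENABLE | PR_PMLEN. */
	ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_RISCV_TAGGED_ADDR_CTRL, &iov);
	return ctrl;
}

static int write_tagged_addr_ctrl(pid_t pid, long ctrl)
{
	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };

	/* Goes through set_tagged_addr_ctrl(), so the same validation applies. */
	return ptrace(PTRACE_SETREGSET, pid, (void *)(long)NT_RISCV_TAGGED_ADDR_CTRL, &iov);
}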
Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-7-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/ptrace.c | 42 ++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/elf.h | 1 + 2 files changed, 43 insertions(+) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 92731ff8c79a..ea67e9fb7a58 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -28,6 +28,9 @@ enum riscv_regset { #ifdef CONFIG_RISCV_ISA_V REGSET_V, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + REGSET_TAGGED_ADDR_CTRL, +#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -152,6 +155,35 @@ static int riscv_vr_set(struct task_struct *target, } #endif +#ifdef CONFIG_RISCV_ISA_SUPM +static int tagged_addr_ctrl_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long ctrl = get_tagged_addr_ctrl(target); + + if (IS_ERR_VALUE(ctrl)) + return ctrl; + + return membuf_write(&to, &ctrl, sizeof(ctrl)); +} + +static int tagged_addr_ctrl_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + long ctrl; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + return set_tagged_addr_ctrl(target, ctrl); +} +#endif + static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, @@ -182,6 +214,16 @@ static const struct user_regset riscv_user_regset[] = { .set = riscv_vr_set, }, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + [REGSET_TAGGED_ADDR_CTRL] = { + .core_note_type = NT_RISCV_TAGGED_ADDR_CTRL, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = tagged_addr_ctrl_get, + .set = tagged_addr_ctrl_set, + }, +#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index b9935988da5c..a920cf8934dc 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -450,6 +450,7 @@ typedef struct elf64_shdr { #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ #define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ +#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ -- cgit v1.2.3 From 3c2e0aff7b4f03fbc11b7d63c8db5b94a48978cf Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:49 -0700 Subject: riscv: hwprobe: Export the Supm ISA extension Supm is a virtual ISA extension defined in the RISC-V Pointer Masking specification, which indicates that pointer masking is available in U-mode. It can be provided by either Smnpm or Ssnpm, depending on which mode the kernel runs in. Userspace should not care about this distinction, so export Supm instead of either underlying extension. Hide the extension if the kernel was compiled without support for the pointer masking prctl() interface. 
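For illustration only, not part of this patch: a sketch of how userspace might detect Supm before using the pointer masking prctl(), assuming headers that carry the RISCV_HWPROBE_EXT_SUPM bit added below. The raw syscall is used here for brevity; a libc or vDSO wrapper could be used instead. Error handling is elided.

#include <asm/hwprobe.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <unistd.h>

static bool have_supm(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

	/* cpusetsize == 0 and cpus == NULL: report values common to all online harts. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return false;

	return pair.value & RISCV_HWPROBE_EXT_SUPM;
}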
Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-9-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/hwprobe.rst | 3 +++ arch/riscv/include/uapi/asm/hwprobe.h | 1 + arch/riscv/kernel/sys_hwprobe.c | 3 +++ 3 files changed, 7 insertions(+) (limited to 'arch/riscv/kernel') diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst index 85b709257918..b9aec2e5bbd4 100644 --- a/Documentation/arch/riscv/hwprobe.rst +++ b/Documentation/arch/riscv/hwprobe.rst @@ -239,6 +239,9 @@ The following keys are defined: ratified in commit 98918c844281 ("Merge pull request #1217 from riscv/zawrs") of riscv-isa-manual. + * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as + defined in version 1.0 of the RISC-V Pointer Masking extensions. + * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: Deprecated. Returns similar values to :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF`, but the key was mistakenly classified as a bitmask rather than a value. diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 1e153cda57db..868ff41b93d6 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -72,6 +72,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) #define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) #define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index cea0ca2bf2a2..0ac78e9f7c94 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -150,6 +150,9 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, EXT_KEY(ZFH); EXT_KEY(ZFHMIN); } + + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + EXT_KEY(SUPM); #undef EXT_KEY } -- cgit v1.2.3 From 1658ef4314b37ff4858a6c207646ff9d280ca4f7 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:45 +0100 Subject: riscv: Implement cmpxchg8/16() using Zabha This adds runtime support for Zabha in cmpxchg8/16() operations. Note that in the absence of Zacas support in the toolchain, CAS instructions from Zabha won't be used. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-6-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 18 ++++++++++ arch/riscv/Makefile | 3 ++ arch/riscv/include/asm/cmpxchg.h | 78 +++++++++++++++++++++++++--------------- arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpufeature.c | 1 + 5 files changed, 72 insertions(+), 29 deletions(-) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 3542efe3088b..668be90a42e4 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -632,6 +632,24 @@ config RISCV_ISA_ZAWRS use of these instructions in the kernel when the Zawrs extension is detected at boot. 
+config TOOLCHAIN_HAS_ZABHA + bool + default y + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) + depends on AS_HAS_OPTION_ARCH + +config RISCV_ISA_ZABHA + bool "Zabha extension support for atomic byte/halfword operations" + depends on TOOLCHAIN_HAS_ZABHA + depends on RISCV_ALTERNATIVE + default y + help + Enable the use of the Zabha ISA-extension to implement kernel + byte/halfword atomic memory operations when it is detected at boot. + + If you don't know what to do here, say Y. + config TOOLCHAIN_HAS_ZACAS bool default y diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 3700a1574413..9fe1ee740dda 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -85,6 +85,9 @@ endif # Check if the toolchain supports Zacas riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas +# Check if the toolchain supports Zabha +riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha + # Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 39c1daf39f6a..1f4cd12e4664 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -108,34 +108,49 @@ * indicated by comparing RETURN with OLD. */ -#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __oldx = (ulong)(o) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z5\n" \ - " bne %1, %z3, 1f\n" \ - " and %1, %0, %z6\n" \ - " or %1, %1, %z4\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - "1:\n" \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" ((long)__oldx), "rJ" (__newx), \ - "rJ" (__mask), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ + \ + __asm__ __volatile__ ( \ + prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __oldx = (ulong)(o) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ + " and %1, %0, %z6\n" \ + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ + "rJ" (__mask), "rJ" (~__mask) \ + : "memory"); \ + \ + r = 
(__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) #define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ @@ -177,8 +192,13 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ + prepend, append, \ + __ret, __ptr, __old, __new); \ + break; \ case 2: \ - __arch_cmpxchg_masked(sc_cas_sfx, prepend, append, \ + __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ + prepend, append, \ __ret, __ptr, __old, __new); \ break; \ case 4: \ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 46d9de54179e..74bcb0e2bd1f 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -93,6 +93,7 @@ #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_ZABHA 87 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c..5e743d8d34f5 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -322,6 +322,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), + __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), -- cgit v1.2.3 From 2d36fe89d872f1e655670280ce13a8dbe9d366a7 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:51 +0100 Subject: riscv: Add ISA extension parsing for Ziccrse Add support to parse the Ziccrse string in the riscv,isa string. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-12-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpufeature.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/riscv/kernel') diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 74bcb0e2bd1f..0aa3c3f5e682 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -94,6 +94,7 @@ #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 #define RISCV_ISA_EXT_ZABHA 87 +#define RISCV_ISA_EXT_ZICCRSE 88 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 5e743d8d34f5..5f453a039ec9 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -314,6 +314,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { riscv_ext_zicbom_validate), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate), + __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), -- cgit v1.2.3 From ab83647fadae2f1f723119dc066b39a461d6d288 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:53 +0100 Subject: riscv: Add qspinlock support In order to produce a generic kernel, a user can select CONFIG_COMBO_SPINLOCKS which will fallback at runtime to the ticket spinlock implementation if Zabha or Ziccrse are not present. 
Note that we can't use alternatives here because the discovery of extensions is done too late and we need to start with the qspinlock implementation because the ticket spinlock implementation would pollute the spinlock value, so let's use static keys. This is largely based on Guo's work and Leonardo reviews at [1]. Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1] Signed-off-by: Guo Ren Signed-off-by: Alexandre Ghiti Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-14-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- .../locking/queued-spinlocks/arch-support.txt | 2 +- arch/riscv/Kconfig | 34 ++++++++++++++++ arch/riscv/include/asm/Kbuild | 4 +- arch/riscv/include/asm/spinlock.h | 47 ++++++++++++++++++++++ arch/riscv/kernel/setup.c | 37 +++++++++++++++++ include/asm-generic/qspinlock.h | 2 + include/asm-generic/ticket_spinlock.h | 2 + 7 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 arch/riscv/include/asm/spinlock.h (limited to 'arch/riscv/kernel') diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 22f2990392ff..cf26042480e2 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -20,7 +20,7 @@ | openrisc: | ok | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | TODO | | sparc: | ok | diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 093ee6537331..f5698ecc5ccc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -82,6 +82,7 @@ config RISCV select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if RISCV_M_MODE @@ -507,6 +508,39 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. +choice + prompt "RISC-V spinlock type" + default RISCV_COMBO_SPINLOCKS + +config RISCV_TICKET_SPINLOCKS + bool "Using ticket spinlock" + +config RISCV_QUEUED_SPINLOCKS + bool "Using queued spinlock" + depends on SMP && MMU && NONPORTABLE + select ARCH_USE_QUEUED_SPINLOCKS + help + The queued spinlock implementation requires the forward progress + guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or + LR/SC with Ziccrse provide such guarantee. + + Select this if and only if Zabha or Ziccrse is available on your + platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms + without one of those extensions. + + If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks + when supported and otherwise ticket spinlocks. + +config RISCV_COMBO_SPINLOCKS + bool "Using combo spinlock" + depends on SMP && MMU + select ARCH_USE_QUEUED_SPINLOCKS + help + Embed both queued spinlock and ticket lock so that the spinlock + implementation can be chosen at runtime. 
+ +endchoice + config RISCV_ALTERNATIVE bool depends on !XIP_KERNEL diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1461af12da6e..de13d5a234f8 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -6,10 +6,12 @@ generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h generic-y += mmzone.h +generic-y += mcs_spinlock.h generic-y += parport.h -generic-y += spinlock.h generic-y += spinlock_types.h +generic-y += ticket_spinlock.h generic-y += qrwlock.h generic-y += qrwlock_types.h +generic-y += qspinlock.h generic-y += user.h generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h new file mode 100644 index 000000000000..e5121b89acea --- /dev/null +++ b/arch/riscv/include/asm/spinlock.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_RISCV_SPINLOCK_H +#define __ASM_RISCV_SPINLOCK_H + +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS +#define _Q_PENDING_LOOPS (1 << 9) + +#define __no_arch_spinlock_redefine +#include +#include +#include + +/* + * TODO: Use an alternative instead of a static key when we are able to parse + * the extensions string earlier in the boot process. + */ +DECLARE_STATIC_KEY_TRUE(qspinlock_key); + +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ +static __always_inline type arch_spin_##op(type_lock lock) \ +{ \ + if (static_branch_unlikely(&qspinlock_key)) \ + return queued_spin_##op(lock); \ + return ticket_spin_##op(lock); \ +} + +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) + +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) + +#include + +#else + +#include + +#endif + +#include + +#endif /* __ASM_RISCV_SPINLOCK_H */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index a2cde65b69e9..438e4f6ad2ad 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -244,6 +244,42 @@ static void __init parse_dtb(void) #endif } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) +DEFINE_STATIC_KEY_TRUE(qspinlock_key); +EXPORT_SYMBOL(qspinlock_key); +#endif + +static void __init riscv_spinlock_init(void) +{ + char *using_ext = NULL; + + if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) { + pr_info("Ticket spinlock: enabled\n"); + return; + } + + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && + riscv_isa_extension_available(NULL, ZABHA) && + riscv_isa_extension_available(NULL, ZACAS)) { + using_ext = "using Zabha"; + } else if (riscv_isa_extension_available(NULL, ZICCRSE)) { + using_ext = "using Ziccrse"; + } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) + else { + static_branch_disable(&qspinlock_key); + pr_info("Ticket spinlock: enabled\n"); + return; + } +#endif + + if (!using_ext) + pr_err("Queued spinlock without Zabha or Ziccrse"); + else + pr_info("Queued spinlock %s: enabled\n", using_ext); +} + extern void __init init_rt_signal_env(void); void __init setup_arch(char **cmdline_p) @@ -297,6 +333,7 @@ void __init setup_arch(char **cmdline_p) riscv_set_dma_cache_alignment(); riscv_user_isa_enable(); + riscv_spinlock_init(); } bool arch_cpu_is_hotpluggable(int cpu) diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 0655aa5b57b2..bf47cca2c375 100644 --- 
a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) } #endif +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h index cfcff22b37b3..325779970d8a 100644 --- a/include/asm-generic/ticket_spinlock.h +++ b/include/asm-generic/ticket_spinlock.h @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) return (s16)((val >> 16) - (val & 0xffff)) > 1; } +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * ticket spinlock functions. @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) #define arch_spin_lock(l) ticket_spin_lock(l) #define arch_spin_trylock(l) ticket_spin_trylock(l) #define arch_spin_unlock(l) ticket_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ -- cgit v1.2.3
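For clarity, an editorial illustration rather than code from the series: the expansion of SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) from the new asm/spinlock.h, showing how each arch_spin_*() helper in the combo configuration dispatches between the queued and ticket implementations through the qspinlock_key static key, which riscv_spinlock_init() disables when neither Zabha+Zacas nor Ziccrse is available.

/* Expansion of SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *). */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/* qspinlock_key defaults to true; cleared once at boot if qspinlocks cannot be used. */
	if (static_branch_unlikely(&qspinlock_key))
		return queued_spin_lock(lock);
	return ticket_spin_lock(lock);
}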