Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig | 4
-rw-r--r--  arch/riscv/Kconfig.errata | 11
-rw-r--r--  arch/riscv/Kconfig.socs | 5
-rw-r--r--  arch/riscv/Kconfig.vendor | 26
-rw-r--r--  arch/riscv/Makefile.postlink | 8
-rw-r--r--  arch/riscv/boot/dts/Makefile | 1
-rw-r--r--  arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi | 3
-rw-r--r--  arch/riscv/boot/dts/spacemit/Makefile | 2
-rw-r--r--  arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts | 26
-rw-r--r--  arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi | 20
-rw-r--r--  arch/riscv/boot/dts/spacemit/k1.dtsi | 452
-rw-r--r--  arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts | 18
-rw-r--r--  arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts | 18
-rw-r--r--  arch/riscv/boot/dts/thead/th1520.dtsi | 16
-rw-r--r--  arch/riscv/configs/defconfig | 4
-rw-r--r--  arch/riscv/errata/thead/errata.c | 28
-rw-r--r--  arch/riscv/include/asm/Kbuild | 1
-rw-r--r--  arch/riscv/include/asm/bitops.h | 20
-rw-r--r--  arch/riscv/include/asm/bugs.h | 22
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 2
-rw-r--r--  arch/riscv/include/asm/csr.h | 15
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 3
-rw-r--r--  arch/riscv/include/asm/ftrace.h | 45
-rw-r--r--  arch/riscv/include/asm/futex.h | 2
-rw-r--r--  arch/riscv/include/asm/hwprobe.h | 5
-rw-r--r--  arch/riscv/include/asm/kvm_host.h | 5
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi.h | 1
-rw-r--r--  arch/riscv/include/asm/page.h | 1
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 72
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 2
-rw-r--r--  arch/riscv/include/asm/sbi.h | 1
-rw-r--r--  arch/riscv/include/asm/spinlock.h | 5
-rw-r--r--  arch/riscv/include/asm/switch_to.h | 2
-rw-r--r--  arch/riscv/include/asm/tlb.h | 18
-rw-r--r--  arch/riscv/include/asm/vector.h | 222
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/thead.h | 47
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h | 19
-rw-r--r--  arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h | 37
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h | 1
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h | 3
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 7
-rw-r--r--  arch/riscv/include/uapi/asm/vendor/thead.h | 3
-rw-r--r--  arch/riscv/kernel/Makefile | 2
-rw-r--r--  arch/riscv/kernel/bugs.c | 60
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 59
-rw-r--r--  arch/riscv/kernel/entry.S | 21
-rw-r--r--  arch/riscv/kernel/ftrace.c | 17
-rw-r--r--  arch/riscv/kernel/kernel_mode_vector.c | 8
-rw-r--r--  arch/riscv/kernel/machine_kexec.c | 23
-rw-r--r--  arch/riscv/kernel/mcount.S | 24
-rw-r--r--  arch/riscv/kernel/module.c | 18
-rw-r--r--  arch/riscv/kernel/probes/kprobes.c | 2
-rw-r--r--  arch/riscv/kernel/process.c | 6
-rw-r--r--  arch/riscv/kernel/setup.c | 4
-rw-r--r--  arch/riscv/kernel/signal.c | 6
-rw-r--r--  arch/riscv/kernel/smp.c | 1
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 4
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c | 5
-rw-r--r--  arch/riscv/kernel/traps.c | 6
-rw-r--r--  arch/riscv/kernel/vector.c | 28
-rw-r--r--  arch/riscv/kernel/vendor_extensions.c | 10
-rw-r--r--  arch/riscv/kernel/vendor_extensions/Makefile | 2
-rw-r--r--  arch/riscv/kernel/vendor_extensions/thead.c | 29
-rw-r--r--  arch/riscv/kernel/vendor_extensions/thead_hwprobe.c | 19
-rw-r--r--  arch/riscv/kvm/Makefile | 1
-rw-r--r--  arch/riscv/kvm/vcpu.c | 7
-rw-r--r--  arch/riscv/kvm/vcpu_exit.c | 37
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c | 6
-rw-r--r--  arch/riscv/kvm/vcpu_sbi.c | 4
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_system.c | 73
-rw-r--r--  arch/riscv/lib/Makefile | 3
-rw-r--r--  arch/riscv/lib/crc32-riscv.c (renamed from arch/riscv/lib/crc32.c) | 25
-rw-r--r--  arch/riscv/mm/fault.c | 52
-rw-r--r--  arch/riscv/mm/init.c | 29
-rw-r--r--  arch/riscv/mm/kasan_init.c | 14
75 files changed, 1523 insertions, 285 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d4a7ca0388c0..7612c52e9b1e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -24,6 +24,7 @@ config RISCV
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CRC32 if RISCV_ISA_ZBC
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -146,9 +147,10 @@ config RISCV
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select HAVE_EBPF_JIT if MMU
select HAVE_GUP_FAST if MMU
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 2acc7d876e1f..e318119d570d 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -119,4 +119,15 @@ config ERRATA_THEAD_PMU
If you don't know what to do here, say "Y".
+config ERRATA_THEAD_GHOSTWRITE
+ bool "Apply T-Head Ghostwrite errata"
+ depends on ERRATA_THEAD && RISCV_ISA_XTHEADVECTOR
+ default y
+ help
+ The T-Head C9xx cores have a vulnerability in the xtheadvector
+ instruction set. When this errata is enabled, the CPUs will be probed
+ to determine if they are vulnerable and disable xtheadvector.
+
+ If you don't know what to do here, say "Y".
+
endmenu # "CPU errata selection"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index f51bb24bc84c..1916cf7ba450 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -24,6 +24,11 @@ config ARCH_SOPHGO
help
This enables support for Sophgo SoC platform hardware.
+config ARCH_SPACEMIT
+ bool "SpacemiT SoCs"
+ help
+ This enables support for SpacemiT SoC platform hardware.
+
config ARCH_STARFIVE
def_bool SOC_STARFIVE
diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor
index 6f1cdd32ed29..b096548fe0ff 100644
--- a/arch/riscv/Kconfig.vendor
+++ b/arch/riscv/Kconfig.vendor
@@ -16,4 +16,30 @@ config RISCV_ISA_VENDOR_EXT_ANDES
If you don't know what to do here, say Y.
endmenu
+menu "T-Head"
+config RISCV_ISA_VENDOR_EXT_THEAD
+ bool "T-Head vendor extension support"
+ select RISCV_ISA_VENDOR_EXT
+ default y
+ help
+ Say N here to disable detection of and support for all T-Head vendor
+ extensions. Without this option enabled, T-Head vendor extensions will
+ not be detected at boot and their presence not reported to userspace.
+
+ If you don't know what to do here, say Y.
+
+config RISCV_ISA_XTHEADVECTOR
+ bool "xtheadvector extension support"
+ depends on RISCV_ISA_VENDOR_EXT_THEAD
+ depends on RISCV_ISA_V
+ depends on FPU
+ default y
+ help
+ Say N here if you want to disable all xtheadvector related procedures
+ in the kernel. This will disable vector for any T-Head board that
+ contains xtheadvector rather than the standard vector.
+
+ If you don't know what to do here, say Y.
+endmenu
+
endmenu
diff --git a/arch/riscv/Makefile.postlink b/arch/riscv/Makefile.postlink
index 829b9abc91f6..6b0580949b6a 100644
--- a/arch/riscv/Makefile.postlink
+++ b/arch/riscv/Makefile.postlink
@@ -10,6 +10,7 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
quiet_cmd_relocs_check = CHKREL $@
cmd_relocs_check = \
@@ -19,11 +20,6 @@ ifdef CONFIG_RELOCATABLE
quiet_cmd_cp_vmlinux_relocs = CPREL vmlinux.relocs
cmd_cp_vmlinux_relocs = cp vmlinux vmlinux.relocs
-quiet_cmd_relocs_strip = STRIPREL $@
-cmd_relocs_strip = $(OBJCOPY) --remove-section='.rel.*' \
- --remove-section='.rel__*' \
- --remove-section='.rela.*' \
- --remove-section='.rela__*' $@
endif
# `@true` prevents complaint when there is nothing to be done
@@ -33,7 +29,7 @@ vmlinux: FORCE
ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
$(call if_changed,cp_vmlinux_relocs)
- $(call if_changed,relocs_strip)
+ $(call if_changed,strip_relocs)
endif
clean:
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index fdae05bbf556..bff887d38abe 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -5,6 +5,7 @@ subdir-y += microchip
subdir-y += renesas
subdir-y += sifive
subdir-y += sophgo
+subdir-y += spacemit
subdir-y += starfive
subdir-y += thead
diff --git a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
index 64c3c2e6cbe0..6367112e614a 100644
--- a/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
+++ b/arch/riscv/boot/dts/allwinner/sun20i-d1s.dtsi
@@ -27,7 +27,8 @@
riscv,isa = "rv64imafdc";
riscv,isa-base = "rv64i";
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr",
- "zifencei", "zihpm";
+ "zifencei", "zihpm", "xtheadvector";
+ thead,vlenb = <128>;
#cooling-cells = <2>;
cpu0_intc: interrupt-controller {
diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile
new file mode 100644
index 000000000000..ac617319a574
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_SPACEMIT) += k1-bananapi-f3.dtb
diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
new file mode 100644
index 000000000000..1d617b40a2d5
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
+ */
+
+#include "k1.dtsi"
+#include "k1-pinctrl.dtsi"
+
+/ {
+ model = "Banana Pi BPI-F3";
+ compatible = "bananapi,bpi-f3", "spacemit,k1";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_2_cfg>;
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
new file mode 100644
index 000000000000..a8eac5517f85
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1-pinctrl.dtsi
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (c) 2024 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+#define K1_PADCONF(pin, func) (((pin) << 16) | (func))
+
+&pinctrl {
+ uart0_2_cfg: uart0-2-cfg {
+ uart0-2-pins {
+ pinmux = <K1_PADCONF(68, 2)>,
+ <K1_PADCONF(69, 2)>;
+
+ bias-pull-up = <0>;
+ drive-strength = <32>;
+ };
+ };
+};
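
As an aside on the pinmux cells above: K1_PADCONF packs the pad number into the upper half-word and the mux function into the lower one. A small stand-alone C sketch (illustrative only, not part of the patch) shows what the two uart0 entries expand to.

#include <stdio.h>

/* Same packing as the K1_PADCONF macro in k1-pinctrl.dtsi above. */
#define K1_PADCONF(pin, func) (((pin) << 16) | (func))

int main(void)
{
	/* uart0-2-pins: pads 68 and 69, mux function 2 */
	printf("0x%08x\n", K1_PADCONF(68, 2));	/* prints 0x00440002 */
	printf("0x%08x\n", K1_PADCONF(69, 2));	/* prints 0x00450002 */
	return 0;
}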
diff --git a/arch/riscv/boot/dts/spacemit/k1.dtsi b/arch/riscv/boot/dts/spacemit/k1.dtsi
new file mode 100644
index 000000000000..c670ebf8fa12
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1.dtsi
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
+ */
+
+/dts-v1/;
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "SpacemiT K1";
+ compatible = "spacemit,k1";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <24000000>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu_0>;
+ };
+ core1 {
+ cpu = <&cpu_1>;
+ };
+ core2 {
+ cpu = <&cpu_2>;
+ };
+ core3 {
+ cpu = <&cpu_3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu_4>;
+ };
+ core1 {
+ cpu = <&cpu_5>;
+ };
+ core2 {
+ cpu = <&cpu_6>;
+ };
+ core3 {
+ cpu = <&cpu_7>;
+ };
+ };
+ };
+
+ cpu_0: cpu@0 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <0>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu0_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_1: cpu@1 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <1>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu1_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_2: cpu@2 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <2>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu2_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_3: cpu@3 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <3>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster0_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu3_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_4: cpu@4 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <4>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu4_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_5: cpu@5 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <5>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu5_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_6: cpu@6 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <6>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu6_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cpu_7: cpu@7 {
+ compatible = "spacemit,x60", "riscv";
+ device_type = "cpu";
+ reg = <7>;
+ riscv,isa = "rv64imafdcv_zicbom_zicbop_zicboz_zicntr_zicond_zicsr_zifencei_zihintpause_zihpm_zfh_zba_zbb_zbc_zbs_zkt_zvfh_zvkt_sscofpmf_sstc_svinval_svnapot_svpbmt";
+ riscv,isa-base = "rv64i";
+ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", "zicbom",
+ "zicbop", "zicboz", "zicntr", "zicond", "zicsr",
+ "zifencei", "zihintpause", "zihpm", "zfh", "zba",
+ "zbb", "zbc", "zbs", "zkt", "zvfh", "zvkt",
+ "sscofpmf", "sstc", "svinval", "svnapot", "svpbmt";
+ riscv,cbom-block-size = <64>;
+ riscv,cbop-block-size = <64>;
+ riscv,cboz-block-size = <64>;
+ i-cache-block-size = <64>;
+ i-cache-size = <32768>;
+ i-cache-sets = <128>;
+ d-cache-block-size = <64>;
+ d-cache-size = <32768>;
+ d-cache-sets = <128>;
+ next-level-cache = <&cluster1_l2_cache>;
+ mmu-type = "riscv,sv39";
+
+ cpu7_intc: interrupt-controller {
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
+
+ cluster0_l2_cache: l2-cache0 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <524288>;
+ cache-sets = <512>;
+ cache-unified;
+ };
+
+ cluster1_l2_cache: l2-cache1 {
+ compatible = "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-size = <524288>;
+ cache-sets = <512>;
+ cache-unified;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&plic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-noncoherent;
+ ranges;
+
+ uart0: serial@d4017000 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017000 0x0 0x100>;
+ interrupts = <42>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart2: serial@d4017100 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017100 0x0 0x100>;
+ interrupts = <44>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@d4017200 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017200 0x0 0x100>;
+ interrupts = <45>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart4: serial@d4017300 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017300 0x0 0x100>;
+ interrupts = <46>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart5: serial@d4017400 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017400 0x0 0x100>;
+ interrupts = <47>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart6: serial@d4017500 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017500 0x0 0x100>;
+ interrupts = <48>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart7: serial@d4017600 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017600 0x0 0x100>;
+ interrupts = <49>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart8: serial@d4017700 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017700 0x0 0x100>;
+ interrupts = <50>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart9: serial@d4017800 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xd4017800 0x0 0x100>;
+ interrupts = <51>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ pinctrl: pinctrl@d401e000 {
+ compatible = "spacemit,k1-pinctrl";
+ reg = <0x0 0xd401e000 0x0 0x400>;
+ };
+
+ plic: interrupt-controller@e0000000 {
+ compatible = "spacemit,k1-plic", "sifive,plic-1.0.0";
+ reg = <0x0 0xe0000000 0x0 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>,
+ <&cpu2_intc 11>, <&cpu2_intc 9>,
+ <&cpu3_intc 11>, <&cpu3_intc 9>,
+ <&cpu4_intc 11>, <&cpu4_intc 9>,
+ <&cpu5_intc 11>, <&cpu5_intc 9>,
+ <&cpu6_intc 11>, <&cpu6_intc 9>,
+ <&cpu7_intc 11>, <&cpu7_intc 9>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ riscv,ndev = <159>;
+ };
+
+ clint: timer@e4000000 {
+ compatible = "spacemit,k1-clint", "sifive,clint0";
+ reg = <0x0 0xe4000000 0x0 0x10000>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>,
+ <&cpu1_intc 3>, <&cpu1_intc 7>,
+ <&cpu2_intc 3>, <&cpu2_intc 7>,
+ <&cpu3_intc 3>, <&cpu3_intc 7>,
+ <&cpu4_intc 3>, <&cpu4_intc 7>,
+ <&cpu5_intc 3>, <&cpu5_intc 7>,
+ <&cpu6_intc 3>, <&cpu6_intc 7>,
+ <&cpu7_intc 3>, <&cpu7_intc 7>;
+ };
+
+ sec_uart1: serial@f0612000 {
+ compatible = "spacemit,k1-uart", "intel,xscale-uart";
+ reg = <0x0 0xf0612000 0x0 0x100>;
+ interrupts = <43>;
+ clock-frequency = <14857000>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "reserved"; /* for TEE usage */
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
index 0d248b671d4b..3bd62ab78523 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-milkv-mars.dts
@@ -53,7 +53,23 @@
status = "okay";
};
+&sysgpio {
+ usb0_pins: usb0-0 {
+ vbus-pins {
+ pinmux = <GPIOMUX(25, GPOUT_SYS_USB_DRIVE_VBUS,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+};
+
&usb0 {
- dr_mode = "peripheral";
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
status = "okay";
};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
index fe4a490ecc61..b764d4d92fd9 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
@@ -80,7 +80,23 @@
status = "okay";
};
+&sysgpio {
+ usb0_pins: usb0-0 {
+ vbus-pins {
+ pinmux = <GPIOMUX(25, GPOUT_SYS_USB_DRIVE_VBUS,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+};
+
&usb0 {
- dr_mode = "peripheral";
+ dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins>;
status = "okay";
};
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index acfe030e803a..527336417765 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -599,6 +599,22 @@
status = "disabled";
};
+ mbox_910t: mailbox@ffffc38000 {
+ compatible = "thead,th1520-mbox";
+ reg = <0xff 0xffc38000 0x0 0x6000>,
+ <0xff 0xffc40000 0x0 0x6000>,
+ <0xff 0xffc4c000 0x0 0x2000>,
+ <0xff 0xffc54000 0x0 0x2000>;
+ reg-names = "local", "remote-icu0", "remote-icu1", "remote-icu2";
+ clocks = <&clk CLK_MBOX0>, <&clk CLK_MBOX1>, <&clk CLK_MBOX2>,
+ <&clk CLK_MBOX3>;
+ clock-names = "clk-local", "clk-remote-icu0", "clk-remote-icu1",
+ "clk-remote-icu2";
+ interrupt-parent = <&plic>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
gpio@fffff41000 {
compatible = "snps,dw-apb-gpio";
reg = <0xff 0xfff41000 0x0 0x1000>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b4a37345703e..0f7dcbe3c45b 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -10,7 +10,6 @@ CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -30,6 +29,7 @@ CONFIG_ARCH_MICROCHIP=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_SIFIVE=y
CONFIG_ARCH_SOPHGO=y
+CONFIG_ARCH_SPACEMIT=y
CONFIG_SOC_STARFIVE=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_THEAD=y
@@ -167,6 +167,7 @@ CONFIG_PINCTRL_SOPHGO_CV1800B=y
CONFIG_PINCTRL_SOPHGO_CV1812H=y
CONFIG_PINCTRL_SOPHGO_SG2000=y
CONFIG_PINCTRL_SOPHGO_SG2002=y
+CONFIG_PINCTRL_TH1520=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_SIFIVE=y
CONFIG_POWER_RESET_GPIO_RESTART=y
@@ -242,6 +243,7 @@ CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
CONFIG_DW_AXI_DMAC=y
+CONFIG_DWMAC_THEAD=m
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index e24770a77932..0b942183f708 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -10,6 +10,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/dma-noncoherent.h>
@@ -142,6 +143,31 @@ static bool errata_probe_pmu(unsigned int stage,
return true;
}
+static bool errata_probe_ghostwrite(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
+{
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_GHOSTWRITE))
+ return false;
+
+ /*
+ * target-c9xx cores report arch_id and impid as 0
+ *
+ * While ghostwrite may not affect all c9xx cores that implement
+ * xtheadvector, there is no futher granularity than c9xx. Assume
+ * vulnerable for this entire class of processors when xtheadvector is
+ * enabled.
+ */
+ if (arch_id != 0 || impid != 0)
+ return false;
+
+ if (stage != RISCV_ALTERNATIVES_EARLY_BOOT)
+ return false;
+
+ ghostwrite_set_vulnerable();
+
+ return true;
+}
+
static u32 thead_errata_probe(unsigned int stage,
unsigned long archid, unsigned long impid)
{
@@ -155,6 +181,8 @@ static u32 thead_errata_probe(unsigned int stage,
if (errata_probe_pmu(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+ errata_probe_ghostwrite(stage, archid, impid);
+
return cpu_req_errata;
}
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index de13d5a234f8..bd5fc9403295 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -4,6 +4,7 @@ syscall-y += syscall_table_64.h
generic-y += early_ioremap.h
generic-y += flat.h
+generic-y += fprobe.h
generic-y += kvm_para.h
generic-y += mmzone.h
generic-y += mcs_spinlock.h
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index fae152ea0508..c6bd3d8354a9 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -228,7 +228,7 @@ legacy:
*
* This operation may be reordered on other architectures than x86.
*/
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(or, __NOP, nr, addr);
}
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
*
* This operation can be reordered on other architectures other than x86.
*/
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(and, __NOT, nr, addr);
}
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
return __test_and_op_bit(xor, __NOP, nr, addr);
}
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
__op_bit(or, __NOP, nr, addr);
}
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*/
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
__op_bit(and, __NOT, nr, addr);
}
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
__op_bit(xor, __NOP, nr, addr);
}
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and provides acquire barrier semantics.
* It can be used to implement bit locks.
*/
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
unsigned long nr, volatile unsigned long *addr)
{
return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
* non-atomic property here: it's a lot more instructions and we still have to
* provide release semantics anyway.
*/
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
unsigned long nr, volatile unsigned long *addr)
{
arch_clear_bit_unlock(nr, addr);
}
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
unsigned long res;
diff --git a/arch/riscv/include/asm/bugs.h b/arch/riscv/include/asm/bugs.h
new file mode 100644
index 000000000000..17ca0a947730
--- /dev/null
+++ b/arch/riscv/include/asm/bugs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for riscv vulnerabilities.
+ *
+ * Copyright (C) 2024 Rivos Inc.
+ */
+
+#ifndef __ASM_BUGS_H
+#define __ASM_BUGS_H
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+ UNAFFECTED,
+ MITIGATED,
+ VULNERABLE,
+};
+
+void ghostwrite_set_vulnerable(void);
+bool ghostwrite_enable_mitigation(void);
+enum mitigation_state ghostwrite_get_state(void);
+
+#endif /* __ASM_BUGS_H */
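
A rough sketch of how a caller might consume the interface declared above when reporting the Ghostwrite state. Only the three declarations come from bugs.h; the helper name and the strings are illustrative assumptions (the actual reporting code lives in the new arch/riscv/kernel/bugs.c, whose diff is not shown in this section).

/* Illustrative consumer of the bugs.h interface above; names and strings
 * here are assumptions, not taken from this series. */
static const char *ghostwrite_state_str(void)
{
	switch (ghostwrite_get_state()) {
	case UNAFFECTED:
		return "Not affected";
	case MITIGATED:
		return "Mitigation: xtheadvector disabled";
	case VULNERABLE:
	default:
		return "Vulnerable";
	}
}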
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 4bd054c54c21..569140d6e639 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -34,6 +34,8 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
+extern u32 thead_vlenb_of;
+
void __init riscv_user_isa_enable(void);
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 37bdea65bbd8..6fed42e37705 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -30,6 +30,12 @@
#define SR_VS_CLEAN _AC(0x00000400, UL)
#define SR_VS_DIRTY _AC(0x00000600, UL)
+#define SR_VS_THEAD _AC(0x01800000, UL) /* xtheadvector Status */
+#define SR_VS_OFF_THEAD _AC(0x00000000, UL)
+#define SR_VS_INITIAL_THEAD _AC(0x00800000, UL)
+#define SR_VS_CLEAN_THEAD _AC(0x01000000, UL)
+#define SR_VS_DIRTY_THEAD _AC(0x01800000, UL)
+
#define SR_XS _AC(0x00018000, UL) /* Extension Status */
#define SR_XS_OFF _AC(0x00000000, UL)
#define SR_XS_INITIAL _AC(0x00008000, UL)
@@ -315,6 +321,15 @@
#define CSR_STIMECMP 0x14D
#define CSR_STIMECMPH 0x15D
+/* xtheadvector symbolic CSR names */
+#define CSR_VXSAT 0x9
+#define CSR_VXRM 0xa
+
+/* xtheadvector CSR masks */
+#define CSR_VXRM_MASK 3
+#define CSR_VXRM_SHIFT 1
+#define CSR_VXSAT_MASK 1
+
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
#define CSR_SISELECT 0x150
#define CSR_SIREG 0x151
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 7c8a71a526a3..6e426ed7919a 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -25,7 +25,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_MAE 0
#define ERRATA_THEAD_PMU 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_GHOSTWRITE 2
+#define ERRATA_THEAD_NUMBER 3
#endif
#ifdef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 3d66437a1029..c4721ce44ca4 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -168,6 +168,11 @@ static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct
return arch_ftrace_regs(fregs)->sp;
}
+static __always_inline unsigned long ftrace_regs_get_frame_pointer(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->s0;
+}
+
static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs,
unsigned int n)
{
@@ -181,6 +186,11 @@ static __always_inline unsigned long ftrace_regs_get_return_value(const struct f
return arch_ftrace_regs(fregs)->a0;
}
+static __always_inline unsigned long ftrace_regs_get_return_address(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->ra;
+}
+
static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs,
unsigned long ret)
{
@@ -192,6 +202,20 @@ static __always_inline void ftrace_override_function_with_return(struct ftrace_r
arch_ftrace_regs(fregs)->epc = arch_ftrace_regs(fregs)->ra;
}
+static __always_inline struct pt_regs *
+ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs);
+
+ memcpy(&regs->a0, afregs->args, sizeof(afregs->args));
+ regs->epc = afregs->epc;
+ regs->ra = afregs->ra;
+ regs->sp = afregs->sp;
+ regs->s0 = afregs->s0;
+ regs->t1 = afregs->t1;
+ return regs;
+}
+
int ftrace_regs_query_register_offset(const char *name);
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
@@ -208,25 +232,4 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsi
#endif /* CONFIG_DYNAMIC_FTRACE */
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- unsigned long a1;
- unsigned long a0;
- unsigned long s0;
- unsigned long ra;
-};
-
-static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->a0;
-}
-
-static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->s0;
-}
-#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
-#endif
-
#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index fc8130f995c1..72be100afa23 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -85,7 +85,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__enable_user_access();
__asm__ __volatile__ (
- "1: lr.w.aqrl %[v],%[u] \n"
+ "1: lr.w %[v],%[u] \n"
" bne %[v],%z[ov],3f \n"
"2: sc.w.aqrl %[t],%z[nv],%[u] \n"
" bnez %[t],1b \n"
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 1ce1df6d0ff3..dd624523981c 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _ASM_HWPROBE_H
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 10
+#define RISCV_HWPROBE_MAX_KEY 11
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
@@ -21,6 +21,7 @@ static inline bool hwprobe_key_is_bitmask(__s64 key)
case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
case RISCV_HWPROBE_KEY_IMA_EXT_0:
case RISCV_HWPROBE_KEY_CPUPERF_0:
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
return true;
}
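
For context on how the new bitmask key surfaces to userspace, here is a minimal sketch of querying it through the riscv_hwprobe(2) syscall. The key name comes from this patch; the vendor/thead.h bit name, the syscall number macro and the error handling are assumptions, not taken from this diff.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/hwprobe.h>		/* struct riscv_hwprobe, RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 */
#include <asm/vendor/thead.h>	/* assumed to carry the per-extension bits */

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0,
	};

	/* cpusetsize == 0 and cpus == NULL means "all online harts" */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0)
		return 1;

	/* Bit name is an assumption mirroring the VENDOR_EXT_KEY() pattern. */
	printf("xtheadvector: %s\n",
	       (pair.value & RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR) ? "yes" : "no");
	return 0;
}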
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 35eab6e0f4ae..cc33e35cd628 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -87,6 +87,11 @@ struct kvm_vcpu_stat {
u64 csr_exit_kernel;
u64 signal_exits;
u64 exits;
+ u64 instr_illegal_exits;
+ u64 load_misaligned_exits;
+ u64 store_misaligned_exits;
+ u64 load_access_exits;
+ u64 store_access_exits;
};
struct kvm_arch_memory_slot {
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index b96705258cf9..4ed6203cdd30 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -85,6 +85,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 71aabc5c6713..125f5ecd9565 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -122,6 +122,7 @@ struct kernel_mapping {
extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
+extern unsigned long vmemmap_start_pfn;
#define is_kernel_mapping(x) \
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index f52264304f77..3e2aebea6312 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -12,16 +12,25 @@
#include <asm/tlb.h>
#ifdef CONFIG_MMU
-#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
- if (riscv_use_sbi_for_rfence())
+ if (riscv_use_sbi_for_rfence()) {
tlb_remove_ptdesc(tlb, pt);
- else
+ } else {
+ pagetable_dtor(pt);
tlb_remove_page_ptdesc(tlb, pt);
+ }
}
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -88,15 +97,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
}
}
-#define pud_alloc_one pud_alloc_one
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l4_enabled)
- return __pud_alloc_one(mm, addr);
-
- return NULL;
-}
-
#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
@@ -107,39 +107,8 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
- if (pgtable_l4_enabled) {
- struct ptdesc *ptdesc = virt_to_ptdesc(pud);
-
- pagetable_pud_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
- }
-}
-
-#define p4d_alloc_one p4d_alloc_one
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- if (pgtable_l5_enabled) {
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
- }
-
- return NULL;
-}
-
-static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
-#define p4d_free p4d_free
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (pgtable_l5_enabled)
- __p4d_free(mm, p4d);
+ if (pgtable_l4_enabled)
+ riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
@@ -161,9 +130,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ pgd = __pgd_alloc(mm, 0);
if (likely(pgd != NULL)) {
- memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */
sync_kernel_mappings(pgd);
}
@@ -175,10 +143,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
-
- pagetable_pmd_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -186,10 +151,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- struct ptdesc *ptdesc = page_ptdesc(pte);
-
- pagetable_pte_dtor(ptdesc);
- riscv_tlb_remove_ptdesc(tlb, ptdesc);
+ riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index d4e99eef90ac..050fdc49b5ad 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -87,7 +87,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn)
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6c82318065cf..3d250824178b 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -159,6 +159,7 @@ struct riscv_pmu_snapshot_data {
};
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
+#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PLAT_FW_EVENT 0xFFFF
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index e5121b89acea..52f11bfd0079 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -3,8 +3,11 @@
#ifndef __ASM_RISCV_SPINLOCK_H
#define __ASM_RISCV_SPINLOCK_H
-#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
+#ifdef CONFIG_QUEUED_SPINLOCKS
#define _Q_PENDING_LOOPS (1 << 9)
+#endif
+
+#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
#define __no_arch_spinlock_redefine
#include <asm/ticket_spinlock.h>
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 94e33216b2d9..0e71eb82f920 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -117,7 +117,7 @@ do { \
__set_prev_cpu(__prev->thread); \
if (has_fpu()) \
__switch_to_fpu(__prev, __next); \
- if (has_vector()) \
+ if (has_vector() || has_xtheadvector()) \
__switch_to_vector(__prev, __next); \
if (switch_to_should_flush_icache(__next)) \
local_flush_icache_all(); \
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1f6c38420d8e..50b63b5c15bd 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,24 +10,6 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
-#ifdef CONFIG_MMU
-#include <linux/swap.h>
-
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void __tlb_remove_table(void *table)
-{
- free_page_and_swap_cache(table);
-}
-
-#endif /* CONFIG_MMU */
-
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c7c023afbacd..e8a83f55be2b 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -18,6 +18,27 @@
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>
+#include <asm/vendorid_list.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
+
+#define __riscv_v_vstate_or(_val, TYPE) ({ \
+ typeof(_val) _res = _val; \
+ if (has_xtheadvector()) \
+ _res = (_res & ~SR_VS_THEAD) | SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = (_res & ~SR_VS) | SR_VS_##TYPE; \
+ _res; \
+})
+
+#define __riscv_v_vstate_check(_val, TYPE) ({ \
+ bool _res; \
+ if (has_xtheadvector()) \
+ _res = ((_val) & SR_VS_THEAD) == SR_VS_##TYPE##_THEAD; \
+ else \
+ _res = ((_val) & SR_VS) == SR_VS_##TYPE; \
+ _res; \
+})
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
@@ -41,39 +62,62 @@ static __always_inline bool has_vector(void)
return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X);
}
+static __always_inline bool has_xtheadvector_no_alternatives(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_isa_vendor_extension_available(THEAD_VENDOR_ID, XTHEADVECTOR);
+ else
+ return false;
+}
+
+static __always_inline bool has_xtheadvector(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return riscv_has_vendor_extension_unlikely(THEAD_VENDOR_ID,
+ RISCV_ISA_VENDOR_EXT_XTHEADVECTOR);
+ else
+ return false;
+}
+
static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
+ regs->status = __riscv_v_vstate_or(regs->status, CLEAN);
}
static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+ regs->status = __riscv_v_vstate_or(regs->status, DIRTY);
}
static inline void riscv_v_vstate_off(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
+ regs->status = __riscv_v_vstate_or(regs->status, OFF);
}
static inline void riscv_v_vstate_on(struct pt_regs *regs)
{
- regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
+ regs->status = __riscv_v_vstate_or(regs->status, INITIAL);
}
static inline bool riscv_v_vstate_query(struct pt_regs *regs)
{
- return (regs->status & SR_VS) != 0;
+ return !__riscv_v_vstate_check(regs->status, OFF);
}
static __always_inline void riscv_v_enable(void)
{
- csr_set(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_set(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_set(CSR_SSTATUS, SR_VS);
}
static __always_inline void riscv_v_disable(void)
{
- csr_clear(CSR_SSTATUS, SR_VS);
+ if (has_xtheadvector())
+ csr_clear(CSR_SSTATUS, SR_VS_THEAD);
+ else
+ csr_clear(CSR_SSTATUS, SR_VS);
}
static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
@@ -82,10 +126,36 @@ static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
"csrr %0, " __stringify(CSR_VSTART) "\n\t"
"csrr %1, " __stringify(CSR_VTYPE) "\n\t"
"csrr %2, " __stringify(CSR_VL) "\n\t"
- "csrr %3, " __stringify(CSR_VCSR) "\n\t"
- "csrr %4, " __stringify(CSR_VLENB) "\n\t"
: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
- "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
+ "=r" (dest->vcsr) : :);
+
+ if (has_xtheadvector()) {
+ unsigned long status;
+
+ /*
+ * CSR_VCSR is defined as
+ * [2:1] - vxrm[1:0]
+ * [0] - vxsat
+ * The earlier vector spec implemented by T-Head uses separate
+ * registers for the same bit-elements, so just combine those
+ * into the existing output field.
+ *
+ * Additionally T-Head cores need FS to be enabled when accessing
+ * the VXRM and VXSAT CSRs, otherwise ending in illegal instructions.
+ * Though the cores do not implement the VXRM and VXSAT fields in the
+ * FCSR CSR that vector-0.7.1 specifies.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+ dest->vcsr = csr_read(CSR_VXSAT) | csr_read(CSR_VXRM) << CSR_VXRM_SHIFT;
+
+ dest->vlenb = riscv_v_vsize / 32;
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ dest->vcsr = csr_read(CSR_VCSR);
+ dest->vlenb = csr_read(CSR_VLENB);
+ }
}
static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
@@ -96,9 +166,25 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src
"vsetvl x0, %2, %1\n\t"
".option pop\n\t"
"csrw " __stringify(CSR_VSTART) ", %0\n\t"
- "csrw " __stringify(CSR_VCSR) ", %3\n\t"
- : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
- "r" (src->vcsr) :);
+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl));
+
+ if (has_xtheadvector()) {
+ unsigned long status = csr_read(CSR_SSTATUS);
+
+ /*
+ * Similar to __vstate_csr_save above, restore values for the
+ * separate VXRM and VXSAT CSRs from the vcsr variable.
+ */
+ status = csr_read_set(CSR_STATUS, SR_FS_DIRTY);
+
+ csr_write(CSR_VXRM, (src->vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK);
+ csr_write(CSR_VXSAT, src->vcsr & CSR_VXSAT_MASK);
+
+ if ((status & SR_FS) != SR_FS_DIRTY)
+ csr_write(CSR_STATUS, status);
+ } else {
+ csr_write(CSR_VCSR, src->vcsr);
+ }
}
static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
@@ -108,19 +194,33 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
riscv_v_enable();
__vstate_csr_save(save_to);
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vse8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vse8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VSB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
riscv_v_disable();
}
@@ -130,19 +230,33 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_
unsigned long vl;
riscv_v_enable();
- asm volatile (
- ".option push\n\t"
- ".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
- "vle8.v v0, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v8, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v16, (%1)\n\t"
- "add %1, %1, %0\n\t"
- "vle8.v v24, (%1)\n\t"
- ".option pop\n\t"
- : "=&r" (vl) : "r" (datap) : "memory");
+ if (has_xtheadvector()) {
+ asm volatile (
+ "mv t0, %0\n\t"
+ THEAD_VSETVLI_T4X0E8M8D1
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ "add t0, t0, t4\n\t"
+ THEAD_VLB_V_V0T0
+ : : "r" (datap) : "memory", "t0", "t4");
+ } else {
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vle8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ }
__vstate_csr_restore(restore_from);
riscv_v_disable();
}
@@ -152,33 +266,41 @@ static inline void __riscv_v_vstate_discard(void)
unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
riscv_v_enable();
+ if (has_xtheadvector())
+ asm volatile (THEAD_VSETVLI_T4X0E8M8D1 : : : "t4");
+ else
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +zve32x\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ ".option pop\n\t": "=&r" (vl));
+
asm volatile (
".option push\n\t"
".option arch, +zve32x\n\t"
- "vsetvli %0, x0, e8, m8, ta, ma\n\t"
"vmv.v.i v0, -1\n\t"
"vmv.v.i v8, -1\n\t"
"vmv.v.i v16, -1\n\t"
"vmv.v.i v24, -1\n\t"
"vsetvl %0, x0, %1\n\t"
".option pop\n\t"
- : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ : "=&r" (vl) : "r" (vtype_inval));
+
riscv_v_disable();
}
static inline void riscv_v_vstate_discard(struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_OFF)
- return;
-
- __riscv_v_vstate_discard();
- __riscv_v_vstate_dirty(regs);
+ if (riscv_v_vstate_query(regs)) {
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+ }
}
static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) == SR_VS_DIRTY) {
+ if (__riscv_v_vstate_check(regs->status, DIRTY)) {
__riscv_v_vstate_save(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -187,7 +309,7 @@ static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
__riscv_v_vstate_restore(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -196,7 +318,7 @@ static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
static inline void riscv_v_vstate_set_restore(struct task_struct *task,
struct pt_regs *regs)
{
- if ((regs->status & SR_VS) != SR_VS_OFF) {
+ if (riscv_v_vstate_query(regs)) {
set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
riscv_v_vstate_on(regs);
}
@@ -270,6 +392,8 @@ struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
+static __always_inline bool has_xtheadvector_no_alternatives(void) { return false; }
+static __always_inline bool has_xtheadvector(void) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
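
As a side note on the VXRM/VXSAT handling in the save/restore paths above: a small, illustration-only sketch of the packing the xtheadvector save path performs and the restore path reverses. The helper names are hypothetical; the shift/mask constants mirror the CSR_VXRM_*/CSR_VXSAT_* definitions added to asm/csr.h in this series.

/* Illustration only: how vxsat/vxrm are folded into the single vcsr field. */
#define CSR_VXRM_MASK	3
#define CSR_VXRM_SHIFT	1
#define CSR_VXSAT_MASK	1

static unsigned long pack_vcsr(unsigned long vxsat, unsigned long vxrm)
{
	return (vxsat & CSR_VXSAT_MASK) | ((vxrm & CSR_VXRM_MASK) << CSR_VXRM_SHIFT);
}

static unsigned long vcsr_to_vxrm(unsigned long vcsr)
{
	return (vcsr >> CSR_VXRM_SHIFT) & CSR_VXRM_MASK;
}

static unsigned long vcsr_to_vxsat(unsigned long vcsr)
{
	return vcsr & CSR_VXSAT_MASK;
}

/* e.g. vxrm == 2, vxsat == 1  =>  vcsr == 0b101 == 5, and back again */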
diff --git a/arch/riscv/include/asm/vendor_extensions/thead.h b/arch/riscv/include/asm/vendor_extensions/thead.h
new file mode 100644
index 000000000000..e85c75b3b340
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_H
+
+#include <asm/vendor_extensions.h>
+
+#include <linux/types.h>
+
+/*
+ * Extension keys must be strictly less than RISCV_ISA_VENDOR_EXT_MAX.
+ */
+#define RISCV_ISA_VENDOR_EXT_XTHEADVECTOR 0
+
+extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead;
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void disable_xtheadvector(void);
+#else
+static inline void disable_xtheadvector(void) { }
+#endif
+
+/* Extension specific helpers */
+
+/*
+ * Vector 0.7.1, as used for example on T-Head Xuantie cores, uses an older
+ * encoding for vsetvli (d1 instead of ta, ma), so provide an instruction for
+ * vsetvli t4, x0, e8, m8, d1
+ */
+#define THEAD_VSETVLI_T4X0E8M8D1 ".long 0x00307ed7\n\t"
+
+/*
+ * While in theory the vector-0.7.1 vsb.v and vlb.v result in the same
+ * encoding as the standard vse8.v and vle8.v, compilers seem to optimize
+ * the call, resulting in a different encoding that uses a value for
+ * the "mop" field which is not part of vector-0.7.1.
+ * So encode specific variants for vstate_save and _restore.
+ */
+#define THEAD_VSB_V_V0T0 ".long 0x02028027\n\t"
+#define THEAD_VSB_V_V8T0 ".long 0x02028427\n\t"
+#define THEAD_VSB_V_V16T0 ".long 0x02028827\n\t"
+#define THEAD_VSB_V_V24T0 ".long 0x02028c27\n\t"
+#define THEAD_VLB_V_V0T0 ".long 0x012028007\n\t"
+#define THEAD_VLB_V_V8T0 ".long 0x012028407\n\t"
+#define THEAD_VLB_V_V16T0 ".long 0x012028807\n\t"
+#define THEAD_VLB_V_V24T0 ".long 0x012028c07\n\t"
+
+#endif
diff --git a/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
new file mode 100644
index 000000000000..65a9c5612466
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/thead_hwprobe.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+#define _ASM_RISCV_VENDOR_EXTENSIONS_THEAD_HWPROBE_H
+
+#include <linux/cpumask.h>
+
+#include <uapi/asm/hwprobe.h>
+
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus);
+#else
+static inline void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ pair->value = 0;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
new file mode 100644
index 000000000000..6b9293e984a9
--- /dev/null
+++ b/arch/riscv/include/asm/vendor_extensions/vendor_hwprobe.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Rivos, Inc
+ */
+
+#ifndef _ASM_RISCV_SYS_HWPROBE_H
+#define _ASM_RISCV_SYS_HWPROBE_H
+
+#include <asm/cpufeature.h>
+
+#define VENDOR_EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_VENDOR_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_VENDOR_EXT_##ext; \
+ } while (false)
+
+/*
+ * Loop through and record extensions that 1) any hart has, and 2) any hart
+ * doesn't have.
+ *
+ * _extension_checks is an arbitrary C block to set the values of pair->value
+ * and missing. It should be filled with VENDOR_EXT_KEY expressions.
+ */
+#define VENDOR_EXTENSION_SUPPORTED(pair, cpus, per_hart_vendor_bitmap, _extension_checks) \
+ do { \
+ int cpu; \
+ u64 missing = 0; \
+ for_each_cpu(cpu, (cpus)) { \
+ struct riscv_isavendorinfo *isainfo = &(per_hart_vendor_bitmap)[cpu]; \
+ _extension_checks \
+ } \
+ (pair)->value &= ~missing; \
+ } while (false) \
+
+#endif /* _ASM_RISCV_SYS_HWPROBE_H */
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 2f2bb0c84f9a..a5150cdf34d8 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -6,6 +6,7 @@
#define ASM_VENDOR_LIST_H
#define ANDES_VENDOR_ID 0x31e
+#define MICROCHIP_VENDOR_ID 0x029
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 3af142b99f77..c3c1cc951cb9 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * Copyright 2023 Rivos, Inc
+ * Copyright 2023-2024 Rivos, Inc
*/
#ifndef _UAPI_ASM_HWPROBE_H
@@ -94,6 +94,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
+#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 3482c9a73d1b..f06bc5efcd79 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -179,6 +179,9 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SSNPM,
KVM_RISCV_ISA_EXT_SVADE,
KVM_RISCV_ISA_EXT_SVADU,
+ KVM_RISCV_ISA_EXT_SVVPTC,
+ KVM_RISCV_ISA_EXT_ZABHA,
+ KVM_RISCV_ISA_EXT_ZICCRSE,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -198,6 +201,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
+ KVM_RISCV_SBI_EXT_SUSP,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -211,9 +215,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24
diff --git a/arch/riscv/include/uapi/asm/vendor/thead.h b/arch/riscv/include/uapi/asm/vendor/thead.h
new file mode 100644
index 000000000000..43790ebe5faf
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/vendor/thead.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#define RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR (1 << 0)
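
For reference, a minimal userspace sketch (not part of the patch) of how the new vendor key could be queried once kernel headers carrying these uapi additions are installed. The header paths and the raw syscall(2) invocation are assumptions based on the standard riscv_hwprobe interface; only the key and bit definitions come from the hunks above.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 */
    #include <asm/vendor/thead.h>   /* RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR */

    int main(void)
    {
            struct riscv_hwprobe pair = {
                    .key = RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0,
            };

            /* cpusetsize == 0 and cpus == NULL query all online cpus */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
                    perror("riscv_hwprobe");
                    return 1;
            }

            /* an unknown key is reported back as -1 with value 0 */
            if (pair.key >= 0 && (pair.value & RISCV_HWPROBE_VENDOR_EXT_XTHEADVECTOR))
                    printf("xtheadvector is usable on all harts\n");
            else
                    printf("xtheadvector is not reported\n");

            return 0;
    }
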
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 063d1faf5a53..8d186bfced45 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -123,3 +123,5 @@ obj-$(CONFIG_COMPAT) += compat_vdso/
obj-$(CONFIG_64BIT) += pi/
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
+
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += bugs.o
diff --git a/arch/riscv/kernel/bugs.c b/arch/riscv/kernel/bugs.c
new file mode 100644
index 000000000000..3655fe7d678c
--- /dev/null
+++ b/arch/riscv/kernel/bugs.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Rivos Inc.
+ */
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/sprintf.h>
+
+#include <asm/bugs.h>
+#include <asm/vendor_extensions/thead.h>
+
+static enum mitigation_state ghostwrite_state;
+
+void ghostwrite_set_vulnerable(void)
+{
+ ghostwrite_state = VULNERABLE;
+}
+
+/*
+ * Vendor extension alternatives will use the value set at the time of boot
+ * alternative patching, thus this must be called before boot alternatives are
+ * patched (and after extension probing) to be effective.
+ *
+ * Returns true if mitigated, false otherwise.
+ */
+bool ghostwrite_enable_mitigation(void)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR) &&
+ ghostwrite_state == VULNERABLE && !cpu_mitigations_off()) {
+ disable_xtheadvector();
+ ghostwrite_state = MITIGATED;
+ return true;
+ }
+
+ return false;
+}
+
+enum mitigation_state ghostwrite_get_state(void)
+{
+ return ghostwrite_state;
+}
+
+ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR)) {
+ switch (ghostwrite_state) {
+ case UNAFFECTED:
+ return sprintf(buf, "Not affected\n");
+ case MITIGATED:
+ return sprintf(buf, "Mitigation: xtheadvector disabled\n");
+ case VULNERABLE:
+ fallthrough;
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
+ } else {
+ return sprintf(buf, "Not affected\n");
+ }
+}
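
Illustrative only, and an assumption rather than part of this diff: if the generic cpu vulnerabilities glue outside arch/riscv exposes this attribute as /sys/devices/system/cpu/vulnerabilities/ghostwrite, the state reported by cpu_show_ghostwrite() can be read like any other vulnerability file.

    #include <stdio.h>

    int main(void)
    {
            /* Path is an assumption; the drivers/base hookup is not shown in this diff. */
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/ghostwrite", "r");
            char line[64];

            if (!f) {
                    perror("ghostwrite");
                    return 1;
            }

            /* Expected strings: "Not affected", "Mitigation: xtheadvector disabled", "Vulnerable" */
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);

            fclose(f);
            return 0;
    }
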
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index c0916ed318c2..c6ba750536c3 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
@@ -26,6 +27,7 @@
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
@@ -39,6 +41,8 @@ static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];
+u32 thead_vlenb_of;
+
/**
* riscv_isa_extension_base() - Get base extension word
*
@@ -791,9 +795,50 @@ static void __init riscv_fill_vendor_ext_list(int cpu)
}
}
+static int has_thead_homogeneous_vlenb(void)
+{
+ int cpu;
+ u32 prev_vlenb = 0;
+ u32 vlenb;
+
+	/* Ignore the thead,vlenb property if xtheadvector is not enabled in the kernel */
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node) {
+ pr_warn("Unable to find cpu node\n");
+ return -ENOENT;
+ }
+
+ if (of_property_read_u32(cpu_node, "thead,vlenb", &vlenb)) {
+ of_node_put(cpu_node);
+
+ if (prev_vlenb)
+ return -ENOENT;
+ continue;
+ }
+
+ if (prev_vlenb && vlenb != prev_vlenb) {
+ of_node_put(cpu_node);
+ return -ENOENT;
+ }
+
+ prev_vlenb = vlenb;
+ of_node_put(cpu_node);
+ }
+
+ thead_vlenb_of = vlenb;
+ return 0;
+}
+
static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
unsigned int cpu;
+ bool mitigated;
for_each_possible_cpu(cpu) {
unsigned long this_hwcap = 0;
@@ -844,6 +889,17 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
riscv_fill_vendor_ext_list(cpu);
}
+ /*
+ * Execute ghostwrite mitigation immediately after detecting extensions
+ * to disable xtheadvector if necessary.
+ */
+ mitigated = ghostwrite_enable_mitigation();
+
+ if (!mitigated && has_xtheadvector_no_alternatives() && has_thead_homogeneous_vlenb() < 0) {
+ pr_warn("Unsupported heterogeneous vlenb detected, vector extension disabled.\n");
+ disable_xtheadvector();
+ }
+
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
return -ENOENT;
@@ -896,7 +952,8 @@ void __init riscv_fill_hwcap(void)
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
}
- if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) {
+ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X) ||
+ has_xtheadvector_no_alternatives()) {
/*
* This cannot fail when called on the boot hart
*/
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index c200d329d4bd..33a5a9f2a0d4 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -23,21 +23,21 @@
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
/* Exclude IRQs */
- blt a0, zero, _new_vmalloc_restore_context_a0
+ blt a0, zero, .Lnew_vmalloc_restore_context_a0
REG_S a1, TASK_TI_A1(tp)
/* Only check new_vmalloc if we are in page/protection fault */
li a1, EXC_LOAD_PAGE_FAULT
- beq a0, a1, _new_vmalloc_kernel_address
+ beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_STORE_PAGE_FAULT
- beq a0, a1, _new_vmalloc_kernel_address
+ beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_INST_PAGE_FAULT
- bne a0, a1, _new_vmalloc_restore_context_a1
+ bne a0, a1, .Lnew_vmalloc_restore_context_a1
-_new_vmalloc_kernel_address:
+.Lnew_vmalloc_kernel_address:
/* Is it a kernel address? */
csrr a0, CSR_TVAL
- bge a0, zero, _new_vmalloc_restore_context_a1
+ bge a0, zero, .Lnew_vmalloc_restore_context_a1
/* Check if a new vmalloc mapping appeared that could explain the trap */
REG_S a2, TASK_TI_A2(tp)
@@ -69,7 +69,7 @@ _new_vmalloc_kernel_address:
/* Check the value of new_vmalloc for this cpu */
REG_L a2, 0(a0)
and a2, a2, a1
- beq a2, zero, _new_vmalloc_restore_context
+ beq a2, zero, .Lnew_vmalloc_restore_context
/* Atomically reset the current cpu bit in new_vmalloc */
amoxor.d a0, a1, (a0)
@@ -83,11 +83,11 @@ _new_vmalloc_kernel_address:
csrw CSR_SCRATCH, x0
sret
-_new_vmalloc_restore_context:
+.Lnew_vmalloc_restore_context:
REG_L a2, TASK_TI_A2(tp)
-_new_vmalloc_restore_context_a1:
+.Lnew_vmalloc_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
-_new_vmalloc_restore_context_a0:
+.Lnew_vmalloc_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm
@@ -278,6 +278,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
#else
sret
#endif
+SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 8cb9b211611d..3524db5e4fa0 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -214,7 +214,22 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- prepare_ftrace_return(&arch_ftrace_regs(fregs)->ra, ip, arch_ftrace_regs(fregs)->s0);
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long frame_pointer = arch_ftrace_regs(fregs)->s0;
+ unsigned long *parent = &arch_ftrace_regs(fregs)->ra;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * We don't suffer access faults, so no extra fault-recovery assembly
+ * is needed here.
+ */
+ old = *parent;
+
+ if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs))
+ *parent = return_hooker;
}
#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
extern void ftrace_graph_call(void);
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index 6afe80c7f03a..99972a48e86b 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -143,7 +143,7 @@ static int riscv_v_start_kernel_context(bool *is_nested)
/* Transfer the ownership of V from user to kernel, then save */
riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
- if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
+ if (__riscv_v_vstate_check(task_pt_regs(current)->status, DIRTY)) {
uvstate = &current->thread.vstate;
__riscv_v_vstate_save(uvstate, uvstate->datap);
}
@@ -160,7 +160,7 @@ asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
return;
depth = riscv_v_ctx_get_depth();
- if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
+ if (depth == 0 && __riscv_v_vstate_check(regs->status, DIRTY))
riscv_preempt_v_set_dirty();
riscv_v_ctx_depth_inc();
@@ -208,7 +208,7 @@ void kernel_vector_begin(void)
{
bool nested = false;
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!(has_vector() || has_xtheadvector())))
return;
BUG_ON(!may_use_simd());
@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(kernel_vector_begin);
*/
void kernel_vector_end(void)
{
- if (WARN_ON(!has_vector()))
+ if (WARN_ON(!(has_vector() || has_xtheadvector())))
return;
riscv_v_disable();
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index 3c830a6f7ef4..2306ce3e5f22 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -114,29 +114,6 @@ void machine_shutdown(void)
#endif
}
-static void machine_kexec_mask_interrupts(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
-
/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 3a42f6287909..068168046e0e 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -12,6 +12,8 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
+#define ABI_SIZE_ON_STACK 80
+
.text
.macro SAVE_ABI_STATE
@@ -26,12 +28,12 @@
* register if a0 was not saved.
*/
.macro SAVE_RET_ABI_STATE
- addi sp, sp, -4*SZREG
- REG_S s0, 2*SZREG(sp)
- REG_S ra, 3*SZREG(sp)
- REG_S a0, 1*SZREG(sp)
- REG_S a1, 0*SZREG(sp)
- addi s0, sp, 4*SZREG
+ addi sp, sp, -ABI_SIZE_ON_STACK
+ REG_S ra, 1*SZREG(sp)
+ REG_S s0, 8*SZREG(sp)
+ REG_S a0, 10*SZREG(sp)
+ REG_S a1, 11*SZREG(sp)
+ addi s0, sp, ABI_SIZE_ON_STACK
.endm
.macro RESTORE_ABI_STATE
@@ -41,11 +43,11 @@
.endm
.macro RESTORE_RET_ABI_STATE
- REG_L ra, 3*SZREG(sp)
- REG_L s0, 2*SZREG(sp)
- REG_L a0, 1*SZREG(sp)
- REG_L a1, 0*SZREG(sp)
- addi sp, sp, 4*SZREG
+ REG_L ra, 1*SZREG(sp)
+ REG_L s0, 8*SZREG(sp)
+ REG_L a0, 10*SZREG(sp)
+ REG_L a1, 11*SZREG(sp)
+ addi sp, sp, ABI_SIZE_ON_STACK
.endm
SYM_TYPED_FUNC_START(ftrace_stub)
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1cd461f3d872..47d0ebeec93c 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -23,7 +23,7 @@ struct used_bucket {
struct relocation_head {
struct hlist_node node;
- struct list_head *rel_entry;
+ struct list_head rel_entry;
void *location;
};
@@ -634,7 +634,7 @@ process_accumulated_relocations(struct module *me,
location = rel_head_iter->location;
list_for_each_entry_safe(rel_entry_iter,
rel_entry_iter_tmp,
- rel_head_iter->rel_entry,
+ &rel_head_iter->rel_entry,
head) {
curr_type = rel_entry_iter->type;
reloc_handlers[curr_type].reloc_handler(
@@ -704,16 +704,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
return -ENOMEM;
}
- rel_head->rel_entry =
- kmalloc(sizeof(struct list_head), GFP_KERNEL);
-
- if (!rel_head->rel_entry) {
- kfree(entry);
- kfree(rel_head);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(rel_head->rel_entry);
+ INIT_LIST_HEAD(&rel_head->rel_entry);
rel_head->location = location;
INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) {
@@ -722,7 +713,6 @@ static int add_relocation_to_accumulate(struct module *me, int type,
if (!bucket) {
kfree(entry);
- kfree(rel_head->rel_entry);
kfree(rel_head);
return -ENOMEM;
}
@@ -735,7 +725,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
}
/* Add relocation to head of discovered rel_head */
- list_add_tail(&entry->head, rel_head->rel_entry);
+ list_add_tail(&entry->head, &rel_head->rel_entry);
return 0;
}
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 380a0e8cecc0..c0738d6c6498 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -30,7 +30,7 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p->ainsn.api.restore = (unsigned long)p->addr + len;
patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
- patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
+ patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 58b6482c2bf6..7c244de77180 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -190,7 +190,7 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_free(tsk);
}
@@ -240,7 +240,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.s[0] = 0;
}
p->thread.riscv_v_flags = 0;
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_alloc(p);
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
@@ -364,7 +364,7 @@ static bool try_to_set_pmm(unsigned long value)
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static struct ctl_table tagged_addr_sysctl_table[] = {
+static const struct ctl_table tagged_addr_sysctl_table[] = {
{
.procname = "tagged_addr_disabled",
.mode = 0644,
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 45010e71df86..f1793630fc51 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -147,9 +147,7 @@ static void __init init_resources(void)
res_idx = num_resources - 1;
mem_res_sz = num_resources * sizeof(*mem_res);
- mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
- if (!mem_res)
- panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
+ mem_res = memblock_alloc_or_panic(mem_res_sz, SMP_CACHE_BYTES);
/*
* Start by adding the reserved regions, if they overlap
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index dcd282419456..94e905eea1de 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -189,7 +189,7 @@ static long restore_sigcontext(struct pt_regs *regs,
return 0;
case RISCV_V_MAGIC:
- if (!has_vector() || !riscv_v_vstate_query(regs) ||
+ if (!(has_vector() || has_xtheadvector()) || !riscv_v_vstate_query(regs) ||
size != riscv_v_sc_size)
return -EINVAL;
@@ -211,7 +211,7 @@ static size_t get_rt_frame_size(bool cal_all)
frame_size = sizeof(*frame);
- if (has_vector()) {
+ if (has_vector() || has_xtheadvector()) {
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
@@ -284,7 +284,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
if (has_fpu())
err |= save_fp_state(regs, &sc->sc_fpregs);
/* Save the vector state. */
- if (has_vector() && riscv_v_vstate_query(regs))
+ if ((has_vector() || has_xtheadvector()) && riscv_v_vstate_query(regs))
err |= save_v_state(regs, (void __user **)&sc_ext_ptr);
/* Write zero to fp-reserved space and check it on restore_sigcontext */
err |= __put_user(0, &sc->sc_extdesc.reserved);
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index c180a647a30e..d58b5e751286 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -43,6 +43,7 @@ enum ipi_message_type {
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
+EXPORT_SYMBOL_GPL(__cpuid_to_hartid_map);
void __init smp_setup_processor_id(void)
{
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 153a2db4c5fa..d4355c770c36 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -17,6 +17,7 @@
#ifdef CONFIG_FRAME_POINTER
extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
@@ -71,7 +72,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
&frame->ra);
- if (pc == (unsigned long)handle_exception) {
+ if (pc >= (unsigned long)handle_exception &&
+ pc < (unsigned long)&ret_from_exception_end) {
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index cb93adfffc48..bcd3b816306c 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -15,6 +15,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
+#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>
@@ -286,6 +287,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = riscv_timebase;
break;
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
+ hwprobe_isa_vendor_ext_thead_0(pair, cpus);
+ break;
+
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 51ebfd23e007..8ff8e8b36524 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -35,7 +35,7 @@
int show_unhandled_signals = 1;
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
@@ -81,7 +81,7 @@ void die(struct pt_regs *regs, const char *str)
oops_enter();
- spin_lock_irqsave(&die_lock, flags);
+ raw_spin_lock_irqsave(&die_lock, flags);
console_verbose();
bust_spinlocks(1);
@@ -100,7 +100,7 @@ void die(struct pt_regs *regs, const char *str)
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irqrestore(&die_lock, flags);
+ raw_spin_unlock_irqrestore(&die_lock, flags);
oops_exit();
if (in_interrupt())
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 821818886fab..184f780c932d 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -33,7 +33,17 @@ int riscv_v_setup_vsize(void)
{
unsigned long this_vsize;
- /* There are 32 vector registers with vlenb length. */
+ /*
+ * There are 32 vector registers with vlenb length.
+ *
+ * If the thead,vlenb property was provided by the firmware, use that
+ * instead of probing the CSRs.
+ */
+ if (thead_vlenb_of) {
+ riscv_v_vsize = thead_vlenb_of * 32;
+ return 0;
+ }
+
riscv_v_enable();
this_vsize = csr_read(CSR_VLENB) * 32;
riscv_v_disable();
@@ -53,7 +63,7 @@ int riscv_v_setup_vsize(void)
void __init riscv_v_setup_ctx_cache(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
@@ -173,7 +183,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
u32 __user *epc = (u32 __user *)regs->epc;
u32 insn = (u32)regs->badaddr;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return false;
/* Do not handle if V is not supported, or disabled */
@@ -216,7 +226,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
next = riscv_v_ctrl_get_next(tsk);
@@ -238,7 +248,7 @@ void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
long riscv_v_vstate_ctrl_get_current(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
@@ -249,7 +259,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg)
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK)
@@ -287,7 +297,7 @@ long riscv_v_vstate_ctrl_set_current(unsigned long arg)
#ifdef CONFIG_SYSCTL
-static struct ctl_table riscv_v_default_vstate_table[] = {
+static const struct ctl_table riscv_v_default_vstate_table[] = {
{
.procname = "riscv_v_default_allow",
.data = &riscv_v_implicit_uacc,
@@ -299,7 +309,7 @@ static struct ctl_table riscv_v_default_vstate_table[] = {
static int __init riscv_v_sysctl_init(void)
{
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
if (!register_sysctl("abi", riscv_v_default_vstate_table))
return -EINVAL;
return 0;
@@ -309,7 +319,7 @@ static int __init riscv_v_sysctl_init(void)
static int __init riscv_v_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
-static int riscv_v_init(void)
+static int __init riscv_v_init(void)
{
return riscv_v_sysctl_init();
}
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
index a8126d118341..a31ff84740eb 100644
--- a/arch/riscv/kernel/vendor_extensions.c
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -6,6 +6,7 @@
#include <asm/vendorid_list.h>
#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/andes.h>
+#include <asm/vendor_extensions/thead.h>
#include <linux/array_size.h>
#include <linux/types.h>
@@ -14,6 +15,9 @@ struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = {
#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
&riscv_isa_vendor_ext_list_andes,
#endif
+#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+ &riscv_isa_vendor_ext_list_thead,
+#endif
};
const size_t riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list);
@@ -41,6 +45,12 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig
cpu_bmap = riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap;
break;
#endif
+ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_THEAD
+ case THEAD_VENDOR_ID:
+ bmap = &riscv_isa_vendor_ext_list_thead.all_harts_isa_bitmap;
+ cpu_bmap = riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap;
+ break;
+ #endif
default:
return false;
}
diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile
index 6a61aed944f1..866414c81a9f 100644
--- a/arch/riscv/kernel/vendor_extensions/Makefile
+++ b/arch/riscv/kernel/vendor_extensions/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o
+obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_THEAD) += thead.o
+obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_THEAD) += thead_hwprobe.o
diff --git a/arch/riscv/kernel/vendor_extensions/thead.c b/arch/riscv/kernel/vendor_extensions/thead.c
new file mode 100644
index 000000000000..519dbf70710a
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/thead.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/cpufeature.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/thead.h>
+
+#include <linux/array_size.h>
+#include <linux/cpumask.h>
+#include <linux/types.h>
+
+/* All T-Head vendor extensions supported in Linux */
+static const struct riscv_isa_ext_data riscv_isa_vendor_ext_thead[] = {
+ __RISCV_ISA_EXT_DATA(xtheadvector, RISCV_ISA_VENDOR_EXT_XTHEADVECTOR),
+};
+
+struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_thead = {
+ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_thead),
+ .ext_data = riscv_isa_vendor_ext_thead,
+};
+
+void disable_xtheadvector(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ clear_bit(RISCV_ISA_VENDOR_EXT_XTHEADVECTOR, riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap[cpu].isa);
+
+ clear_bit(RISCV_ISA_VENDOR_EXT_XTHEADVECTOR, riscv_isa_vendor_ext_list_thead.all_harts_isa_bitmap.isa);
+}
diff --git a/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c b/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c
new file mode 100644
index 000000000000..2eba34011786
--- /dev/null
+++ b/arch/riscv/kernel/vendor_extensions/thead_hwprobe.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/vendor_extensions/thead.h>
+#include <asm/vendor_extensions/thead_hwprobe.h>
+#include <asm/vendor_extensions/vendor_hwprobe.h>
+
+#include <linux/cpumask.h>
+#include <linux/types.h>
+
+#include <uapi/asm/hwprobe.h>
+#include <uapi/asm/vendor/thead.h>
+
+void hwprobe_isa_vendor_ext_thead_0(struct riscv_hwprobe *pair, const struct cpumask *cpus)
+{
+ VENDOR_EXTENSION_SUPPORTED(pair, cpus,
+ riscv_isa_vendor_ext_list_thead.per_hart_isa_bitmap, {
+ VENDOR_EXT_KEY(XTHEADVECTOR);
+ });
+}
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 0fb1840c3e0a..4e0bba91d284 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -30,6 +30,7 @@ kvm-y += vcpu_sbi_hsm.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o
kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_sbi_sta.o
+kvm-y += vcpu_sbi_system.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_timer.o
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e048dcc6e65e..60d684c76c58 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -34,7 +34,12 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, csr_exit_user),
STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, signal_exits),
- STATS_DESC_COUNTER(VCPU, exits)
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
+ STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
+ STATS_DESC_COUNTER(VCPU, load_access_exits),
+ STATS_DESC_COUNTER(VCPU, store_access_exits),
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index fa98e5c024b2..6e0c18412795 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -165,6 +165,17 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
vcpu->arch.guest_context.sstatus |= SR_SPP;
}
+static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap)
+{
+ int ret = -EFAULT;
+
+ if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
+ kvm_riscv_vcpu_trap_redirect(vcpu, trap);
+ ret = 1;
+ }
+ return ret;
+}
+
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
@@ -183,14 +194,32 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (trap->scause) {
case EXC_INST_ILLEGAL:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN);
+ vcpu->stat.instr_illegal_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_LOAD_MISALIGNED:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD);
+ vcpu->stat.load_misaligned_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_STORE_MISALIGNED:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE);
+ vcpu->stat.store_misaligned_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_LOAD_ACCESS:
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD);
+ vcpu->stat.load_access_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
case EXC_STORE_ACCESS:
- if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
- kvm_riscv_vcpu_trap_redirect(vcpu, trap);
- ret = 1;
- }
+ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE);
+ vcpu->stat.store_access_exits++;
+ ret = vcpu_redirect(vcpu, trap);
+ break;
+ case EXC_INST_ACCESS:
+ ret = vcpu_redirect(vcpu, trap);
break;
case EXC_VIRTUAL_INST_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 753f66c8b70a..f6d27b59c641 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -46,6 +46,8 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
+ KVM_ISA_EXT_ARR(SVVPTC),
+ KVM_ISA_EXT_ARR(ZABHA),
KVM_ISA_EXT_ARR(ZACAS),
KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
@@ -65,6 +67,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
+ KVM_ISA_EXT_ARR(ZICCRSE),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
@@ -145,6 +148,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
+ case KVM_RISCV_ISA_EXT_SVVPTC:
+ case KVM_RISCV_ISA_EXT_ZABHA:
case KVM_RISCV_ISA_EXT_ZACAS:
case KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_RISCV_ISA_EXT_ZBA:
@@ -162,6 +167,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZFA:
case KVM_RISCV_ISA_EXT_ZFH:
case KVM_RISCV_ISA_EXT_ZFHMIN:
+ case KVM_RISCV_ISA_EXT_ZICCRSE:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICOND:
case KVM_RISCV_ISA_EXT_ZICSR:
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 6e704ed86a83..d1c83a77735e 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -71,6 +71,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_ptr = &vcpu_sbi_ext_dbcn,
},
{
+ .ext_idx = KVM_RISCV_SBI_EXT_SUSP,
+ .ext_ptr = &vcpu_sbi_ext_susp,
+ },
+ {
.ext_idx = KVM_RISCV_SBI_EXT_STA,
.ext_ptr = &vcpu_sbi_ext_sta,
},
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
new file mode 100644
index 000000000000..5d55e08791fa
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/sbi.h>
+
+static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm_cpu_context *reset_cntx;
+ unsigned long funcid = cp->a6;
+ unsigned long hva, i;
+ struct kvm_vcpu *tmp;
+
+ switch (funcid) {
+ case SBI_EXT_SUSP_SYSTEM_SUSPEND:
+ if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+ return 0;
+ }
+
+ if (!(cp->sstatus & SR_SPP)) {
+ retdata->err_val = SBI_ERR_FAILURE;
+ return 0;
+ }
+
+ hva = kvm_vcpu_gfn_to_hva_prot(vcpu, cp->a1 >> PAGE_SHIFT, NULL);
+ if (kvm_is_error_hva(hva)) {
+ retdata->err_val = SBI_ERR_INVALID_ADDRESS;
+ return 0;
+ }
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (tmp == vcpu)
+ continue;
+ if (!kvm_riscv_vcpu_stopped(tmp)) {
+ retdata->err_val = SBI_ERR_DENIED;
+ return 0;
+ }
+ }
+
+ spin_lock(&vcpu->arch.reset_cntx_lock);
+ reset_cntx = &vcpu->arch.guest_reset_context;
+ reset_cntx->sepc = cp->a1;
+ reset_cntx->a0 = vcpu->vcpu_id;
+ reset_cntx->a1 = cp->a2;
+ spin_unlock(&vcpu->arch.reset_cntx_lock);
+
+ kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+
+ /* userspace provides the suspend implementation */
+ kvm_riscv_vcpu_sbi_forward(vcpu, run);
+ retdata->uexit = true;
+ break;
+ default:
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
+ break;
+ }
+
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp = {
+ .extid_start = SBI_EXT_SUSP,
+ .extid_end = SBI_EXT_SUSP,
+ .default_disabled = true,
+ .handler = kvm_sbi_ext_susp_handler,
+};
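
For orientation (not part of the patch), here is a guest-side sketch of the call this handler services, following the SBI calling convention (EID in a7, FID in a6, status returned in a0/a1). The EID, FID and sleep-type constants are taken from the SBI SUSP specification rather than from this diff; a Linux guest would normally go through its sbi_ecall() helper instead of open-coding the ecall.

    struct sbiret {
            long error;
            long value;
    };

    /* EID "SUSP" = 0x53555350, FID 0 = SYSTEM_SUSPEND, sleep_type 0 = SUSPEND_TO_RAM */
    struct sbiret sbi_system_suspend(unsigned long resume_addr, unsigned long opaque)
    {
            register unsigned long a0 asm("a0") = 0;            /* SUSPEND_TO_RAM */
            register unsigned long a1 asm("a1") = resume_addr;
            register unsigned long a2 asm("a2") = opaque;
            register unsigned long a6 asm("a6") = 0;            /* SYSTEM_SUSPEND */
            register unsigned long a7 asm("a7") = 0x53555350;   /* "SUSP" */
            struct sbiret ret;

            /* On success execution resumes at resume_addr after wakeup;
             * on failure the call returns with an SBI error code in a0. */
            asm volatile("ecall"
                         : "+r" (a0), "+r" (a1)
                         : "r" (a2), "r" (a6), "r" (a7)
                         : "memory");

            ret.error = (long)a0;
            ret.value = (long)a1;
            return ret;
    }
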
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 8eec6b69a875..79368a895fee 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -15,8 +15,7 @@ endif
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
-lib-$(CONFIG_RISCV_ISA_ZBC) += crc32.o
-
+obj-$(CONFIG_CRC32_ARCH) += crc32-riscv.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32-riscv.c
index d7dc599af3ef..53d56ab422c7 100644
--- a/arch/riscv/lib/crc32.c
+++ b/arch/riscv/lib/crc32-riscv.c
@@ -14,6 +14,7 @@
#include <linux/crc32poly.h>
#include <linux/crc32.h>
#include <linux/byteorder/generic.h>
+#include <linux/module.h>
/*
* Refer to https://www.corsix.org/content/barrett-reduction-polynomials for
@@ -217,17 +218,19 @@ legacy:
return crc_fb(crc, p, len);
}
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE,
crc32_le_base);
}
+EXPORT_SYMBOL(crc32_le_arch);
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
- CRC32C_POLY_QT_LE, __crc32c_le_base);
+ CRC32C_POLY_QT_LE, crc32c_le_base);
}
+EXPORT_SYMBOL(crc32c_le_arch);
static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
size_t len)
@@ -253,7 +256,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
return crc;
}
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
size_t offset, head_len, tail_len;
unsigned long const *p_ul;
@@ -292,3 +295,17 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
legacy:
return crc32_be_base(crc, p, len);
}
+EXPORT_SYMBOL(crc32_be_arch);
+
+u32 crc32_optimizations(void)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Accelerated CRC32 implementation with Zbc extension");
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index a9f2b4af8f3f..0194324a0c50 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -22,6 +22,57 @@
#include "../kernel/head.h"
+static void show_pte(unsigned long addr)
+{
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ struct mm_struct *mm = current->mm;
+
+ if (!mm)
+ mm = &init_mm;
+
+ pr_alert("Current %s pgtable: %luK pagesize, %d-bit VAs, pgdp=0x%016llx\n",
+ current->comm, PAGE_SIZE / SZ_1K, VA_BITS,
+ mm == &init_mm ? (u64)__pa_symbol(mm->pgd) : virt_to_phys(mm->pgd));
+
+ pgdp = pgd_offset(mm, addr);
+ pgd = pgdp_get(pgdp);
+ pr_alert("[%016lx] pgd=%016lx", addr, pgd_val(pgd));
+ if (pgd_none(pgd) || pgd_bad(pgd) || pgd_leaf(pgd))
+ goto out;
+
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = p4dp_get(p4dp);
+ pr_cont(", p4d=%016lx", p4d_val(p4d));
+ if (p4d_none(p4d) || p4d_bad(p4d) || p4d_leaf(p4d))
+ goto out;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ pr_cont(", pud=%016lx", pud_val(pud));
+ if (pud_none(pud) || pud_bad(pud) || pud_leaf(pud))
+ goto out;
+
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get(pmdp);
+ pr_cont(", pmd=%016lx", pmd_val(pmd));
+ if (pmd_none(pmd) || pmd_bad(pmd) || pmd_leaf(pmd))
+ goto out;
+
+ ptep = pte_offset_map(pmdp, addr);
+ if (!ptep)
+ goto out;
+
+ pte = ptep_get(ptep);
+ pr_cont(", pte=%016lx", pte_val(pte));
+ pte_unmap(ptep);
+out:
+ pr_cont("\n");
+}
+
static void die_kernel_fault(const char *msg, unsigned long addr,
struct pt_regs *regs)
{
@@ -31,6 +82,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
addr);
bust_spinlocks(0);
+ show_pte(addr);
die(regs, "Oops");
make_task_dead(SIGKILL);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fc53ce748c80..15b2eda4c364 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
+#include <asm/sparsemem.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
@@ -62,6 +63,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)
+
+unsigned long vmemmap_start_pfn __ro_after_init;
+EXPORT_SYMBOL(vmemmap_start_pfn);
+#endif
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -240,8 +248,12 @@ static void __init setup_bootmem(void)
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
- if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
+ }
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we
@@ -256,8 +268,12 @@ static void __init setup_bootmem(void)
*/
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
- memblock_cap_memory_range(phys_ram_base,
- max_mapped_addr - phys_ram_base);
+ if (memblock_end_of_DRAM() > max_mapped_addr) {
+ memblock_cap_memory_range(phys_ram_base,
+ max_mapped_addr - phys_ram_base);
+ pr_warn("Physical memory overflows the linear mapping size: region above %pa removed",
+ &max_mapped_addr);
+ }
}
/*
@@ -1101,6 +1117,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
phys_ram_base = CONFIG_PHYS_RAM_BASE;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
@@ -1558,7 +1577,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
return;
}
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
@@ -1580,7 +1599,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemm
}
if (!is_vmemmap)
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index c301c8d291d2..41c635d6aca4 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -32,7 +32,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
pte_t *ptep, *p;
if (pmd_none(pmdp_get(pmd))) {
- p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -54,7 +54,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
unsigned long next;
if (pud_none(pudp_get(pud))) {
- p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -85,7 +85,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
unsigned long next;
if (p4d_none(p4dp_get(p4d))) {
- p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -116,7 +116,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
unsigned long next;
if (pgd_none(pgdp_get(pgd))) {
- p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -385,7 +385,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
next = pud_addr_end(vaddr, end);
if (pud_none(pudp_get(pud_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -405,7 +405,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
next = p4d_addr_end(vaddr, end);
if (p4d_none(p4dp_get(p4d_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -424,7 +424,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
next = pgd_addr_end(vaddr, end);
if (pgd_none(pgdp_get(pgd_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}