Diffstat (limited to 'arch/riscv')
-rw-r--r-- | arch/riscv/Kconfig | 2
-rw-r--r-- | arch/riscv/Kconfig.socs | 1
-rw-r--r-- | arch/riscv/Makefile | 2
-rw-r--r-- | arch/riscv/boot/dts/sifive/fu740-c000.dtsi | 2
-rw-r--r-- | arch/riscv/include/asm/atomic.h | 128
-rw-r--r-- | arch/riscv/include/asm/cmpxchg.h | 34
-rw-r--r-- | arch/riscv/include/asm/pgtable.h | 5
-rw-r--r-- | arch/riscv/kernel/probes/kprobes.c | 17
-rw-r--r-- | arch/riscv/kernel/smpboot.c | 1
-rw-r--r-- | arch/riscv/kernel/stacktrace.c | 2
-rw-r--r-- | arch/riscv/mm/kasan_init.c | 10
11 files changed, 93 insertions, 111 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 18ec0f9bb8d5..15f9490a7aad 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -332,7 +332,7 @@ config NODES_SHIFT
         int "Maximum NUMA Nodes (as a power of 2)"
         range 1 10
         default "2"
-        depends on NEED_MULTIPLE_NODES
+        depends on NUMA
         help
           Specify the maximum number of NUMA Nodes available on the target
           system. Increases memory reserved to accommodate various tables.
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index ed963761fbd2..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -14,6 +14,7 @@ config SOC_SIFIVE
         select CLK_SIFIVE
         select CLK_SIFIVE_PRCI
         select SIFIVE_PLIC
+        select RISCV_ERRATA_ALTERNATIVE
         select ERRATA_SIFIVE
         help
           This enables support for SiFive SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 4be020695428..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
         CC_FLAGS_FTRACE := -fpatchable-function-entry=8
 endif
 
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
 KBUILD_CFLAGS_MODULE += -mcmodel=medany
 endif
 
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index 8eef82e4199f..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -273,7 +273,7 @@
                         cache-size = <2097152>;
                         cache-unified;
                         interrupt-parent = <&plic0>;
-                        interrupts = <19 20 21 22>;
+                        interrupts = <19 21 22 20>;
                         reg = <0x0 0x2010000 0x0 0x1000>;
                 };
                 gpio: gpio@10060000 {
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 400a8c8b6de7..ac9bdf4fc404 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,22 +25,22 @@
 #define __atomic_release_fence() \
         __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
 
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
         return READ_ONCE(v->counter);
 }
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
         WRITE_ONCE(v->counter, i);
 }
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
         return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
         WRITE_ONCE(v->counter, i);
 }
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
  */
 #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
 static __always_inline \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
 { \
         __asm__ __volatile__ ( \
                 " amo" #asm_op "." #asm_type " zero, %1, %0" \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
  */
 #define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
 static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
                                              atomic##prefix##_t *v) \
 { \
         register c_type ret; \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
         return ret; \
 } \
 static __always_inline \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
 { \
         register c_type ret; \
         __asm__ __volatile__ ( \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
 
 #define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
 static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
                                               atomic##prefix##_t *v) \
 { \
-        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+        return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
 } \
 static __always_inline \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
 { \
-        return atomic##prefix##_fetch_##op(i, v) c_op I; \
+        return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
 ATOMIC_OPS(add, add, +, i)
 ATOMIC_OPS(sub, add, +, -i)
 
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
 
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_add_return atomic64_add_return
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add atomic64_fetch_add
-#define atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
 #endif
 
 #undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
 ATOMIC_OPS( or, or, i)
 ATOMIC_OPS(xor, xor, i)
 
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and atomic64_fetch_and
-#define atomic64_fetch_or atomic64_fetch_or
-#define atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 #endif
 
 #undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
 /* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
         int prev, rc;
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
                 : "memory");
         return prev;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
         s64 prev;
         long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
                 : "memory");
         return prev;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif
 
 /*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
  */
 #define ATOMIC_OP(c_t, prefix, size) \
 static __always_inline \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
 { \
         return __xchg_relaxed(&(v->counter), n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
 { \
         return __xchg_acquire(&(v->counter), n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
 { \
         return __xchg_release(&(v->counter), n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
 { \
         return __xchg(&(v->counter), n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
                                      c_t o, c_t n) \
 { \
         return __cmpxchg_relaxed(&(v->counter), o, n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
                                      c_t o, c_t n) \
 { \
         return __cmpxchg_acquire(&(v->counter), o, n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
                                      c_t o, c_t n) \
 { \
         return __cmpxchg_release(&(v->counter), o, n, size); \
 } \
 static __always_inline \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 { \
         return __cmpxchg(&(v->counter), o, n, size); \
 }
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 
 ATOMIC_OPS()
 
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#define arch_atomic_xchg arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
 
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
 {
         int prev, rc;
 
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
         return prev - offset;
 }
 
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
         s64 prev;
         long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
         return prev - offset;
 }
 
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
 #endif
 
 #endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 262e5bbb2776..36dc962f6343 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -37,7 +37,7 @@
         __ret; \
 })
 
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
 ({ \
         __typeof__(*(ptr)) _x_ = (x); \
         (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -72,7 +72,7 @@
         __ret; \
 })
 
-#define xchg_acquire(ptr, x) \
+#define arch_xchg_acquire(ptr, x) \
 ({ \
         __typeof__(*(ptr)) _x_ = (x); \
         (__typeof__(*(ptr))) __xchg_acquire((ptr), \
@@ -107,7 +107,7 @@
         __ret; \
 })
 
-#define xchg_release(ptr, x) \
+#define arch_xchg_release(ptr, x) \
 ({ \
         __typeof__(*(ptr)) _x_ = (x); \
         (__typeof__(*(ptr))) __xchg_release((ptr), \
@@ -140,7 +140,7 @@
         __ret; \
 })
 
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
 ({ \
         __typeof__(*(ptr)) _x_ = (x); \
         (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@@ -149,13 +149,13 @@
 #define xchg32(ptr, x) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
-        xchg((ptr), (x)); \
+        arch_xchg((ptr), (x)); \
 })
 
 #define xchg64(ptr, x) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-        xchg((ptr), (x)); \
+        arch_xchg((ptr), (x)); \
 })
 
 /*
@@ -199,7 +199,7 @@
         __ret; \
 })
 
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) _o_ = (o); \
         __typeof__(*(ptr)) _n_ = (n); \
@@ -245,7 +245,7 @@
         __ret; \
 })
 
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) _o_ = (o); \
         __typeof__(*(ptr)) _n_ = (n); \
@@ -291,7 +291,7 @@
         __ret; \
 })
 
-#define cmpxchg_release(ptr, o, n) \
+#define arch_cmpxchg_release(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) _o_ = (o); \
         __typeof__(*(ptr)) _n_ = (n); \
@@ -337,7 +337,7 @@
         __ret; \
 })
 
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
         __typeof__(*(ptr)) _o_ = (o); \
         __typeof__(*(ptr)) _n_ = (n); \
@@ -345,31 +345,31 @@
                                 _o_, _n_, sizeof(*(ptr))); \
 })
 
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
         (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
 #define cmpxchg32(ptr, o, n) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
-        cmpxchg((ptr), (o), (n)); \
+        arch_cmpxchg((ptr), (o), (n)); \
 })
 
 #define cmpxchg32_local(ptr, o, n) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
-        cmpxchg_relaxed((ptr), (o), (n)) \
+        arch_cmpxchg_relaxed((ptr), (o), (n)) \
 })
 
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-        cmpxchg((ptr), (o), (n)); \
+        arch_cmpxchg((ptr), (o), (n)); \
 })
 
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
         BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-        cmpxchg_relaxed((ptr), (o), (n)); \
+        arch_cmpxchg_relaxed((ptr), (o), (n)); \
 })
 
 #endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 9469f464e71a..380cd3a7e548 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -30,9 +30,8 @@
 
 #define BPF_JIT_REGION_SIZE (SZ_128M)
 #ifdef CONFIG_64BIT
-/* KASLR should leave at least 128MB for BPF after the kernel */
-#define BPF_JIT_REGION_START PFN_ALIGN((unsigned long)&_end)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
 #else
 #define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_END (VMALLOC_END)
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 15cc65ac7ca6..247e33fa5bc7 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -279,23 +279,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
         case KPROBE_HIT_ACTIVE:
         case KPROBE_HIT_SSDONE:
                 /*
-                 * We increment the nmissed count for accounting,
-                 * we can also use npre/npostfault count for accounting
-                 * these specific fault cases.
-                 */
-                kprobes_inc_nmissed_count(cur);
-
-                /*
-                 * We come here because instructions in the pre/post
-                 * handler caused the page_fault, this could happen
-                 * if handler tries to access user space by
-                 * copy_from_user(), get_user() etc. Let the
-                 * user-specified handler try to fix it first.
-                 */
-                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-                        return 1;
-
-                /*
                  * In case the user-specified fault handler returned
                  * zero, try to fix up.
                  */
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 9a408e2942ac..bd82375db51a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -180,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
          * Disable preemption before enabling interrupts, so we don't try to
          * schedule a CPU that hasn't actually started yet.
          */
-        preempt_disable();
         local_irq_enable();
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index bde85fc53357..ff467b98c3e3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -132,7 +132,7 @@ unsigned long get_wchan(struct task_struct *task)
 {
         unsigned long pc = 0;
 
-        if (likely(task && task != current && task->state != TASK_RUNNING))
+        if (likely(task && task != current && !task_is_running(task)))
                 walk_stackframe(task, NULL, save_wchan, &pc);
         return pc;
 }
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 9daacae93e33..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -169,7 +169,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
 
 void __init kasan_init(void)
 {
-        phys_addr_t _start, _end;
+        phys_addr_t p_start, p_end;
         u64 i;
 
         /*
@@ -189,9 +189,9 @@ void __init kasan_init(void)
                             (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
         /* Populate the linear mapping */
-        for_each_mem_range(i, &_start, &_end) {
-                void *start = (void *)__va(_start);
-                void *end = (void *)__va(_end);
+        for_each_mem_range(i, &p_start, &p_end) {
+                void *start = (void *)__va(p_start);
+                void *end = (void *)__va(p_end);
 
                 if (start >= end)
                         break;
@@ -201,7 +201,7 @@ void __init kasan_init(void)
 
         /* Populate kernel, BPF, modules mapping */
         kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
-                       kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+                       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
 
         for (i = 0; i < PTRS_PER_PTE; i++)
                 set_pte(&kasan_early_shadow_pte[i],