Diffstat (limited to 'arch/arm64/include')
31 files changed, 346 insertions, 319 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 136d13f3d6e9..e8bd0af0141c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -791,6 +791,16 @@ alternative_endif .endm /* + * Branch Target Identifier (BTI) + */ + .macro bti, targets + .equ .L__bti_targets_c, 34 + .equ .L__bti_targets_j, 36 + .equ .L__bti_targets_jc,38 + hint #.L__bti_targets_\targets + .endm + +/* * This macro emits a program property note section identifying * architecture features which require special handling, mainly for * use in assembly files included in the VDSO. diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 13869b76b58c..fe0db8d416fb 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ \ asm volatile("// atomic_" #op "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " stxr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" st" #rel "xr %w1, %w0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %w0, %2\n" \ + " " #asm_op " %w0, %w0, %w3\n" \ + " st" #rel "xr %w1, %w0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %w0, %3\n" \ -" " #asm_op " %w1, %w0, %w4\n" \ -" st" #rel "xr %w2, %w1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %w0, %3\n" \ + " " #asm_op " %w1, %w0, %w4\n" \ + " st" #rel "xr %w2, %w1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ \ asm volatile("// atomic64_" #op "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ldxr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b") \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " stxr %w1, %0, %2\n" \ + " cbnz %w1, 1b") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i)); \ } @@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %2\n" \ -"1: ld" #acq "xr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" st" #rel "xr %w1, %0, %2\n" \ -" cbnz %w1, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %2\n" \ + "1: ld" #acq "xr %0, %2\n" \ + " " #asm_op " %0, %0, %3\n" \ + " st" #rel "xr %w1, %0, %2\n" \ + " cbnz %w1, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -176,19 +176,19 @@ 
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ static inline long \ -__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ +__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ __LL_SC_FALLBACK( \ -" prfm pstl1strm, %3\n" \ -"1: ld" #acq "xr %0, %3\n" \ -" " #asm_op " %1, %0, %4\n" \ -" st" #rel "xr %w2, %1, %3\n" \ -" cbnz %w2, 1b\n" \ -" " #mb ) \ + " prfm pstl1strm, %3\n" \ + "1: ld" #acq "xr %0, %3\n" \ + " " #asm_op " %1, %0, %4\n" \ + " st" #rel "xr %w2, %1, %3\n" \ + " cbnz %w2, 1b\n" \ + " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : __stringify(constraint) "r" (i) \ : cl); \ @@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) asm volatile("// atomic64_dec_if_positive\n" __LL_SC_FALLBACK( -" prfm pstl1strm, %2\n" -"1: ldxr %0, %2\n" -" subs %0, %0, #1\n" -" b.lt 2f\n" -" stlxr %w1, %0, %2\n" -" cbnz %w1, 1b\n" -" dmb ish\n" -"2:") + " prfm pstl1strm, %2\n" + "1: ldxr %0, %2\n" + " subs %0, %0, #1\n" + " b.lt 2f\n" + " stlxr %w1, %0, %2\n" + " cbnz %w1, 1b\n" + " dmb ish\n" + "2:") : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index da3280f639cd..5d460f6b7675 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -11,13 +11,13 @@ #define __ASM_ATOMIC_LSE_H #define ATOMIC_OP(op, asm_op) \ -static inline void __lse_atomic_##op(int i, atomic_t *v) \ +static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %w[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ + " " #asm_op " %w[i], %[v]\n" \ + : [v] "+Q" (v->counter) \ + : [i] "r" (i)); \ } ATOMIC_OP(andnot, stclr) @@ -25,19 +25,27 @@ ATOMIC_OP(or, stset) ATOMIC_OP(xor, steor) ATOMIC_OP(add, stadd) +static inline void __lse_atomic_sub(int i, atomic_t *v) +{ + __lse_atomic_add(-i, v); +} + #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ + int old; \ + \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %w[i], %w[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ + " " #asm_op #mb " %w[i], %w[old], %[v]" \ + : [v] "+Q" (v->counter), \ + [old] "=r" (old) \ + : [i] "r" (i) \ : cl); \ \ - return i; \ + return old; \ } #define ATOMIC_FETCH_OPS(op, asm_op) \ @@ -54,51 +62,46 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OPS -#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) 
\ +#define ATOMIC_FETCH_OP_SUB(name) \ +static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ +{ \ + return __lse_atomic_fetch_add##name(-i, v); \ +} + +ATOMIC_FETCH_OP_SUB(_relaxed) +ATOMIC_FETCH_OP_SUB(_acquire) +ATOMIC_FETCH_OP_SUB(_release) +ATOMIC_FETCH_OP_SUB( ) + +#undef ATOMIC_FETCH_OP_SUB + +#define ATOMIC_OP_ADD_SUB_RETURN(name) \ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ { \ - u32 tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ - " add %w[i], %w[i], %w[tmp]" \ - : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ + return __lse_atomic_fetch_add##name(i, v) + i; \ +} \ \ - return i; \ +static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ +{ \ + return __lse_atomic_fetch_sub(i, v) - i; \ } -ATOMIC_OP_ADD_RETURN(_relaxed, ) -ATOMIC_OP_ADD_RETURN(_acquire, a, "memory") -ATOMIC_OP_ADD_RETURN(_release, l, "memory") -ATOMIC_OP_ADD_RETURN( , al, "memory") +ATOMIC_OP_ADD_SUB_RETURN(_relaxed) +ATOMIC_OP_ADD_SUB_RETURN(_acquire) +ATOMIC_OP_ADD_SUB_RETURN(_release) +ATOMIC_OP_ADD_SUB_RETURN( ) -#undef ATOMIC_OP_ADD_RETURN +#undef ATOMIC_OP_ADD_SUB_RETURN static inline void __lse_atomic_and(int i, atomic_t *v) { - asm volatile( - __LSE_PREAMBLE - " mvn %w[i], %w[i]\n" - " stclr %w[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); + return __lse_atomic_andnot(~i, v); } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ - asm volatile( \ - __LSE_PREAMBLE \ - " mvn %w[i], %w[i]\n" \ - " ldclr" #mb " %w[i], %w[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ + return __lse_atomic_fetch_andnot##name(~i, v); \ } ATOMIC_FETCH_OP_AND(_relaxed, ) @@ -108,69 +111,14 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void __lse_atomic_sub(int i, atomic_t *v) -{ - asm volatile( - __LSE_PREAMBLE - " neg %w[i], %w[i]\n" - " stadd %w[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); -} - -#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ -{ \ - u32 tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ - " add %w[i], %w[i], %w[tmp]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC_OP_SUB_RETURN(_relaxed, ) -ATOMIC_OP_SUB_RETURN(_acquire, a, "memory") -ATOMIC_OP_SUB_RETURN(_release, l, "memory") -ATOMIC_OP_SUB_RETURN( , al, "memory") - -#undef ATOMIC_OP_SUB_RETURN - -#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) 
\ -static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ -{ \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC_FETCH_OP_SUB(_relaxed, ) -ATOMIC_FETCH_OP_SUB(_acquire, a, "memory") -ATOMIC_FETCH_OP_SUB(_release, l, "memory") -ATOMIC_FETCH_OP_SUB( , al, "memory") - -#undef ATOMIC_FETCH_OP_SUB - #define ATOMIC64_OP(op, asm_op) \ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op " %[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ + " " #asm_op " %[i], %[v]\n" \ + : [v] "+Q" (v->counter) \ + : [i] "r" (i)); \ } ATOMIC64_OP(andnot, stclr) @@ -178,19 +126,27 @@ ATOMIC64_OP(or, stset) ATOMIC64_OP(xor, steor) ATOMIC64_OP(add, stadd) +static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) +{ + __lse_atomic64_add(-i, v); +} + #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ + s64 old; \ + \ asm volatile( \ __LSE_PREAMBLE \ -" " #asm_op #mb " %[i], %[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ + " " #asm_op #mb " %[i], %[old], %[v]" \ + : [v] "+Q" (v->counter), \ + [old] "=r" (old) \ + : [i] "r" (i) \ : cl); \ \ - return i; \ + return old; \ } #define ATOMIC64_FETCH_OPS(op, asm_op) \ @@ -207,51 +163,46 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OPS -#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ +#define ATOMIC64_FETCH_OP_SUB(name) \ +static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ +{ \ + return __lse_atomic64_fetch_add##name(-i, v); \ +} + +ATOMIC64_FETCH_OP_SUB(_relaxed) +ATOMIC64_FETCH_OP_SUB(_acquire) +ATOMIC64_FETCH_OP_SUB(_release) +ATOMIC64_FETCH_OP_SUB( ) + +#undef ATOMIC64_FETCH_OP_SUB + +#define ATOMIC64_OP_ADD_SUB_RETURN(name) \ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ { \ - unsigned long tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ - " add %[i], %[i], %x[tmp]" \ - : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ + return __lse_atomic64_fetch_add##name(i, v) + i; \ +} \ \ - return i; \ +static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ +{ \ + return __lse_atomic64_fetch_sub##name(i, v) - i; \ } -ATOMIC64_OP_ADD_RETURN(_relaxed, ) -ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory") -ATOMIC64_OP_ADD_RETURN(_release, l, "memory") -ATOMIC64_OP_ADD_RETURN( , al, "memory") +ATOMIC64_OP_ADD_SUB_RETURN(_relaxed) +ATOMIC64_OP_ADD_SUB_RETURN(_acquire) +ATOMIC64_OP_ADD_SUB_RETURN(_release) +ATOMIC64_OP_ADD_SUB_RETURN( ) -#undef ATOMIC64_OP_ADD_RETURN +#undef ATOMIC64_OP_ADD_SUB_RETURN static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { - asm volatile( - __LSE_PREAMBLE - " mvn %[i], %[i]\n" - " stclr %[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); + return __lse_atomic64_andnot(~i, v); } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) 
\ static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ - asm volatile( \ - __LSE_PREAMBLE \ - " mvn %[i], %[i]\n" \ - " ldclr" #mb " %[i], %[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ + return __lse_atomic64_fetch_andnot##name(~i, v); \ } ATOMIC64_FETCH_OP_AND(_relaxed, ) @@ -261,61 +212,6 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) -{ - asm volatile( - __LSE_PREAMBLE - " neg %[i], %[i]\n" - " stadd %[i], %[v]" - : [i] "+&r" (i), [v] "+Q" (v->counter) - : "r" (v)); -} - -#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ -static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ -{ \ - unsigned long tmp; \ - \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ - " add %[i], %[i], %x[tmp]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC64_OP_SUB_RETURN(_relaxed, ) -ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory") -ATOMIC64_OP_SUB_RETURN(_release, l, "memory") -ATOMIC64_OP_SUB_RETURN( , al, "memory") - -#undef ATOMIC64_OP_SUB_RETURN - -#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ -{ \ - asm volatile( \ - __LSE_PREAMBLE \ - " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %[i], %[v]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ - \ - return i; \ -} - -ATOMIC64_FETCH_OP_SUB(_relaxed, ) -ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory") -ATOMIC64_FETCH_OP_SUB(_release, l, "memory") -ATOMIC64_FETCH_OP_SUB( , al, "memory") - -#undef ATOMIC64_FETCH_OP_SUB - static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { unsigned long tmp; @@ -353,7 +249,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ " mov %" #w "[tmp], %" #w "[old]\n" \ " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ " mov %" #w "[ret], %" #w "[tmp]" \ - : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \ + : [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr), \ [tmp] "=&r" (tmp) \ : [old] "r" (x1), [new] "r" (x2) \ : cl); \ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 1c5a00598458..62217be36217 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -26,6 +26,14 @@ #define __tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") +/* + * Data Gathering Hint: + * This instruction prevents merging memory accesses with Normal-NC or + * Device-GRE attributes before the hint instruction with any memory accesses + * appearing after the hint instruction. 
+ */ +#define dgh() asm volatile("hint #6" : : : "memory") + #ifdef CONFIG_ARM64_PSEUDO_NMI #define pmr_sync() \ do { \ @@ -46,6 +54,7 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +#define io_stop_wc() dgh() #define tsb_csync() \ do { \ diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h index 81a3e519b07d..9b3c787132d2 100644 --- a/arch/arm64/include/asm/bitops.h +++ b/arch/arm64/include/asm/bitops.h @@ -18,7 +18,6 @@ #include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/fls64.h> -#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index f9bef42c1411..497acf134d99 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -243,7 +243,7 @@ static inline void __cmpwait_case_##sz(volatile void *ptr, \ " cbnz %" #w "[tmp], 1f\n" \ " wfe\n" \ "1:" \ - : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr) \ + : [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr) \ : [val] "r" (val)); \ } diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 0f6d16faa540..a58e366f0b07 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -51,6 +51,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 19b8441aa8f2..999b9149f856 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -73,7 +73,9 @@ #define ARM_CPU_PART_CORTEX_A76 0xD0B #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_CORTEX_A510 0xD46 #define ARM_CPU_PART_CORTEX_A710 0xD47 +#define ARM_CPU_PART_CORTEX_X2 0xD48 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define APM_CPU_PART_POTENZA 0x000 @@ -115,7 +117,9 @@ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) +#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index d3e1825337be..ad55079abe47 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -14,7 +14,6 @@ #ifdef CONFIG_EFI extern void efi_init(void); -extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); #else #define efi_init() #endif diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 8b300dd28def..72b0e71cc3de 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -33,15 +33,6 @@ do { \ (b)->data = (tmp).data; \ } while (0) -static inline bool in_bpf_jit(struct pt_regs *regs) -{ - if (!IS_ENABLED(CONFIG_BPF_JIT)) - return false; - - return regs->pc >= 
BPF_JIT_REGION_START && - regs->pc < BPF_JIT_REGION_END; -} - #ifdef CONFIG_BPF_JIT bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dbb4b30a5648..cb24385e3632 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -51,8 +51,8 @@ extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); -/* Maximum VL that SVE VL-agnostic software can transparently support */ -#define SVE_VL_ARCH_MAX 0x100 +/* Maximum VL that SVE/SME VL-agnostic software can transparently support */ +#define VL_ARCH_MAX 0x100 /* Offset of FFR in the SVE register dump */ static inline size_t sve_ffr_offset(int vl) @@ -122,7 +122,7 @@ extern void fpsimd_sync_to_sve(struct task_struct *task); extern void sve_sync_to_fpsimd(struct task_struct *task); extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); -extern int sve_set_vector_length(struct task_struct *task, +extern int vec_set_vector_length(struct task_struct *task, enum vec_type type, unsigned long vl, unsigned long flags); extern int sve_set_current_vl(unsigned long arg); diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index b100e0055eab..f68fbb207473 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -106,6 +106,8 @@ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV) +#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) +#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/hyperv-tlfs.h b/arch/arm64/include/asm/hyperv-tlfs.h index 4d964a7f02ee..bc6c7ac934a1 100644 --- a/arch/arm64/include/asm/hyperv-tlfs.h +++ b/arch/arm64/include/asm/hyperv-tlfs.h @@ -64,6 +64,15 @@ #define HV_REGISTER_STIMER0_CONFIG 0x000B0000 #define HV_REGISTER_STIMER0_COUNT 0x000B0001 +union hv_msi_entry { + u64 as_uint64[2]; + struct { + u64 address; + u32 data; + u32 reserved; + } __packed; +}; + #include <asm-generic/hyperv-tlfs.h> #endif diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 50d5e4de244c..d5b0386ef765 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -63,6 +63,7 @@ enum __kvm_host_smccc_func { /* Hypercalls available after pKVM finalisation */ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, + __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp, __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f4871e47b2d0..d62405ce3e6d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -41,6 +41,8 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +void kvm_vcpu_wfi(struct kvm_vcpu *vcpu); + static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { return !(vcpu->arch.hcr_el2 & HCR_RW); @@ -386,7 +388,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; } else { u64 sctlr = vcpu_read_sys_reg(vcpu, 
SCTLR_EL1); - sctlr |= (1 << 25); + sctlr |= SCTLR_ELx_EE; vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); } } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..5bc01e62c08a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,6 @@ #include <asm/fpsimd.h> #include <asm/kvm.h> #include <asm/kvm_asm.h> -#include <asm/thread_info.h> #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -298,9 +297,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* State of various workarounds, see kvm_asm.h for bit assignment */ - u64 workaround_flags; - /* Miscellaneous vcpu state flags */ u64 flags; @@ -321,8 +317,8 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; - struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ + struct task_struct *parent_task; struct { /* {Break,watch}point registers */ @@ -367,9 +363,6 @@ struct kvm_vcpu_arch { int target; DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* Detect first run of a vcpu */ - bool has_run_once; - /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; @@ -411,20 +404,17 @@ struct kvm_vcpu_arch { #define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ -#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ +/* + * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be + * set together with an exception... + */ +#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ -#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ -#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ - -#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ - KVM_GUESTDBG_USE_SW_BP | \ - KVM_GUESTDBG_USE_HW | \ - KVM_GUESTDBG_SINGLESTEP) /* * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can * take the following values: @@ -442,11 +432,14 @@ struct kvm_vcpu_arch { #define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11) #define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11) -/* - * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be - * set together with an exception... 
- */ -#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ +#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ +#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ +#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14) + +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | \ + KVM_GUESTDBG_USE_HW | \ + KVM_GUESTDBG_SINGLESTEP) #define vcpu_has_sve(vcpu) (system_supports_sve() && \ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) @@ -606,6 +599,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); +#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid) + #ifndef __KVM_NVHE_HYPERVISOR__ #define kvm_call_hyp_nvhe(f, ...) \ ({ \ @@ -675,8 +670,15 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); int kvm_handle_mmio_return(struct kvm_vcpu *vcpu); int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa); -int kvm_perf_init(void); -int kvm_perf_teardown(void); +/* + * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, + * arrived in guest context. For arm64, any event that arrives while a vCPU is + * loaded is considered to be "in guest". + */ +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu); gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu); @@ -717,7 +719,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} void kvm_arm_init_debug(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); @@ -737,8 +738,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu); static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) { @@ -749,12 +752,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); -#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ -static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) -{ - return kvm_arch_vcpu_run_map_fp(vcpu); -} - +#ifdef CONFIG_KVM void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u32 clr); diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 5afd14ab15b9..462882f356c7 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -void __sve_save_state(void *sve_pffr, u32 *fpsr); void __sve_restore_state(void *sve_pffr, u32 *fpsr); #ifndef __KVM_NVHE_HYPERVISOR__ diff --git 
a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 02d378887743..81839e9a8a24 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -150,6 +150,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #include <asm/kvm_pgtable.h> #include <asm/stage2_pgtable.h> +int kvm_share_hyp(void *from, void *to); +void kvm_unshare_hyp(void *from, void *to); int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 027783829584..9f339dffbc1a 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -252,6 +252,27 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot); /** + * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table. + * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init(). + * @addr: Virtual address from which to remove the mapping. + * @size: Size of the mapping. + * + * The offset of @addr within a page is ignored, @size is rounded-up to + * the next page boundary and @phys is rounded-down to the previous page + * boundary. + * + * TLB invalidation is performed for each page-table entry cleared during the + * unmapping operation and the reference count for the page-table page + * containing the cleared entry is decremented, with unreferenced pages being + * freed. The unmapping operation will stop early if it encounters either an + * invalid page-table entry or a valid block mapping which maps beyond the range + * being unmapped. + * + * Return: Number of bytes unmapped, which may be 0. + */ +u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size); + +/** * kvm_get_vtcr() - Helper to construct VTCR_EL2 * @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register. * @mmfr1: Sanitized value of SYS_ID_AA64MMFR1_EL1 register. @@ -270,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); /** * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * @pgt: Uninitialised page-table structure to initialise. - * @arch: Arch-specific KVM structure representing the guest virtual - * machine. + * @mmu: S2 MMU context for this S2 translation * @mm_ops: Memory management callbacks. * @flags: Stage-2 configuration flags. * @force_pte_cb: Function that returns true if page level mappings must @@ -279,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); * * Return: 0 on success, negative error code on failure. */ -int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, +int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb); -#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \ - __kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL) +#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \ + __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL) /** * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. 
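(For context, not part of the patch: a hedged sketch of how a caller might pair the kvm_share_hyp()/kvm_unshare_hyp() helpers declared in kvm_mmu.h above. The surrounding function, "buf" and "len" are hypothetical, and anything beyond handling the share failure is elided.)

static int share_buf_with_hyp(void *buf, size_t len)
{
	int ret;

	/* Make [buf, buf + len) accessible to the hypervisor at EL2. */
	ret = kvm_share_hyp(buf, buf + len);
	if (ret)
		return ret;

	/* ... EL2 code can now reach the buffer through its hyp VA ... */

	/* Tear the sharing down once EL2 no longer needs the buffer. */
	kvm_unshare_hyp(buf, buf + len);
	return 0;
}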
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h new file mode 100644 index 000000000000..9f4ad2a8df59 --- /dev/null +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - Google LLC + * Author: Quentin Perret <qperret@google.com> + */ +#ifndef __ARM64_KVM_PKVM_H__ +#define __ARM64_KVM_PKVM_H__ + +#include <linux/memblock.h> +#include <asm/kvm_pgtable.h> + +#define HYP_MEMBLOCK_REGIONS 128 + +extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; +extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); + +static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) +{ + unsigned long total = 0, i; + + /* Provision the worst case scenario */ + for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { + nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); + total += nr_pages; + } + + return total; +} + +static inline unsigned long __hyp_pgtable_total_pages(void) +{ + unsigned long res = 0, i; + + /* Cover all of memory with page-granularity */ + for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { + struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i]; + res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT); + } + + return res; +} + +static inline unsigned long hyp_s1_pgtable_pages(void) +{ + unsigned long res; + + res = __hyp_pgtable_total_pages(); + + /* Allow 1 GiB for private mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +static inline unsigned long host_s2_pgtable_pages(void) +{ + unsigned long res; + + /* + * Include an extra 16 pages to safely upper-bound the worst case of + * concatenated pgds. + */ + res = __hyp_pgtable_total_pages() + 16; + + /* Allow 1 GiB for MMIO mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +#endif /* __ARM64_KVM_PKVM_H__ */ diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 9906541a6861..b77e9b3f5371 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -1,48 +1,43 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H +#ifdef __ASSEMBLY__ +#include <asm/assembler.h> +#endif + #define __ALIGN .align 2 #define __ALIGN_STR ".align 2" -#if defined(CONFIG_ARM64_BTI_KERNEL) && defined(__aarch64__) - -/* - * Since current versions of gas reject the BTI instruction unless we - * set the architecture version to v8.5 we use the hint instruction - * instead. - */ -#define BTI_C hint 34 ; - /* - * When using in-kernel BTI we need to ensure that PCS-conformant assembly - * functions have suitable annotations. Override SYM_FUNC_START to insert - * a BTI landing pad at the start of everything. + * When using in-kernel BTI we need to ensure that PCS-conformant + * assembly functions have suitable annotations. Override + * SYM_FUNC_START to insert a BTI landing pad at the start of + * everything, the override is done unconditionally so we're more + * likely to notice any drift from the overridden definitions. 
*/ #define SYM_FUNC_START(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_NOALIGN(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \ - BTI_C + bti c ; #define SYM_FUNC_START_LOCAL(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_LOCAL_NOALIGN(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \ - BTI_C + bti c ; #define SYM_FUNC_START_WEAK(name) \ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \ - BTI_C + bti c ; #define SYM_FUNC_START_WEAK_NOALIGN(name) \ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \ - BTI_C - -#endif + bti c ; /* * Annotate a function as position independent, i.e., safe to be called before diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1b9a1e242612..0af70d9abede 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,11 +44,8 @@ #define _PAGE_OFFSET(va) (-(UL(1) << (va))) #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN)) -#define BPF_JIT_REGION_SIZE (SZ_128M) -#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) -#define MODULES_VADDR (BPF_JIT_REGION_END) +#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN)) #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e9c30859f80c..48f8466a4be9 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -15,6 +15,7 @@ #ifndef __ASSEMBLY__ #include <linux/refcount.h> +#include <asm/cpufeature.h> typedef struct { atomic64_t id; diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 478b9bcf69ad..e4704a403237 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -84,10 +84,12 @@ static inline void __dc_gzva(u64 p) static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, bool init) { - u64 curr, mask, dczid_bs, end1, end2, end3; + u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3; /* Read DC G(Z)VA block size from the system register. 
*/ - dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf); + dczid = read_cpuid(DCZID_EL0); + dczid_bs = 4ul << (dczid & 0xf); + dczid_dzp = (dczid >> 4) & 1; curr = (u64)__tag_set(addr, tag); mask = dczid_bs - 1; @@ -106,7 +108,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, */ #define SET_MEMTAG_RANGE(stg_post, dc_gva) \ do { \ - if (size >= 2 * dczid_bs) { \ + if (!dczid_dzp && size >= 2 * dczid_bs) {\ do { \ curr = stg_post(curr); \ } while (curr < end1); \ diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 18782f0c4721..11ab1c077697 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -5,7 +5,7 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) +#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H) # error "please don't include this file directly" #endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 6564a01cc085..e77cdef9ca29 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -47,6 +47,10 @@ struct stack_info { * @prev_type: The type of stack this frame record was on, or a synthetic * value of STACK_TYPE_UNKNOWN. This is used to detect a * transition from one stack to another. + * + * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance + * associated with the most recently encountered replacement lr + * value. */ struct stackframe { unsigned long fp; @@ -59,9 +63,6 @@ struct stackframe { #endif }; -extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); -extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, - bool (*fn)(void *, unsigned long), void *data); extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl); @@ -146,7 +147,4 @@ static inline bool on_accessible_stack(const struct task_struct *tsk, return false; } -void start_backtrace(struct stackframe *frame, unsigned long fp, - unsigned long pc); - #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 16b3f1a1d468..898bee0004ae 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -182,6 +182,7 @@ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) @@ -771,6 +772,20 @@ #define ID_AA64ISAR1_GPI_NI 0x0 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1 +/* id_aa64isar2 */ +#define ID_AA64ISAR2_RPRES_SHIFT 4 +#define ID_AA64ISAR2_WFXT_SHIFT 0 + +#define ID_AA64ISAR2_RPRES_8BIT 0x0 +#define ID_AA64ISAR2_RPRES_12BIT 0x1 +/* + * Value 0x1 has been removed from the architecture, and is + * reserved, but has not yet been removed from the ARM ARM + * as of ARM DDI 0487G.b. 
+ */ +#define ID_AA64ISAR2_WFXT_NI 0x0 +#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 @@ -889,6 +904,7 @@ #endif /* id_aa64mmfr1 */ +#define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 #define ID_AA64MMFR1_TWED_SHIFT 32 #define ID_AA64MMFR1_XNX_SHIFT 28 @@ -937,6 +953,7 @@ #define ID_AA64DFR0_PMUVER_8_1 0x4 #define ID_AA64DFR0_PMUVER_8_4 0x5 #define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_8_7 0x7 #define ID_AA64DFR0_PMUVER_IMP_DEF 0xf #define ID_AA64DFR0_PMSVER_8_2 0x1 diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index ec2db3419c41..f386b90a79c8 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -32,7 +32,7 @@ void update_freq_counters_refs(void); /* Replace task scheduler's default thermal pressure API */ #define arch_scale_thermal_pressure topology_get_thermal_pressure -#define arch_set_thermal_pressure topology_set_thermal_pressure +#define arch_update_thermal_pressure topology_update_thermal_pressure #include <asm-generic/topology.h> diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 6bdb5f5db438..4e65da3445c7 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -38,7 +38,7 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 450 +#define __NR_compat_syscalls 451 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 41ea1195e44b..604a2053d006 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -905,6 +905,8 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) __SYSCALL(__NR_process_mrelease, sys_process_mrelease) #define __NR_futex_waitv 449 __SYSCALL(__NR_futex_waitv, sys_futex_waitv) +#define __NR_set_mempolicy_home_node 450 +__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 7b23b16f21ce..f03731847d9d 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -76,5 +76,7 @@ #define HWCAP2_BTI (1 << 17) #define HWCAP2_MTE (1 << 18) #define HWCAP2_ECV (1 << 19) +#define HWCAP2_AFP (1 << 20) +#define HWCAP2_RPRES (1 << 21) #endif /* _UAPI__ASM_HWCAP_H */ |
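(For context, not part of the patch: a minimal user-space sketch showing how the two new hwcaps added to uapi/asm/hwcap.h above can be probed with getauxval(). The fallback #defines simply mirror the values from the patch for builds against older UAPI headers.)

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_AFP
#define HWCAP2_AFP	(1 << 20)	/* mirrors the value added above */
#endif
#ifndef HWCAP2_RPRES
#define HWCAP2_RPRES	(1 << 21)	/* mirrors the value added above */
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* FEAT_AFP: alternate floating-point behaviour controls */
	printf("AFP:   %s\n", (hwcap2 & HWCAP2_AFP) ? "present" : "absent");
	/* FEAT_RPRES: increased reciprocal (sqrt) estimate precision */
	printf("RPRES: %s\n", (hwcap2 & HWCAP2_RPRES) ? "present" : "absent");
	return 0;
}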