author    Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 21:45:29 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 21:45:29 +0300
commit    a15286c63d113d4296c58867994cd266a28f5d6d (patch)
tree      aaebbc86918c0c1943c26115d5bb1dd6c2fc2d9b /arch/arm
parent    b89c07dea16137696d0f2d479ef665ef7c1022ab (diff)
parent    0e8a89d49d45197770f2e57fb15f1bc9ded96eb0 (diff)
download  linux-a15286c63d113d4296c58867994cd266a28f5d6d.tar.xz
Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Core locking & atomics:
- Convert all architectures to ARCH_ATOMIC: move every architecture
to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
transitory facilities and #ifdefs (a layering sketch follows this list).
Much reduction in complexity from that series:
63 files changed, 756 insertions(+), 4094 deletions(-)
- Self-test enhancements
- Futexes:
- Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't
set FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC). A userspace usage
sketch follows the commit list below.
[ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
introduce a new variant was resisted successfully. ]
- Enhance futex self-tests
- Lockdep:
- Fix dependency path printouts
- Optimize trace saving
- Broaden & fix wait-context checks
- Misc cleanups and fixes.
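[ To make the ARCH_ATOMIC item above concrete: after the conversion, each
architecture supplies arch_atomic_*() primitives and the generic atomic_*()
API is generated on top of them, so the KASAN/KCSAN instrumentation lives in
one place instead of in every architecture. A minimal sketch of that layering
follows; it is simplified from the generated wrappers, not a verbatim quote
of them. ]

/* Architecture side (e.g. arch/arm/include/asm/atomic.h): the raw operation. */
typedef struct { int counter; } atomic_t;	/* as in the kernel's own definition */

#define arch_atomic_read(v)	READ_ONCE((v)->counter)

/*
 * Generic side (the generated atomic-instrumented wrappers): the same call,
 * preceded by an instrumentation hook, so every architecture gets sanitizer
 * coverage without touching its own atomics.
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hook */
	return arch_atomic_read(v);
}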
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
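[ For reference, a userspace sketch of how FUTEX_LOCK_PI2 is meant to be used:
it takes the same arguments as FUTEX_LOCK_PI, but the absolute timeout is
measured against CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is or'ed into the
operation. The fallback opcode definition and the futex_lock_pi2() helper
below are illustrative assumptions, since glibc provides no futex() wrapper. ]

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2	13	/* provided by v5.14+ UAPI headers */
#endif

/* Hypothetical helper: raw futex() syscall for the PI2 operation. */
static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
{
	/* val, uaddr2 and val3 are unused by FUTEX_LOCK_PI2 */
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, abs_timeout, NULL, 0);
}

int main(void)
{
	uint32_t futex_word = 0;	/* 0 == unlocked for a PI futex */
	struct timespec ts;

	/* Absolute timeout one second from now, on CLOCK_MONOTONIC -- the
	 * default clock for FUTEX_LOCK_PI2, unlike FUTEX_LOCK_PI. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	ts.tv_sec += 1;

	if (futex_lock_pi2(&futex_word, &ts) != 0)
		perror("FUTEX_LOCK_PI2");
	else
		printf("acquired: futex word now holds our TID (0x%x)\n",
		       (unsigned)futex_word);
	return 0;
}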
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/atomic.h       |  96
-rw-r--r--  arch/arm/include/asm/cmpxchg.h      |  20
-rw-r--r--  arch/arm/include/asm/sync_bitops.h  |   2
3 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 455eb19a5ac1..db8512d9a918 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -22,8 +22,8 @@
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
 #if __LINUX_ARM_ARCH__ >= 6
@@ -34,7 +34,7 @@
 */
 #define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int result; \
@@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 } \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int result; \
@@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 }
 #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 { \
 unsigned long tmp; \
 int result, val; \
@@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 return result; \
 }
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 int oldval;
 unsigned long res;
@@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 return oldval;
 }
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 int oldval, newval;
 unsigned long tmp;
@@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 #else /* ARM_ARCH_6 */
@@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #endif
 #define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 unsigned long flags; \
 \
@@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 } \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
 { \
 unsigned long flags; \
 int val; \
@@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 }
 #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 unsigned long flags; \
 int val; \
@@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 return val; \
 }
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 int ret;
 unsigned long flags;
@@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
 #endif /* __LINUX_ARM_ARCH__ */
@@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op) \
@@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
@@ -250,7 +250,7 @@ typedef struct {
 #define ATOMIC64_INIT(i) { (i) }
 #ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 s64 result;
@@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
 return result;
 }
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
 " strd %2, %H2, [%1]"
@@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
 );
 }
 #else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 s64 result;
@@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
 return result;
 }
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 s64 tmp;
@@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
 #endif
 #define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
 { \
 s64 result; \
 unsigned long tmp; \
@@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
 #define ATOMIC64_OP_RETURN(op, op1, op2) \
 static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
 { \
 s64 result; \
 unsigned long tmp; \
@@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
 #define ATOMIC64_FETCH_OP(op, op1, op2) \
 static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
 { \
 s64 result, val; \
 unsigned long tmp; \
@@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, op1, op2) \
 ATOMIC64_OP(op, op1, op2) \
 ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
 ATOMIC64_OPS(and, and, and)
 ATOMIC64_OPS(andnot, bic, bic)
 ATOMIC64_OPS(or, orr, orr)
 ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
 {
 s64 oldval;
 unsigned long res;
@@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
 return oldval;
 }
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
 {
 s64 result;
 unsigned long tmp;
@@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
 return result;
 }
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 s64 result;
 unsigned long tmp;
@@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 return result;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 s64 oldval, newval;
 unsigned long tmp;
@@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 return oldval;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..4dfe538dfc68 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 return ret;
 }
-#define xchg_relaxed(ptr, x) ({ \
+#define arch_xchg_relaxed(ptr, x) ({ \
 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 sizeof(*(ptr))); \
 })
@@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error "SMP is not supported on this platform"
 #endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
 /*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
 (unsigned long)(o), \
 (unsigned long)(n), \
 sizeof(*(ptr))); \
 })
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 #include <asm-generic/cmpxchg.h>
@@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 return oldval;
 }
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
 (__typeof__(*(ptr)))__cmpxchg((ptr), \
 (unsigned long)(o), \
 (unsigned long)(n), \
@@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
 case 1:
 case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
 break;
 #endif
 default:
@@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 return ret;
 }
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
 (__typeof(*ptr))__cmpxchg_local((ptr), \
 (unsigned long)(o), \
 (unsigned long)(n), \
@@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 return oldval;
 }
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
 (__typeof__(*(ptr)))__cmpxchg64((ptr), \
 (unsigned long long)(o), \
 (unsigned long long)(n)); \
 })
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..6f5d627c44a3 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -21,7 +21,7 @@
 #define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
 #define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
 #endif