author | Peter Zijlstra <peterz@infradead.org> | 2020-01-25 00:13:03 +0300
committer | Thomas Gleixner <tglx@linutronix.de> | 2020-06-11 09:03:24 +0300
commit | 37f8173dd84936ea78000ed1cad24f8b18d48ebb (patch)
tree | 0b715066a7f5c16a71988e176627c46b61481b3c /arch/x86/include/asm/atomic64_32.h
parent | 765dcd209947e7b3666c08fb109ab8b879f7a471 (diff)
download | linux-37f8173dd84936ea78000ed1cad24f8b18d48ebb.tar.xz
locking/atomics: Flip fallbacks and instrumentation
Currently, instrumentation of atomic primitives is done at the architecture
level, while composites or fallbacks are provided at the generic level.
The result is that there are no uninstrumented variants of the
fallbacks. Since there is now a need for such variants, in order to isolate
text poking from any form of instrumentation, invert this ordering.
Doing this means moving the instrumentation into the generic code as
well as having (for now) two variants of the fallbacks.
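As a rough sketch of the flipped layering (illustrative only; the real
wrappers are generated headers, and the instrumentation hook named below is
an assumption rather than the exact call they make): the instrumented
atomic64_*() wrappers live in generic code and forward to the uninstrumented
arch_atomic64_*() primitives, which code such as text poke can call directly.

	/*
	 * Illustrative sketch, not the generated kernel header verbatim:
	 * the generic, instrumented wrapper annotates the access for
	 * KASAN/KCSAN and then calls the uninstrumented arch_ primitive.
	 */
	static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));	/* assumed hook name */
		return arch_atomic64_add_return(i, v);		/* uninstrumented arch op */
	}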
Notes:
- the various *cond_read* primitives are not proper fallbacks
  and got moved into linux/atomic.c. No arch_ variants are
  generated because the base primitives smp_cond_load*()
  are instrumented.
- once all architectures are moved over to arch_atomic_, one of the
  fallback variants can be removed and some 2300 lines reclaimed.
- atomic_{read,set}*() are no longer double-instrumented.
Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/20200505134058.769149955@linutronix.de
Diffstat (limited to 'arch/x86/include/asm/atomic64_32.h')
-rw-r--r-- | arch/x86/include/asm/atomic64_32.h | 9
1 file changed, 9 insertions, 0 deletions
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 52cfaecb13f9..5efd01b548d1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	return arch_cmpxchg64(&v->counter, o, n);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 /**
  * arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 			     : "memory");
 	return o;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg
 
 /**
  * arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 			   ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
 /*
  * Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 			   ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return
 
 static inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 			     "S" (v) : "memory");
 	return (int)a;
 }
+#define arch_atomic64_add_unless arch_atomic64_add_unless
 
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
 
 	return old;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
 
 static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
 
 	return old;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
 
 static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
 
 	return old;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
 
 static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 
 	return old;
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
 #define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))
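The nine added #define arch_atomic64_<op> arch_atomic64_<op> lines follow the
self-define convention the arch-level fallback headers rely on: an operation
whose macro is defined to its own name is treated as provided by the
architecture, so no generic fallback is emitted for it. A minimal sketch of
that detection pattern (illustrative, modeled on the generated fallback
headers rather than copied from them):

	/*
	 * Illustrative sketch of the fallback detection pattern: if the
	 * architecture did not announce arch_atomic64_inc_return() via a
	 * self-define, build it from arch_atomic64_add_return() and then
	 * announce the fallback the same way.
	 */
	#ifndef arch_atomic64_inc_return
	static __always_inline s64
	arch_atomic64_inc_return(atomic64_t *v)
	{
		return arch_atomic64_add_return(1, v);
	}
	#define arch_atomic64_inc_return arch_atomic64_inc_return
	#endif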