| author | Will Deacon <will.deacon@arm.com> | 2015-08-06 19:54:44 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-08-12 12:59:10 +0300 |
| commit | 0ca326de7aa9cb253db9c1a3eb3f0487c8dbf912 (patch) | |
| tree | 2964ef144f8b873c9af610577b4e3c92c0af9ac8 /arch/arm/include/asm/atomic.h | |
| parent | cd074aea9261784e44f292e1132830ec221802c6 (diff) | |
| download | linux-0ca326de7aa9cb253db9c1a3eb3f0487c8dbf912.tar.xz | |
locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
By defining our SMP atomics in terms of relaxed operations, we gain
a small reduction in code size and have acquire/release/fence variants
generated automatically by the core code.
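
The "generated automatically by the core code" part refers to the wrappers in the generic atomic header: when an architecture supplies only the _relaxed() form of an operation, the core code adds the barriers itself to produce the acquire, release and fully-ordered variants. The sketch below illustrates that mechanism; the macro names follow the generic include/linux/atomic.h wrappers introduced by this series, but the snippet is an approximation rather than a verbatim quote.

```c
/*
 * Sketch (approximate, not verbatim kernel source) of how the core
 * code builds the ordered variants from an arch-provided _relaxed()
 * operation.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* order against later accesses */	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();	/* order against earlier accesses */ \
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* The fully-ordered form then falls out of the relaxed one, e.g.: */
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
```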
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r-- | arch/arm/include/asm/atomic.h | 37 |
1 file changed, 16 insertions, 21 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 82b75a7cb762..fe3ef397f5a4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
 } \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
         unsigned long tmp; \
         int result; \
         \
-        smp_mb(); \
         prefetchw(&v->counter); \
         \
         __asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         : "r" (&v->counter), "Ir" (i) \
         : "cc"); \
         \
-        smp_mb(); \
-        \
         return result; \
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
         int oldval;
         unsigned long res;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
                 : "cc");
         } while (res);
 
-        smp_mb();
-
         return oldval;
 }
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -297,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
 } \
 
 #define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long \
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
 { \
         long long result; \
         unsigned long tmp; \
         \
-        smp_mb(); \
         prefetchw(&v->counter); \
         \
         __asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -316,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
         : "r" (&v->counter), "r" (i) \
         : "cc"); \
         \
-        smp_mb(); \
-        \
         return result; \
 }
 
@@ -328,6 +324,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+
 #define atomic64_andnot atomic64_andnot
 
 ATOMIC64_OP(and, and, and)
@@ -339,13 +338,12 @@ ATOMIC64_OP(xor, eor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-                                          long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
         long long oldval;
         unsigned long res;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         do {
@@ -360,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                 : "cc");
         } while (res);
 
-        smp_mb();
-
         return oldval;
 }
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
         long long result;
         unsigned long tmp;
 
-        smp_mb();
         prefetchw(&ptr->counter);
 
         __asm__ __volatile__("@ atomic64_xchg\n"
@@ -382,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
         : "r" (&ptr->counter), "r" (new)
         : "cc");
 
-        smp_mb();
-
         return result;
 }
+#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
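
A note on one idiom in the patch above: the self-referential defines such as `#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed` are how the architecture advertises to the generic header that it implements the relaxed variant. The core code probes for each name with `#ifndef` and only synthesizes the forms the architecture did not provide. A rough sketch of that selection logic (modelled on the generic header of this series, not quoted verbatim):

```c
/*
 * Approximate sketch of the core code's per-operation selection logic.
 * Because the patch defines atomic_add_return_relaxed to itself, the
 * first #ifndef below is false and the ordered variants are built
 * around the arch's relaxed implementation (using the __atomic_op_*
 * wrappers sketched earlier).
 */
#ifndef atomic_add_return_relaxed
/* Arch gave only a fully-ordered op: reuse it for every variant. */
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* arch provides the relaxed form */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif

#endif /* atomic_add_return_relaxed */
```

Existing callers of atomic_add_return() or atomic64_cmpxchg() therefore keep their full-barrier semantics; only callers that explicitly use the new _relaxed() names skip the memory barriers.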