author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 22:23:39 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 22:23:39 +0300
commit | de5d1b39ea0b38a9f4dfb08966042b7b91e2df30 (patch)
tree | 3591bdac4fe6756b4e3dc68b2ed1c792c4104218 /arch/ia64/include
parent | 1c594774283a7cfe6dc0f8ffdfb2dbfc497502c4 (diff)
parent | fd2efaa4eb5317c3a86357a83a7d456a1b86a0ac (diff)
download | linux-de5d1b39ea0b38a9f4dfb08966042b7b91e2df30.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
"The locking, atomics and memory model brains delivered:
- A larger update to the atomics code which reworks the ordering
barriers, consolidates the atomic primitives, provides the new
atomic64_fetch_add_unless() primitive and cleans up the include
hell.
- Simplify cmpxchg() instrumentation and add instrumentation for
xchg() and cmpxchg_double().
- Updates to the memory model and documentation"
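The atomic64_fetch_add_unless() primitive called out in the first bullet atomically adds a value to an atomic variable unless it currently holds a given "excluded" value, and returns the value observed before the (possibly skipped) addition. As a rough illustration of those semantics, here is a minimal user-space sketch in C11 atomics; the name fetch_add_unless64 and the use of <stdatomic.h> are assumptions of this example, not the kernel's implementation, which is built from its own atomic_try_cmpxchg() helpers:

```c
#include <stdatomic.h>
#include <stdint.h>

/*
 * Illustrative model of fetch_add_unless() semantics (not kernel
 * code): atomically add 'a' to '*v' unless '*v' equals 'u', and
 * return the value '*v' held beforehand.
 */
static int64_t fetch_add_unless64(_Atomic int64_t *v, int64_t a, int64_t u)
{
	int64_t c = atomic_load(v);

	do {
		if (c == u)
			break;	/* excluded value seen: do not add */
	} while (!atomic_compare_exchange_weak(v, &c, c + a));
	/* on CAS failure, 'c' is refreshed with the current value */

	return c;
}
```

With a fetch_add_unless() building block, helpers such as atomic64_inc_not_zero(v) reduce to fetch_add_unless64(v, 1, 0) != 0, which is why per-architecture open-coded loops like the ia64 ones in the diff below could be deleted in favor of generic code.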
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
locking/atomics: Rework ordering barriers
locking/atomics: Instrument cmpxchg_double*()
locking/atomics: Instrument xchg()
locking/atomics: Simplify cmpxchg() instrumentation
locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
tools/memory-model: Rename litmus tests to comply to norm7
tools/memory-model/Documentation: Fix typo, smb->smp
sched/Documentation: Update wake_up() & co. memory-barrier guarantees
locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
sched/core: Use smp_mb() in wake_woken_function()
tools/memory-model: Add informal LKMM documentation to MAINTAINERS
locking/atomics/Documentation: Describe atomic_set() as a write operation
tools/memory-model: Make scripts executable
tools/memory-model: Remove ACCESS_ONCE() from model
tools/memory-model: Remove ACCESS_ONCE() from recipes
locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
tools/memory-model: Add litmus test for full multicopy atomicity
locking/refcount: Always allow checked forms
...
Diffstat (limited to 'arch/ia64/include')
-rw-r--r-- | arch/ia64/include/asm/atomic.h | 81
1 file changed, 0 insertions, 81 deletions
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 2524fb60fbc2..206530d0751b 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -215,91 +215,10 @@ ATOMIC64_FETCH_OP(xor, ^)
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
-}
-
-
-static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
-}
-
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long c, old, dec;
-	c = atomic64_read(v);
-	for (;;) {
-		dec = c - 1;
-		if (unlikely(dec < 0))
-			break;
-		old = atomic64_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return dec;
-}
-
-/*
- * Atomically add I to V and return TRUE if the resulting value is
- * negative.
- */
-static __inline__ int
-atomic_add_negative (int i, atomic_t *v)
-{
-	return atomic_add_return(i, v) < 0;
-}
-
-static __inline__ long
-atomic64_add_negative (__s64 i, atomic64_t *v)
-{
-	return atomic64_add_return(i, v) < 0;
-}
-
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
-
-#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
-#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
-#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
-#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
-
 #define atomic_add(i,v)			(void)atomic_add_return((i), (v))
 #define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
-#define atomic_inc(v)			atomic_add(1, (v))
-#define atomic_dec(v)			atomic_sub(1, (v))
 
 #define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
 #define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
-#define atomic64_inc(v)			atomic64_add(1, (v))
-#define atomic64_dec(v)			atomic64_sub(1, (v))
 
 #endif /* _ASM_IA64_ATOMIC_H */
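The routines deleted above are variants of the same compare-and-swap retry loop. For example, atomic64_dec_if_positive() decrements only while the result would stay non-negative, and it returns the computed (possibly unstored) value. A user-space C11 rendering of the removed ia64 loop, again with hypothetical naming, might read:

```c
#include <stdatomic.h>
#include <stdint.h>

/*
 * Sketch of the deleted ia64 atomic64_dec_if_positive(): decrement
 * *v only if the result stays >= 0; return the decremented value
 * even when it was not stored (a negative return means the
 * decrement was refused). Not kernel code.
 */
static int64_t dec_if_positive64(_Atomic int64_t *v)
{
	int64_t c = atomic_load(v);
	int64_t dec;

	do {
		dec = c - 1;
		if (dec < 0)
			break;	/* would go negative: leave *v untouched */
	} while (!atomic_compare_exchange_weak(v, &c, dec));

	return dec;
}
```

atomic_compare_exchange_weak() may fail spuriously, which is harmless in this pattern because the loop simply retries with the freshly reloaded value.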