author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-13 22:23:39 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-13 22:23:39 +0300
commit     de5d1b39ea0b38a9f4dfb08966042b7b91e2df30
tree       3591bdac4fe6756b4e3dc68b2ed1c792c4104218  /arch/h8300/include/asm/atomic.h
parent     1c594774283a7cfe6dc0f8ffdfb2dbfc497502c4
parent     fd2efaa4eb5317c3a86357a83a7d456a1b86a0ac
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
"The locking, atomics and memory model brains delivered:
- A larger update to the atomics code which reworks the ordering
barriers, consolidates the atomic primitives, provides the new
atomic64_fetch_add_unless() primitive and cleans up the include
hell.
- Simplify cmpxchg() instrumentation and add instrumentation for
xchg() and cmpxchg_double().
- Updates to the memory model and documentation"
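For reference, the contract of the new primitive named above: atomic_fetch_add_unless(v, a, u) adds a to *v unless *v currently equals u, and in either case returns the value the counter held beforehand; atomic64_fetch_add_unless() is the 64-bit counterpart. A minimal sketch of the generic cmpxchg-loop fallback, simplified from the kernel's common atomics headers:

  static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  {
          int c = atomic_read(v);                 /* snapshot the counter */

          do {
                  if (c == u)                     /* hit the "unless" value: */
                          break;                  /* return without adding   */
          } while (!atomic_try_cmpxchg(v, &c, c + a));    /* c is reloaded on failure */

          return c;                               /* old value, added or not */
  }

Architectures that can do better provide their own version, and the fallback is compiled out.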
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
locking/atomics: Rework ordering barriers
locking/atomics: Instrument cmpxchg_double*()
locking/atomics: Instrument xchg()
locking/atomics: Simplify cmpxchg() instrumentation
locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
tools/memory-model: Rename litmus tests to comply to norm7
tools/memory-model/Documentation: Fix typo, smb->smp
sched/Documentation: Update wake_up() & co. memory-barrier guarantees
locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
sched/core: Use smp_mb() in wake_woken_function()
tools/memory-model: Add informal LKMM documentation to MAINTAINERS
locking/atomics/Documentation: Describe atomic_set() as a write operation
tools/memory-model: Make scripts executable
tools/memory-model: Remove ACCESS_ONCE() from model
tools/memory-model: Remove ACCESS_ONCE() from recipes
locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
tools/memory-model: Add litmus test for full multicopy atomicity
locking/refcount: Always allow checked forms
...
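The xchg()/cmpxchg_double() instrumentation commits above all follow one shape: the architecture supplies arch_-prefixed primitives, and a common wrapper performs the KASAN access check before delegating. A simplified sketch of that wrapper pattern (abridged; the real definitions live in include/asm-generic/atomic-instrumented.h):

  static __always_inline int atomic_xchg(atomic_t *v, int i)
  {
          kasan_check_write(v, sizeof(*v));       /* let KASAN validate the write */
          return arch_atomic_xchg(v, i);          /* arch code performs the exchange */
  }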
Diffstat (limited to 'arch/h8300/include/asm/atomic.h')
-rw-r--r-- | arch/h8300/include/asm/atomic.h | 19
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 941e7554e886..c6b6a06231b2 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -2,8 +2,10 @@
 #ifndef __ARCH_H8300_ATOMIC__
 #define __ARCH_H8300_ATOMIC__
 
+#include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/irqflags.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -15,8 +17,6 @@
 #define atomic_read(v)		READ_ONCE((v)->counter)
 #define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
 
-#include <linux/kernel.h>
-
 #define ATOMIC_OP_RETURN(op, c_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)	\
 {								\
@@ -69,18 +69,6 @@ ATOMIC_OPS(sub, -=)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
-
-#define atomic_inc_return(v)		atomic_add_return(1, v)
-#define atomic_dec_return(v)		atomic_sub_return(1, v)
-
-#define atomic_inc(v)			(void)atomic_inc_return(v)
-#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
-
-#define atomic_dec(v)			(void)atomic_dec_return(v)
-#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -94,7 +82,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	h8300flags flags;
@@ -106,5 +94,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	arch_local_irq_restore(flags);
 	return ret;
 }
+#define atomic_fetch_add_unless	atomic_fetch_add_unless
 
 #endif /* __ARCH_H8300_ATOMIC __ */
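The h8300 change is representative of the include-hell cleanup: atomic_inc(), atomic_dec(), the *_and_test() helpers and atomic_add_negative() can be deleted because the common atomics headers now supply them as fallbacks built on atomic_add_return()/atomic_sub_return(), and __atomic_add_unless() is renamed to match the new cross-arch atomic_fetch_add_unless() API. The trailing '#define atomic_fetch_add_unless atomic_fetch_add_unless' defines the macro to its own name so the generic header can detect, with a simple #ifdef, that this architecture provides the operation and must not emit its fallback. On the caller side the primitive supports the usual take-a-reference-unless-already-dead idiom; a hypothetical sketch (obj_tryget() is an illustrative name, not a kernel API):

  static bool obj_tryget(atomic_t *refcount)
  {
          /* an old value of 0 means the object was already being torn down */
          return atomic_fetch_add_unless(refcount, 1, 0) != 0;
  }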