author | Al Viro <viro@zeniv.linux.org.uk> | 2024-04-02 05:29:44 +0300
committer | Paul E. McKenney <paulmck@kernel.org> | 2024-04-10 08:06:00 +0300
commit | 7e00072915b3eb4739c57e716031f40de05e7a64 (patch)
tree | e4e4bba575fe6caa6b4634c53ce43a6cf4ac3f0d /arch/parisc
parent | 29b8e53c1274f0ffda915ac6c0e5c59d14ee208e (diff)
download | linux-7e00072915b3eb4739c57e716031f40de05e7a64.tar.xz
parisc: unify implementations of __cmpxchg_u{8,32,64}
The three implementations are identical except for the type name involved.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'arch/parisc')
-rw-r--r-- | arch/parisc/lib/bitops.c | 51
1 file changed, 16 insertions(+), 35 deletions(-)
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index ae2231d92198..cae30a3eb6d9 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -56,38 +56,19 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
 }
 
-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
-	unsigned long flags;
-	u64 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
-
-u32 notrace __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
-	unsigned long flags;
-	u32 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
-	unsigned long flags;
-	u8 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
+#define CMPXCHG(T)						\
+	T notrace __cmpxchg_##T(volatile T *ptr, T old, T new)	\
+	{							\
+		unsigned long flags;				\
+		T prev;						\
+								\
+		_atomic_spin_lock_irqsave(ptr, flags);		\
+		if ((prev = *ptr) == old)			\
+			*ptr = new;				\
+		_atomic_spin_unlock_irqrestore(ptr, flags);	\
+		return prev;					\
+	}
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u8)
 
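For context, here is a minimal user-space sketch of the macro-generation pattern the patch adopts: one macro body, stamped out once per type via token pasting (##), so the three entry points keep their distinct names and signatures. This is not the kernel code; a single pthread mutex stands in for the kernel's _atomic_spin_lock_irqsave(), which actually hashes the pointer into a bank of spinlocks and disables interrupts, and main() is illustrative only.

```c
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

typedef uint8_t  u8;
typedef uint32_t u32;
typedef uint64_t u64;

/* Stand-in for the kernel's hashed per-address spinlocks (assumption). */
static pthread_mutex_t cmpxchg_lock = PTHREAD_MUTEX_INITIALIZER;

/* One macro emits an identical compare-and-exchange body for each type T. */
#define CMPXCHG(T)							\
	T __cmpxchg_##T(volatile T *ptr, T old, T new_val)		\
	{								\
		T prev;							\
									\
		pthread_mutex_lock(&cmpxchg_lock);			\
		prev = *ptr;						\
		if (prev == old)	/* swap only on a match */	\
			*ptr = new_val;					\
		pthread_mutex_unlock(&cmpxchg_lock);			\
		return prev;	/* caller checks prev == old */		\
	}

/* Token pasting yields __cmpxchg_u64, __cmpxchg_u32, __cmpxchg_u8. */
CMPXCHG(u64)
CMPXCHG(u32)
CMPXCHG(u8)

int main(void)
{
	u32 v = 5;

	/* Succeeds: *ptr == old, so v becomes 6 and the old value 5 returns. */
	printf("%u\n", __cmpxchg_u32(&v, 5, 6));
	/* Fails: *ptr is now 6, not 5; v is unchanged and 6 is returned. */
	printf("%u\n", __cmpxchg_u32(&v, 5, 7));
	printf("final: %u\n", v);
	return 0;
}
```

The design point of the unification is that the generated functions are byte-for-byte what the three hand-written copies were, so callers and the arch's cmpxchg() wrappers need no changes; only the source-level duplication goes away.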