| field | value | date |
|---|---|---|
| author | Michael S. Tsirkin <mst@redhat.com> | 2016-01-07 18:54:54 +0300 |
| committer | Michael S. Tsirkin <mst@redhat.com> | 2016-01-12 21:47:01 +0300 |
| commit | 3226aad81aa670015a59e51458a0deb2d3bcb600 (patch) | |
| tree | 0cc2ab0abf363ad3f851fdcb14c0f7c1386e89de /arch/sh/include/asm/cmpxchg-llsc.h | |
| parent | a65961272e1ebdb60804bbe2bb440481fcbd1c76 (diff) | |
| download | linux-3226aad81aa670015a59e51458a0deb2d3bcb600.tar.xz | |
sh: support 1 and 2 byte xchg
This completes the xchg implementation for the sh architecture. Note: the
llsc variant is tricky, since the hardware only supports 4-byte atomics, and
the existing 1-byte xchg implementation was wrong: we need to do a 4-byte
cmpxchg and retry if any byte of the word changed in the meantime.
Write this in C for clarity.
Suggested-by: Rich Felker <dalias@libc.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
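For reference, the read/mask/compare-and-swap retry loop described above can be sketched as plain userspace C. This is only an illustration of the technique, not kernel code: the helper name xchg_u8_via_cas is made up, GCC's __atomic_compare_exchange_n builtin stands in for the kernel's __cmpxchg_u32(), and little-endian byte order is assumed (the patch itself handles both endiannesses via __BIG_ENDIAN).

/*
 * Userspace sketch only: __atomic_compare_exchange_n stands in for the
 * kernel's __cmpxchg_u32(), little-endian layout assumed.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t xchg_u8_via_cas(uint8_t *m, uint8_t val)
{
	/* Find the aligned 32-bit word containing the target byte. */
	uintptr_t off = (uintptr_t)m % sizeof(uint32_t);
	uint32_t *p = (uint32_t *)((uintptr_t)m - off);
	unsigned int bitoff = off * 8;		/* little-endian byte offset */
	uint32_t mask = 0xffu << bitoff;
	uint32_t oldv, newv;

	do {
		oldv = *p;
		newv = (oldv & ~mask) | ((uint32_t)val << bitoff);
		/* Retry if any byte of the word changed in the meantime. */
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));

	return (oldv & mask) >> bitoff;
}

int main(void)
{
	uint32_t word = 0x11223344;		/* aligned backing word */
	uint8_t *b = (uint8_t *)&word + 1;	/* the 0x33 byte on LE */
	uint8_t prev = xchg_u8_via_cas(b, 0xaa);

	printf("previous byte %#x, word now %#x\n", (unsigned)prev,
	       (unsigned)word);
	return 0;
}

On a little-endian build this prints "previous byte 0x33, word now 0x1122aa44", the same result the patched xchg_u8() below produces for those inputs.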
Diffstat (limited to 'arch/sh/include/asm/cmpxchg-llsc.h')
-rw-r--r-- | arch/sh/include/asm/cmpxchg-llsc.h | 58 |
1 file changed, 36 insertions, 22 deletions
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 47136661a203..e754794e282f 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long retval;
@@ -22,29 +25,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long retval;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"1:					\n\t"
-		"movli.l	@%2, %0	! xchg_u8	\n\t"
-		"mov		%0, %1			\n\t"
-		"mov		%3, %0			\n\t"
-		"movco.l	%0, @%2			\n\t"
-		"bf		1b			\n\t"
-		"synco					\n\t"
-		: "=&z"(tmp), "=&r" (retval)
-		: "r" (m), "r" (val & 0xff)
-		: "t", "memory"
-	);
-
-	return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
 	unsigned long retval;
 	unsigned long tmp;
@@ -68,4 +50,36 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
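Design note: movli.l/movco.l only operate on aligned 32-bit words, so a true 1- or 2-byte exchange cannot be expressed directly with llsc. Building xchg_u8() and xchg_u16() on top of __cmpxchg_u32() gives the same effect, at the cost of an occasional retry when a neighbouring byte of the same word changes concurrently. Callers are not expected to use these helpers directly; they would normally go through the architecture's generic xchg() macro, which picks the right helper based on operand size.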