| author | Chris Metcalf <cmetcalf@tilera.com> | 2011-05-16 21:59:39 +0400 |
|---|---|---|
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2011-05-20 06:55:49 +0400 |
| commit | 8aaf1dda42576b0f8dffb004065baa806f4df9b6 (patch) | |
| tree | e9376caaf70b54e4b236840a1cc77a443c07b341 /arch/tile/include/asm/atomic_64.h | |
| parent | 4800a5bb13c09a572f7c74662a77c9eca229eba1 (diff) | |
| download | linux-8aaf1dda42576b0f8dffb004065baa806f4df9b6.tar.xz | |
arch/tile: use better definitions of xchg() and cmpxchg()
These definitions use a ({}) construct to avoid some cases where
we were getting warnings about unused return values. We also
promote the definition to the common <asm/atomic.h>, since it applies
to both the 32- and 64-bit atomics.
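To illustrate the warning that the ({}) construct avoids, here is a minimal user-space sketch. It is not the kernel's definition: it uses the GCC __atomic_exchange_n builtin and hypothetical macro names in place of the tile atomics. A cast-based macro leaves a discarded cast expression at statement level, which gcc -Wall can flag as "value computed is not used"; a statement expression that ends in a plain variable does not trigger that warning.

```c
/* Minimal sketch, not the kernel code: xchg_cast() mirrors the old
 * cast-based style, xchg_stmt() the new ({ }) statement-expression
 * style.  Both swap in the new value and yield the old one. */
#define xchg_cast(ptr, x) \
	((typeof(*(ptr)))__atomic_exchange_n((ptr), (x), __ATOMIC_SEQ_CST))

#define xchg_stmt(ptr, x)						\
	({								\
		typeof(*(ptr)) __ret =					\
			__atomic_exchange_n((ptr), (x), __ATOMIC_SEQ_CST); \
		__ret;	/* plain value: discarding it does not warn */	\
	})

int main(void)
{
	long v = 1;

	xchg_cast(&v, 2);	/* gcc -Wall can warn: value computed is not used */
	xchg_stmt(&v, 3);	/* no warning when the result is ignored */
	return (int)v;		/* v == 3 here */
}
```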
In addition, define __HAVE_ARCH_CMPXCHG for TILE-Gx since it has
efficient direct atomic instructions.
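For context, a hedged sketch of how code can key off such a feature macro; the consumer below is hypothetical and its names are illustrative, not taken from the kernel tree. With __HAVE_ARCH_CMPXCHG defined, a caller can prefer a lock-free compare-and-swap retry loop and otherwise fall back to a coarser scheme.

```c
/*
 * Hypothetical consumer sketch: when the architecture advertises an
 * efficient cmpxchg(), use a lock-free retry loop; otherwise fall back
 * to some other protection (spinlock, disabled interrupts, ...).
 */
#ifdef __HAVE_ARCH_CMPXCHG
static inline void counter_add(unsigned long *p, unsigned long n)
{
	unsigned long old;

	do {
		old = *p;
	} while (cmpxchg(p, old, old + n) != old);
}
#else
/* ... fall back to a lock-protected read-modify-write ... */
#endif
```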
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/include/asm/atomic_64.h')
-rw-r--r-- | arch/tile/include/asm/atomic_64.h | 17 |
1 file changed, 2 insertions(+), 15 deletions(-)
```diff
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 321705294800..1c1e60d8ccb6 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -148,21 +148,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
-#define xchg(ptr, x)							\
-	((typeof(*(ptr)))						\
-	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
-	  atomic_xchg((atomic_t *)(ptr), (long)(x)) :			\
-	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
-	  atomic_long_xchg((atomic_long_t *)(ptr), (long)(x)) :	\
-	  __xchg_called_with_bad_pointer()))
-
-#define cmpxchg(ptr, o, n)						\
-	((typeof(*(ptr)))						\
-	 ((sizeof(*(ptr)) == sizeof(atomic_t)) ?			\
-	  atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) :	\
-	  (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?			\
-	  atomic_long_cmpxchg((atomic_long_t *)(ptr), (long)(o), (long)(n)) : \
-	  __cmpxchg_called_with_bad_pointer()))
+/* Define this to indicate that cmpxchg is an efficient operation. */
+#define __HAVE_ARCH_CMPXCHG
 
 #endif /* !__ASSEMBLY__ */
 
```