author    Chris Metcalf <cmetcalf@tilera.com>    2013-09-06 16:56:45 +0400
committer Chris Metcalf <cmetcalf@tilera.com>    2013-09-06 21:06:25 +0400
commit    6dc9658fa1af9f58d358692b68135f464c167e10
tree      a112036cab626e664489d87e40012d59331e1404 /arch/tile/include/asm/atomic_32.h
parent    b40f451d56de69477a2244a0b4082f644699673f
tile: rework <asm/cmpxchg.h>
The macrology in cmpxchg.h was designed to allow arbitrary pointer and integer values to be passed through the routines. To support cmpxchg() on 64-bit values on the 32-bit tilepro architecture, we used the idiom "(typeof(val))(typeof(val-val))". This way, in the "size 8" branch of the switch, when the underlying cmpxchg routine returns a 64-bit quantity, we cast it first to a typeof(val-val) quantity (i.e. size_t if "val" is a pointer) with no warnings about casting between pointers and integers of different sizes, then cast onwards to typeof(val), again with no warnings. If val is not a pointer type, the additional cast is a no-op. We can't replace the typeof(val-val) cast with (for example) unsigned long, since then if "val" is really a 64-bit type, we would cast away the high bits.

HOWEVER, this fails with current gcc (through 4.7 at least) if "val" is a pointer to an incomplete type. Unfortunately gcc isn't smart enough to realize that "val - val" will always be a size_t type even if it's an incomplete type pointer.

Accordingly, I've reworked the way we handle the casting. We have given up the ability to use cmpxchg() on 64-bit values on tilepro, which is OK in the kernel since we should use cmpxchg64() explicitly on such values anyway. As a result, I can just use simple "unsigned long" casts internally.

As I reworked it, I realized it would be cleaner to move the architecture-specific conditionals for cmpxchg and xchg out of the atomic.h headers and into cmpxchg.h, and then use the cmpxchg() and xchg() primitives directly in atomic.h and elsewhere. This allowed the cmpxchg.h header to stand on its own without relying on the implicit include of it that is performed by <asm/atomic.h>. It also allowed collapsing the atomic_xchg/atomic_cmpxchg routines from atomic_{32,64}.h into atomic.h.

I improved the tests that guard the allowed size of the arguments to the routines to use a __compiletime_error() test. (By avoiding the use of BUILD_BUG, I could include cmpxchg.h into bitops.h as well and use the macros there, which is otherwise impossible due to include-order dependency issues.)

The tilepro _atomic_xxx internal methods were previously set up to take atomic_t and atomic64_t arguments, which isn't as convenient with the new model, so I modified them to take int or u64 arguments, which is consistent with how they used the arguments internally anyway and provided some nice simplification there too.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
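To make the double-cast idiom concrete, here is a minimal sketch of the pattern described above; the macro names and the raw-value parameter are illustrative only, not the exact code removed from <asm/cmpxchg.h>:

/*
 * Old idiom: cast the raw 64-bit result first to typeof(val - val)
 * (an integer type such as ptrdiff_t/size_t when "val" is a pointer),
 * then to typeof(val), so neither cast warns about converting between
 * pointers and integers of different sizes.
 */
#define OLD_CAST_BACK(val, raw64) \
	((typeof(val))(typeof((val) - (val)))(raw64))

/*
 * The failure mode: if "val" points to an incomplete type, the
 * expression (val - val) is itself invalid C (pointer arithmetic
 * needs a complete type), so typeof() on it does not compile with
 * gcc through at least 4.7.
 */
struct not_yet_defined;		/* incomplete type, never defined */
/* struct not_yet_defined *p;  OLD_CAST_BACK(p, x)  -- fails to build */

/*
 * After the rework, 64-bit cmpxchg() is no longer supported on
 * tilepro (callers use cmpxchg64() explicitly), so a plain
 * unsigned long intermediate cast is enough:
 */
#define NEW_CAST_BACK(val, raw) \
	((typeof(val))(unsigned long)(raw))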
Diffstat (limited to 'arch/tile/include/asm/atomic_32.h')
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 85
1 file changed, 9 insertions(+), 76 deletions(-)
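The commit message also mentions replacing BUILD_BUG-style size checks with a __compiletime_error() declaration. A minimal sketch of that pattern follows; the function, macro, and helper names here are illustrative stand-ins, not the symbols actually added to <asm/cmpxchg.h>:

/*
 * Declared but never defined: if a call survives compile-time dead
 * code elimination, the build fails with the message below.  Unlike
 * BUILD_BUG(), this needs no extra includes, which is why cmpxchg.h
 * can now be pulled into bitops.h.
 */
extern void __sample_bad_xchg_size(void)
	__compiletime_error("Bad argument size for xchg");

/* __sample_xchg32() stands in for the real 32-bit primitive. */
extern unsigned long __sample_xchg32(volatile void *p, unsigned long n);

#define sample_xchg(ptr, n)						\
	({								\
		typeof(*(ptr)) __ret;					\
		switch (sizeof(*(ptr))) {				\
		case 4:							\
			__ret = (typeof(*(ptr)))(unsigned long)		\
				__sample_xchg32((ptr), (unsigned long)(n)); \
			break;						\
		default:						\
			__sample_bad_xchg_size();			\
			__ret = 0;					\
			break;						\
		}							\
		__ret;							\
	})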
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 96156f5ba640..0d0395b1b152 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -22,40 +22,6 @@
#ifndef __ASSEMBLY__
-/* Tile-specific routines to support <linux/atomic.h>. */
-int _atomic_xchg(atomic_t *v, int n);
-int _atomic_xchg_add(atomic_t *v, int i);
-int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
-int _atomic_cmpxchg(atomic_t *v, int o, int n);
-
-/**
- * atomic_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline int atomic_xchg(atomic_t *v, int n)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic_xchg(v, n);
-}
-
-/**
- * atomic_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic_cmpxchg(v, o, n);
-}
-
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
*/
static inline void atomic_add(int i, atomic_t *v)
{
- _atomic_xchg_add(v, i);
+ _atomic_xchg_add(&v->counter, i);
}
/**
@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{
smp_mb(); /* barrier for proper semantics */
- return _atomic_xchg_add(v, i) + i;
+ return _atomic_xchg_add(&v->counter, i) + i;
}
/**
@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
smp_mb(); /* barrier for proper semantics */
- return _atomic_xchg_add_unless(v, a, u);
+ return _atomic_xchg_add_unless(&v->counter, a, u);
}
/**
@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
static inline void atomic_set(atomic_t *v, int n)
{
- _atomic_xchg(v, n);
+ _atomic_xchg(&v->counter, n);
}
/* A 64bit atomic type */
@@ -119,11 +85,6 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
-u64 _atomic64_xchg(atomic64_t *v, u64 n);
-u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
-u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
-
/**
* atomic64_read - read atomic variable
* @v: pointer of type atomic64_t
@@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
* Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified.
*/
- return _atomic64_xchg_add((atomic64_t *)v, 0);
-}
-
-/**
- * atomic64_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic64_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic64_xchg(v, n);
-}
-
-/**
- * atomic64_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic64_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
-{
- smp_mb(); /* barrier for proper semantics */
- return _atomic64_cmpxchg(v, o, n);
+ return _atomic64_xchg_add((u64 *)&v->counter, 0);
}
/**
@@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
*/
static inline void atomic64_add(u64 i, atomic64_t *v)
{
- _atomic64_xchg_add(v, i);
+ _atomic64_xchg_add(&v->counter, i);
}
/**
@@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
smp_mb(); /* barrier for proper semantics */
- return _atomic64_xchg_add(v, i) + i;
+ return _atomic64_xchg_add(&v->counter, i) + i;
}
/**
@@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
smp_mb(); /* barrier for proper semantics */
- return _atomic64_xchg_add_unless(v, a, u) != u;
+ return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}
/**
@@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
*/
static inline void atomic64_set(atomic64_t *v, u64 n)
{
- _atomic64_xchg(v, n);
+ _atomic64_xchg(&v->counter, n);
}
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
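With the xchg()/cmpxchg() primitives usable directly, the wrappers removed above can be collapsed into the shared <asm/atomic.h>, roughly along these lines. This is a sketch of the pattern the commit message describes, not a verbatim copy of the new header; the explicit smp_mb() calls of the old wrappers are assumed to be subsumed by the full-barrier semantics of the primitives themselves:

static inline int atomic_xchg(atomic_t *v, int n)
{
	return xchg(&v->counter, n);
}

static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return cmpxchg(&v->counter, o, n);
}

/*
 * The 64-bit variants follow the same shape but, per the commit
 * message, go through the explicit 64-bit primitive (cmpxchg64())
 * rather than cmpxchg(), since tilepro no longer supports plain
 * cmpxchg() on 64-bit values.
 */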