| author | Brian Gerst <brgerst@gmail.com> | 2010-01-07 19:53:34 +0300 |
|---|---|---|
| committer | H. Peter Anvin <hpa@zytor.com> | 2010-01-07 22:47:55 +0300 |
| commit | 3ce59bb8352e1c53446bef1ead1c63956dfef64a (patch) | |
| tree | 5a2f965e26443c5a293802920e0b81859abbdf63 /arch/x86/include/asm/atomic_64.h | |
| parent | 1a3b1d89eded68d64e5ea409ad37827310059441 (diff) | |
| download | linux-3ce59bb8352e1c53446bef1ead1c63956dfef64a.tar.xz | |
x86: Sync asm/atomic_32.h and asm/atomic_64.h
Prepare for merging into asm/atomic.h.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
LKML-Reference: <1262883215-4034-3-git-send-email-brgerst@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/include/asm/atomic_64.h')
-rw-r--r-- | arch/x86/include/asm/atomic_64.h | 81
1 file changed, 53 insertions(+), 28 deletions(-)
```diff
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
index 042c33100c69..77407887cfcd 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -1,7 +1,9 @@
 #ifndef _ASM_X86_ATOMIC_64_H
 #define _ASM_X86_ATOMIC_64_H
 
+#include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 
@@ -45,12 +47,12 @@ static inline void atomic_set(atomic_t *v, int i)
 static inline void atomic_add(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0"
-		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
- * atomic_sub - subtract the atomic variable
+ * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
@@ -59,8 +61,8 @@ static inline void atomic_add(int i, atomic_t *v)
 static inline void atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "subl %1,%0"
-		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
@@ -77,8 +79,8 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -91,8 +93,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 static inline void atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "+m" (v->counter));
 }
 
 /**
@@ -104,8 +105,7 @@ static inline void atomic_inc(atomic_t *v)
 static inline void atomic_dec(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "decl %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "+m" (v->counter));
 }
 
 /**
@@ -121,8 +121,8 @@ static inline int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "decl %0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -139,8 +139,8 @@ static inline int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "incl %0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -158,13 +158,13 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
	return c;
 }
 
 /**
- * atomic_add_return - add and return
+ * atomic_add_return - add integer and return
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
@@ -172,13 +172,36 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i = i;
+	int __i;
+#ifdef CONFIG_M386
+	unsigned long flags;
+	if (unlikely(boot_cpu_data.x86 <= 3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor */
+	__i = i;
 	asm volatile(LOCK_PREFIX "xaddl %0, %1"
 		     : "+r" (i), "+m" (v->counter)
 		     : : "memory");
 	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_save(flags);
+	__i = atomic_read(v);
+	atomic_set(v, i + __i);
+	local_irq_restore(flags);
+	return i + __i;
+#endif
 }
 
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return atomic_add_return(-i, v);
@@ -187,23 +210,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
 
-static inline long atomic_xchg(atomic_t *v, int new)
+static inline int atomic_xchg(atomic_t *v, int new)
 {
 	return xchg(&v->counter, new);
 }
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as it was not @u.
+ * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
@@ -236,6 +259,7 @@ static inline short int atomic_inc_short(short int *v)
 	return *v;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * atomic_or_long - OR of two long integers
  * @v1: pointer to type unsigned long
@@ -248,15 +272,16 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 {
 	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
 }
+#endif
 
 /* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)					\
-	asm volatile(LOCK_PREFIX "andl %0,%1"				\
+#define atomic_clear_mask(mask, addr)				\
+	asm volatile(LOCK_PREFIX "andl %0,%1"			\
 		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
-#define atomic_set_mask(mask, addr)					\
-	asm volatile(LOCK_PREFIX "orl %0,%1"				\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))		\
+#define atomic_set_mask(mask, addr)				\
+	asm volatile(LOCK_PREFIX "orl %0,%1"			\
+		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
 		     : "memory")
 
 /* Atomic operations are already serializing on x86 */
```
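The recurring change across these hunks is mechanical: each routine drops the split `"=m"` output / `"m"` input constraint pair on `v->counter` in favor of the single read-write constraint `"+m"`, bringing the file in line with asm/atomic_32.h. The sketch below shows the same idiom in standalone userspace C; it is an illustration, not kernel code: `my_atomic_t` and `my_atomic_inc` are hypothetical stand-ins for the kernel types, and the kernel's `LOCK_PREFIX` macro is replaced by a hard-coded `lock` prefix. Assumes x86/x86-64 with GCC- or Clang-style inline asm.

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's atomic_t. */
typedef struct { volatile int counter; } my_atomic_t;

static inline void my_atomic_inc(my_atomic_t *v)
{
	/* "+m" marks the operand as both read and written, so the
	 * counter no longer needs to be listed twice, once as an
	 * "=m" output and again as an "m" input. */
	asm volatile("lock incl %0" : "+m" (v->counter));
}

int main(void)
{
	my_atomic_t v = { 41 };
	my_atomic_inc(&v);
	printf("counter = %d\n", v.counter);	/* prints 42 */
	return 0;
}
```

Either form describes a legal read-modify-write to the compiler; `"+m"` just says it once, which is why the conversion applies uniformly to `atomic_add`, `atomic_sub`, `atomic_inc`, `atomic_dec`, and the test variants above.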
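The one structural change is in `atomic_add_return()`, which gains the same `CONFIG_M386` fallback the 32-bit header carries: the 80386 lacks `XADD` (introduced with the 80486), so 386 builds must disable interrupts and open-code the read-modify-write. That path is dead code in a 64-bit build, but copying it makes the two headers textually identical ahead of the merge. Below is a userspace sketch of the `XADD` fast path only, reusing the hypothetical `my_atomic_t` from the previous example; the fallback is not reproducible here because `local_irq_save()`/`local_irq_restore()` are kernel-only.

```c
/* Sketch of the XADD fast path (486 and later) under the same
 * assumptions as the previous example. */
static inline int my_atomic_add_return(int i, my_atomic_t *v)
{
	int delta = i;

	/* "lock xaddl" atomically adds i to the counter and leaves
	 * the counter's previous value in i, so i + delta is the
	 * value after the addition. */
	asm volatile("lock xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + delta;
}
```

Called as `my_atomic_add_return(1, &v)`, this behaves like the kernel's `atomic_inc_return()`.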