From 60063497a95e716c9a689af3be2687d261f115b4 Mon Sep 17 00:00:00 2001
From: Arun Sharma
Date: Tue, 26 Jul 2011 16:09:06 -0700
Subject: atomic: use <linux/atomic.h>

This allows us to move duplicated code in <asm/atomic.h>
(atomic_inc_not_zero() for now) to <linux/atomic.h>

Signed-off-by: Arun Sharma
Reviewed-by: Eric Dumazet
Cc: Ingo Molnar
Cc: David Miller
Cc: Eric Dumazet
Acked-by: Mike Frysinger
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/atomic.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/s390/include/asm/atomic.h')

diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d9db13810d15..29d756329228 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -108,7 +108,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 	return c != u;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #undef __CS_LOOP
--
cgit v1.2.3

From f24219b4e90cf70ec4a211b17fbabc725a0ddf3c Mon Sep 17 00:00:00 2001
From: Arun Sharma
Date: Tue, 26 Jul 2011 16:09:07 -0700
Subject: atomic: move atomic_add_unless to generic code

This is in preparation for more generic atomic primitives based on
__atomic_add_unless.

Signed-off-by: Arun Sharma
Signed-off-by: Hans-Christian Egtvedt
Reviewed-by: Eric Dumazet
Cc: Ingo Molnar
Cc: David Miller
Acked-by: Mike Frysinger
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/alpha/include/asm/atomic.h    | 10 +++----
 arch/arm/include/asm/atomic.h      |  4 +--
 arch/avr32/include/asm/atomic.h    | 57 +++++++++++++++++---------------------
 arch/blackfin/include/asm/atomic.h |  4 +--
 arch/cris/include/asm/atomic.h     |  4 +--
 arch/frv/include/asm/atomic.h      |  4 +--
 arch/h8300/include/asm/atomic.h    |  4 +--
 arch/ia64/include/asm/atomic.h     |  4 +--
 arch/m32r/include/asm/atomic.h     |  8 +++---
 arch/m68k/include/asm/atomic.h     |  4 +--
 arch/mips/include/asm/atomic.h     | 10 +++----
 arch/mn10300/include/asm/atomic.h  |  4 +--
 arch/parisc/include/asm/atomic.h   | 10 +++----
 arch/powerpc/include/asm/atomic.h  | 14 +++++-----
 arch/s390/include/asm/atomic.h     |  4 +--
 arch/sh/include/asm/atomic.h       |  8 +++---
 arch/sparc/include/asm/atomic_32.h |  2 +-
 arch/sparc/include/asm/atomic_64.h |  4 +--
 arch/tile/include/asm/atomic_32.h  | 10 +++----
 arch/tile/include/asm/atomic_64.h  |  4 +--
 arch/x86/include/asm/atomic.h      |  8 +++---
 arch/x86/include/asm/atomic64_32.h |  2 +-
 arch/x86/include/asm/atomic64_64.h |  2 +-
 arch/xtensa/include/asm/atomic.h   |  8 +++---
 include/asm-generic/atomic.h       |  4 +--
 include/linux/atomic.h             | 14 ++++++++++
 26 files changed, 109 insertions(+), 102 deletions(-)

(limited to 'arch/s390/include/asm/atomic.h')

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 88b7491490bc..3d6704910268 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -176,15 +176,15 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -196,7 +196,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } @@ -207,7 +207,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) { diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 4d501f1bdc9d..3757e91c5281 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -208,14 +208,14 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) c = old; - return c != u; + return c; } #define atomic_inc(v) atomic_add(1, v) diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index f229c3849f03..dc6c3a41a2d7 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -78,70 +78,63 @@ static inline int atomic_add_return(int i, atomic_t *v) /* * atomic_sub_unless - sub unless the number is a given value * @v: pointer of type atomic_t - * @a: the amount to add to v... + * @a: the amount to subtract from v... * @u: ...unless v is equal to u. * - * If the atomic value v is not equal to u, this function subtracts a - * from v, and returns non zero. If v is equal to u then it returns - * zero. This is done as an atomic operation. + * Atomically subtract @a from @v, so long as it was not @u. + * Returns the old value of @v. */ -static inline int atomic_sub_unless(atomic_t *v, int a, int u) +static inline void atomic_sub_unless(atomic_t *v, int a, int u) { - int tmp, result = 0; + int tmp; asm volatile( "/* atomic_sub_unless */\n" "1: ssrf 5\n" - " ld.w %0, %3\n" - " cp.w %0, %5\n" + " ld.w %0, %2\n" + " cp.w %0, %4\n" " breq 1f\n" - " sub %0, %4\n" - " stcond %2, %0\n" + " sub %0, %3\n" + " stcond %1, %0\n" " brne 1b\n" - " mov %1, 1\n" "1:" - : "=&r"(tmp), "=&r"(result), "=o"(v->counter) - : "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result) + : "=&r"(tmp), "=o"(v->counter) + : "m"(v->counter), "rKs21"(a), "rKs21"(u) : "cc", "memory"); - - return result; } /* - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * - * If the atomic value v is not equal to u, this function adds a to v, - * and returns non zero. If v is equal to u then it returns zero. This - * is done as an atomic operation. + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
*/ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { - int tmp, result; + int tmp, old = atomic_read(v); if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576)) - result = atomic_sub_unless(v, -a, u); + atomic_sub_unless(v, -a, u); else { - result = 0; asm volatile( - "/* atomic_add_unless */\n" + "/* __atomic_add_unless */\n" "1: ssrf 5\n" - " ld.w %0, %3\n" - " cp.w %0, %5\n" + " ld.w %0, %2\n" + " cp.w %0, %4\n" " breq 1f\n" - " add %0, %4\n" - " stcond %2, %0\n" + " add %0, %3\n" + " stcond %1, %0\n" " brne 1b\n" - " mov %1, 1\n" "1:" - : "=&r"(tmp), "=&r"(result), "=o"(v->counter) - : "m"(v->counter), "r"(a), "ir"(u), "1"(result) + : "=&r"(tmp), "=o"(v->counter) + : "m"(v->counter), "r"(a), "ir"(u) : "cc", "memory"); } - return result; + return old; } /* diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index f2cf5b714ea4..292c86f74f85 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -89,13 +89,13 @@ static inline void atomic_set_mask(int mask, atomic_t *v) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_add_unless(v, a, u) \ +#define __atomic_add_unless(v, a, u) \ ({ \ int c, old; \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ c = old; \ - c != (u); \ + c; \ }) /* diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index ce9f67e4d977..7e90532c5253 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -138,7 +138,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int ret; unsigned long flags; @@ -148,7 +148,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) if (ret != u) v->counter += a; cris_atomic_restore(v, flags); - return ret != u; + return ret; } /* Atomic operations are already serializing */ diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index b07b75f411f2..a51dcdfe1fbf 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -241,7 +241,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -253,7 +253,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index b641714774ea..e6d1663625f0 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -104,7 +104,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int ret; unsigned long flags; @@ -114,7 +114,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) if (ret != u) v->counter += a; 
local_irq_restore(flags); - return ret != u; + return ret; } static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index fdb887005dff..22aca210bd05 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -90,7 +90,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v) (cmpxchg(&((v)->counter), old, new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -102,7 +102,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index d64d894dc549..c839426ac732 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -239,15 +239,15 @@ static __inline__ int atomic_dec_return(atomic_t *v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -259,7 +259,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index e844a2d2ba23..2269350974f1 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -183,7 +183,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v) __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask)); } -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -195,7 +195,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 833a4023648a..31cb23debb7e 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -303,15 +303,15 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. 
*/ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -323,7 +323,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } #define atomic_dec_return(v) atomic_sub_return(1, (v)) @@ -679,7 +679,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) { diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 041b9d69d86c..a2e6759af4db 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -260,13 +260,13 @@ static inline void atomic_dec(atomic_t *v) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic_add_unless(v, a, u) \ +#define __atomic_add_unless(v, a, u) \ ({ \ int c, old; \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ c = old; \ - c != (u); \ + c; \ }) diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 192488999b63..1914f179879d 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -197,15 +197,15 @@ static __inline__ int atomic_read(const atomic_t *v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -217,7 +217,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } @@ -316,7 +316,7 @@ atomic64_read(const atomic64_t *v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) { diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b2bcbee622ea..952e161fbb89 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. 
*/ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) { int t; __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: lwarx %0,0,%1 # atomic_add_unless\n\ +"1: lwarx %0,0,%1 # __atomic_add_unless\n\ cmpw 0,%0,%3 \n\ beq- 2f \n\ add %0,%2,%0 \n" @@ -209,7 +209,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) : "r" (&v->counter), "r" (a), "r" (u) : "cc", "memory"); - return t != u; + return t; } @@ -443,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) { @@ -451,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: ldarx %0,0,%1 # atomic_add_unless\n\ +"1: ldarx %0,0,%1 # __atomic_add_unless\n\ cmpd 0,%0,%3 \n\ beq- 2f \n\ add %0,%2,%0 \n" diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 29d756329228..7b0b0a7193e5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -93,7 +93,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return old; } -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -105,7 +105,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != u; + return c; } diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 8ddb2635cf92..2177596d4b38 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -38,15 +38,15 @@ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. 
*/ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -59,7 +59,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) c = old; } - return c != (u); + return c; } #define smp_mb__before_atomic_dec() smp_mb() diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7646f2cef5d0..bdce95e77f85 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -22,7 +22,7 @@ extern int __atomic_add_return(int, atomic_t *); extern int atomic_cmpxchg(atomic_t *, int, int); #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -extern int atomic_add_unless(atomic_t *, int, int); +extern int __atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); #define atomic_read(v) (*(volatile int *)&(v)->counter) diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 337139ef91be..1fc3d0a62d66 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -70,7 +70,7 @@ extern long atomic64_sub_ret(long, atomic64_t *); #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -82,7 +82,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 246feed4794d..c03349e0ca9f 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -81,18 +81,18 @@ static inline int atomic_add_return(int i, atomic_t *v) } /** - * atomic_add_unless - add unless the number is already a given value + * __atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { smp_mb(); /* barrier for proper semantics */ - return _atomic_xchg_add_unless(v, a, u) != u; + return _atomic_xchg_add_unless(v, a, u); } /** @@ -199,7 +199,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. 
*/ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) { diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index a48dda30cbcc..27fe667fddfe 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -64,7 +64,7 @@ static inline int atomic_add_return(int i, atomic_t *v) return val; } -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int guess, oldval = v->counter; do { @@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) guess = oldval; oldval = atomic_cmpxchg(v, guess, guess + a); } while (guess != oldval); - return oldval != u; + return oldval; } /* Now the true 64-bit operations. */ diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 897969bdd4e6..5fe9cb335cd2 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -221,15 +221,15 @@ static inline int atomic_xchg(atomic_t *v, int new) } /** - * atomic_add_unless - add unless the number is already a given value + * __atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -241,7 +241,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 2a934aa19a43..24098aafce0d 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -263,7 +263,7 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) { diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 49fd1ea22951..017594d403f6 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -202,7 +202,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new) * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. + * Returns the old value of @v. */ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 7cca2fb18baf..e464212f9ee6 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -225,15 +225,15 @@ static inline int atomic_sub_return(int i, atomic_t * v) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** - * atomic_add_unless - add unless the number is a given value + * __atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. 
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -245,7 +245,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c;
 }
 
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 7f84414c5f53..bd18bfd78e9b 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -129,13 +129,13 @@ static inline void atomic_dec(atomic_t *v)
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 66fed6364122..d5e167a6a398 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,20 @@
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
 
+/**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	return __atomic_add_unless(v, a, u) != u;
+}
+
 /**
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
--
cgit v1.2.3

From 7847777a45f9f8bfc8617dbf107bde1ecb59caee Mon Sep 17 00:00:00 2001
From: Arun Sharma
Date: Tue, 26 Jul 2011 16:09:08 -0700
Subject: atomic: cleanup asm-generic atomic*.h inclusion

After changing all consumers of atomics to include <linux/atomic.h>,
we ran into some compile time errors due to this dependency chain:

    linux/atomic.h -> asm/atomic.h -> asm-generic/atomic-long.h

where atomic-long.h could use funcs defined later in linux/atomic.h
without a prototype.  This patch moves the code that includes
asm-generic/atomic*.h to linux/atomic.h.

Archs that need asm-generic/atomic64.h need to select
CONFIG_GENERIC_ATOMIC64 from now on (some of them used to include it
unconditionally).

Compile tested on i386 and x86_64 with allnoconfig.
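The resulting layering, reduced to a sketch (simplified: atomic_inc_not_zero()
stands in for whichever helpers the asm-generic wrappers call, and only the
include ordering is the point):

    /* include/linux/atomic.h, simplified */
    #include <asm/atomic.h>              /* arch code, no asm-generic includes */

    static inline int atomic_inc_not_zero(atomic_t *v)
    {
            return atomic_add_unless(v, 1, 0);
    }

    /* pulled in last, so everything they call is already declared */
    #include <asm-generic/atomic-long.h>
    #ifdef CONFIG_GENERIC_ATOMIC64
    #include <asm-generic/atomic64.h>
    #endif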
Signed-off-by: Arun Sharma Cc: Eric Dumazet Cc: Ingo Molnar Cc: David Miller Acked-by: Mike Frysinger Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/atomic.h | 1 - arch/arm/include/asm/atomic.h | 5 +---- arch/avr32/include/asm/atomic.h | 2 -- arch/blackfin/include/asm/atomic.h | 3 --- arch/cris/include/asm/atomic.h | 1 - arch/frv/include/asm/atomic.h | 1 - arch/h8300/include/asm/atomic.h | 1 - arch/ia64/include/asm/atomic.h | 1 - arch/m32r/include/asm/atomic.h | 1 - arch/m68k/include/asm/atomic.h | 2 -- arch/mips/include/asm/atomic.h | 6 ------ arch/mn10300/include/asm/atomic.h | 2 -- arch/parisc/include/asm/atomic.h | 5 ----- arch/powerpc/include/asm/atomic.h | 4 ---- arch/s390/include/asm/atomic.h | 2 -- arch/sh/include/asm/atomic.h | 3 --- arch/sparc/include/asm/atomic_32.h | 1 - arch/sparc/include/asm/atomic_64.h | 1 - arch/tile/include/asm/atomic.h | 5 ----- arch/x86/include/asm/atomic.h | 1 - arch/xtensa/include/asm/atomic.h | 1 - include/asm-generic/atomic.h | 2 -- include/linux/atomic.h | 4 ++++ 23 files changed, 5 insertions(+), 50 deletions(-) (limited to 'arch/s390/include/asm/atomic.h') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 3d6704910268..640f909ddd41 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -255,5 +255,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include #endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 3757e91c5281..86976d034382 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -459,9 +459,6 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) -#else /* !CONFIG_GENERIC_ATOMIC64 */ -#include -#endif -#include +#endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif #endif diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index dc6c3a41a2d7..e0ac2631c87e 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -188,6 +188,4 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include - #endif /* __ASM_AVR32_ATOMIC_H */ diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 292c86f74f85..135225696fd2 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -111,10 +111,7 @@ static inline void atomic_set_mask(int mask, atomic_t *v) #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#include #endif -#include - #endif diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index 7e90532c5253..bbf093814db2 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -157,5 +157,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index a51dcdfe1fbf..0d8a7d661740 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -257,5 +257,4 @@ 
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) } -#include #endif /* _ASM_ATOMIC_H */ diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index e6d1663625f0..f5a38c1f5489 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -145,5 +145,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 22aca210bd05..3fad89ee01cb 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -215,5 +215,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index c839426ac732..1e7f29fb21f2 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -313,5 +313,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 2269350974f1..65c6be6c8180 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -205,6 +205,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include -#include #endif /* __ARCH_M68K_ATOMIC __ */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 31cb23debb7e..1d93f81d57e7 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -765,10 +765,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) */ #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) -#else /* !CONFIG_64BIT */ - -#include - #endif /* CONFIG_64BIT */ /* @@ -780,6 +776,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() smp_mb__before_llsc() #define smp_mb__after_atomic_inc() smp_llsc_mb() -#include - #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index a2e6759af4db..b9a8f8461262 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -343,8 +343,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include - #endif /* __KERNEL__ */ #endif /* CONFIG_SMP */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 1914f179879d..b1dc71f5534e 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -335,12 +335,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#else /* CONFIG_64BIT */ - -#include - #endif /* !CONFIG_64BIT */ -#include #endif /* _ASM_PARISC_ATOMIC_H_ */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 952e161fbb89..e2a4c26ad377 100644 --- a/arch/powerpc/include/asm/atomic.h +++ 
b/arch/powerpc/include/asm/atomic.h @@ -469,11 +469,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#else /* __powerpc64__ */ -#include - #endif /* __powerpc64__ */ -#include #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_ATOMIC_H_ */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 7b0b0a7193e5..8517d2ae3b5c 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -331,6 +331,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include - #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 2177596d4b38..63a27dbc952e 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -67,7 +67,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include -#include - #endif /* __ASM_SH_ATOMIC_H */ diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index bdce95e77f85..5c3c8b69884d 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -160,5 +160,4 @@ static inline int __atomic24_sub(int i, atomic24_t *v) #endif /* !(__KERNEL__) */ -#include #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 1fc3d0a62d66..9f421df46aec 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -113,5 +113,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index e3272715c3cb..921dbeb8a70c 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h @@ -177,9 +177,4 @@ extern unsigned long __cmpxchg_called_with_bad_pointer(void); #include #endif -/* Provide the appropriate atomic_long_t definitions. 
*/ -#ifndef __ASSEMBLY__ -#include -#endif - #endif /* _ASM_TILE_ATOMIC_H */ diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 5fe9cb335cd2..10572e309ab2 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -318,5 +318,4 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) # include "atomic64_64.h" #endif -#include #endif /* _ASM_X86_ATOMIC_H */ diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index e464212f9ee6..23592eff67ad 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -291,7 +291,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index bd18bfd78e9b..a8fad943d37b 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -154,7 +154,5 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include - #endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index d5e167a6a398..42e7f6372ce2 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -96,4 +96,8 @@ static inline void atomic_or(int i, atomic_t *v) } #endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ +#include +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif #endif /* _LINUX_ATOMIC_H */ -- cgit v1.2.3
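Taken together, the series leaves this split: each architecture supplies
__atomic_add_unless(), which returns the value it observed, and
<linux/atomic.h> derives the old boolean atomic_add_unless() plus
atomic_inc_not_zero() from it. The stand-alone C program below mirrors that
shape purely as an illustration: atomic_t, atomic_read() and atomic_cmpxchg()
are user-space stand-ins built on a GCC __sync builtin, not the kernel
definitions, and the add-unless loop follows the asm-generic version shown
above.

/*
 * Illustrative user-space sketch of the pattern the series establishes.
 * atomic_t and atomic_cmpxchg() are stand-ins, not the kernel versions;
 * only the calling convention is the point.
 */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* stand-in for the kernel's atomic_cmpxchg(): returns the previous value */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __sync_val_compare_and_swap(&v->counter, old, new);
}

/* arch / asm-generic side: returns the old value, as in the patches above */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}

/* linux/atomic.h side: the boolean wrappers are now generic */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

int main(void)
{
	atomic_t ref = { .counter = 1 };
	int ok;

	ok = atomic_inc_not_zero(&ref);
	printf("counter was 1: ok=%d, counter=%d\n", ok, atomic_read(&ref)); /* ok=1, counter=2 */

	ref.counter = 0;
	ok = atomic_inc_not_zero(&ref);
	printf("counter was 0: ok=%d, counter=%d\n", ok, atomic_read(&ref)); /* ok=0, counter=0 */

	return 0;
}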