author | Ivan Kokshaysky <ink@jurassic.park.msu.ru> | 2005-10-21 22:06:15 +0400
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-23 06:38:33 +0400
commit | d475f3f47a0427dfee483cecf9a7e9109e991423 (patch)
tree | bbaa882b849acf13e624e3e1d8f4d31280a4b74b
parent | 4595f251058609d97a5d792de08c34a7956af816 (diff)
download | linux-d475f3f47a0427dfee483cecf9a7e9109e991423.tar.xz
[PATCH] alpha: additional smp barriers
As stated in Documentation/atomic_ops.txt, atomic functions
returning values must have memory barriers both before and after
the operation.
Thanks to DaveM for pointing that out.
Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | include/asm-alpha/atomic.h | 12
1 file changed, 8 insertions, 4 deletions
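The rule the patch enforces is easier to see away from Alpha assembly. Below is a minimal user-space sketch in C11, not kernel code; the name my_atomic_add_return and the counter variable are invented for illustration. A seq_cst read-modify-write behaves as a full barrier on both sides, which is what Documentation/atomic_ops.txt demands of value-returning atomics; the pre-patch Alpha code had only the trailing mb instruction, which ordered later accesses but left earlier ones free to drift past the atomic.

/*
 * Illustrative only -- a user-space C11 approximation of the rule from
 * Documentation/atomic_ops.txt, not the kernel implementation.  The
 * function and variable names are made up for this example.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long counter = 0;

/* A value-returning atomic op must act as a full memory barrier on
 * both sides.  memory_order_seq_cst provides that; a relaxed RMW here
 * would mirror the pre-patch Alpha code. */
static long my_atomic_add_return(long i)
{
	return atomic_fetch_add_explicit(&counter, i, memory_order_seq_cst) + i;
}

int main(void)
{
	printf("%ld\n", my_atomic_add_return(1));	/* prints 1 */
	return 0;
}

A side effect of moving the barrier out of the asm body: smp_mb() compiles down to a compiler-only barrier on uniprocessor builds, whereas the hard-coded mb instruction was always executed.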
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68c..0b40bad00289 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -100,18 +100,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }

@@ -120,54 +121,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }

 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }

 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
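Why the barrier before the operation matters can be shown with a two-thread hand-off. Again a hedged user-space sketch, not kernel code: producer, consumer, and payload are invented names, and a C11 toolchain with <threads.h> is assumed.

/*
 * Illustrative only -- user-space C11, not kernel code.  The thread
 * and variable names are invented for the example.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_long counter = 0;
static long payload;			/* plain, non-atomic shared data */

static int producer(void *arg)
{
	(void)arg;
	payload = 123;			/* store that must be visible first */
	/* Full barrier before and after, like smp_mb()/ll-sc/smp_mb(): */
	atomic_fetch_add_explicit(&counter, 1, memory_order_seq_cst);
	return 0;
}

static int consumer(void *arg)
{
	(void)arg;
	/* Wait until the increment is visible... */
	while (atomic_load_explicit(&counter, memory_order_seq_cst) == 0)
		;
	/* ...then payload must already read 123.  With no barrier before
	 * the increment, a stale value would be a legal result here. */
	printf("payload = %ld\n", payload);
	return 0;
}

int main(void)
{
	thrd_t p, c;
	thrd_create(&c, consumer, NULL);
	thrd_create(&p, producer, NULL);
	thrd_join(p, NULL);
	thrd_join(c, NULL);
	return 0;
}

Before this patch, the Alpha versions of these functions could let the store to payload drift past the ll/sc sequence, so another CPU could observe the new counter value and still read stale data; the added leading smp_mb() closes exactly that window.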