author    Ralf Baechle <ralf@linux-mips.org>  2007-07-14 16:24:05 +0400
committer Ralf Baechle <ralf@linux-mips.org>  2007-07-20 21:57:39 +0400
commit    17099b1142f6c0359fca60a3464dea8fb30badea
tree      26b9f3955dca84ccab594a76680c2a71e166768a
parent    ed203dadcd1373e80e95b04075e1eefc554a914b
[MIPS] Make support for weakly ordered LL/SC a config option.
None of the weakly ordered processors supported in-tree need this, but it seems like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
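The contract at stake: Linux requires value-returning atomics (atomic_add_return() and friends) and lock acquisition to act as full memory barriers. On every weakly ordered MIPS CPU currently supported, a completed LL/SC sequence already provides that ordering, so the explicit sync instructions around it can be compiled out. A portable C11 sketch of the guarantee callers depend on (the names payload/ready are illustrative, not kernel code):

        #include <stdatomic.h>
        #include <stdio.h>

        static int payload;             /* ordinary, non-atomic data */
        static atomic_int ready;

        /* Producer: the fetch-add below is fully ordered (seq_cst), so
         * the payload store cannot become visible after the flag. */
        static void producer(void)
        {
                payload = 42;
                atomic_fetch_add(&ready, 1);
        }

        /* Consumer: observing ready == 1 implies payload == 42. */
        static void consumer(void)
        {
                if (atomic_load(&ready) == 1)
                        printf("payload = %d\n", payload);
        }

        int main(void)
        {
                producer();
                consumer();
                return 0;
        }

On MIPS the fetch-add compiles down to an ll/sc loop; whether an extra sync is also needed around that loop is exactly what the new CONFIG_WEAK_REORDERING_BEYOND_LLSC option expresses.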
Diffstat (limited to 'include')
-rw-r--r--   include/asm-mips/atomic.h    | 33
-rw-r--r--   include/asm-mips/barrier.h   |  9
-rw-r--r--   include/asm-mips/bitops.h    | 10
-rw-r--r--   include/asm-mips/futex.h     |  8
-rw-r--r--   include/asm-mips/spinlock.h  | 18
-rw-r--r--   include/asm-mips/system.h    |  8
6 files changed, 48 insertions(+), 38 deletions(-)
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1b60624dab7e..7d8003769a44 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -138,7 +138,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -181,7 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -233,7 +233,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -250,7 +250,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -302,7 +302,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -519,7 +519,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -562,7 +562,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -614,7 +614,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -631,7 +631,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -683,7 +683,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -791,10 +791,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__after_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
+
 #endif /* _ASM_ATOMIC_H */
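The atomic.h changes are uniform: every value-returning atomic brackets its LL/SC retry loop with a barrier on entry and on exit, and both ends become the weaker smp_llsc_mb(). A minimal standalone sketch of that shape (a plain read-modify-write stands in for the ll/addu/sc loop, and smp_llsc_mb() is modeled as a compiler barrier, which is all it is when the new option is off):

        #include <stdio.h>

        /* Model of smp_llsc_mb() with CONFIG_WEAK_REORDERING_BEYOND_LLSC
         * unset: an empty asm that still fences the compiler. */
        #define smp_llsc_mb()   __asm__ __volatile__("" : : : "memory")

        /* Shape of atomic_add_return() after this patch; illustrative only,
         * the assignment below is NOT atomic like the real ll/sc loop. */
        static inline int atomic_add_return_sketch(int i, volatile int *v)
        {
                int result;

                smp_llsc_mb();          /* order earlier accesses before the RMW */
                result = (*v += i);     /* stands in for the ll/addu/sc retry loop */
                smp_llsc_mb();          /* order the RMW before later accesses */
                return result;
        }

        int main(void)
        {
                volatile int v = 1;

                printf("1 + 2 = %d\n", atomic_add_return_sketch(2, &v));
                return 0;
        }

The real function also has an irq-disabling fallback for CPUs without LL/SC, visible in the raw_local_irq_restore() context lines above.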
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
index ed82631b0017..9d8cfbb5e796 100644
--- a/include/asm-mips/barrier.h
+++ b/include/asm-mips/barrier.h
@@ -121,6 +121,11 @@
 #else
 #define __WEAK_ORDERING_MB	"		\n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB		"	sync	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
 
 #define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
@@ -129,4 +134,8 @@
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
+#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
 #endif /* __ASM_BARRIER_H */
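barrier.h is where the savings show up: on a kernel built with weak ordering but without the new option, smp_mb() still emits sync while smp_llsc_mb() emits nothing. A self-contained model of the selection, assuming the existing __WEAK_ORDERING_MB guard is CONFIG_WEAK_ORDERING && CONFIG_SMP, mirroring the new one (the CONFIG_* defines stand in for Kconfig output; this is not the kernel header itself):

        #include <stdio.h>

        #define CONFIG_SMP 1
        #define CONFIG_WEAK_ORDERING 1
        /* #define CONFIG_WEAK_REORDERING_BEYOND_LLSC 1 */

        #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
        #define __WEAK_ORDERING_MB      "\tsync\n"
        #else
        #define __WEAK_ORDERING_MB      "\n"
        #endif

        #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
        #define __WEAK_LLSC_MB          "\tsync\n"
        #else
        #define __WEAK_LLSC_MB          "\n"
        #endif

        int main(void)
        {
                /* In the real header these strings are spliced into inline asm. */
                printf("smp_mb()      emits: %s", __WEAK_ORDERING_MB);
                printf("smp_llsc_mb() emits: %s", __WEAK_LLSC_MB);
                return 0;
        }

Uncommenting the CONFIG_WEAK_REORDERING_BEYOND_LLSC line makes both barriers emit sync, which is the whole patch in miniature.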
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index d9e81af53f78..148bc79557f1 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -38,8 +38,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -289,7 +289,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -377,7 +377,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -445,7 +445,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
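The smp_mb__before_clear_bit()/smp_mb__after_clear_bit() hooks exist for callers that pair a bit operation with ordinary stores. The typical pattern, sketched standalone (BUSY_BIT, finish_work() and the non-atomic clear_bit() stub are illustrative, not the kernel's):

        #include <stdio.h>

        #define BUSY_BIT 0

        /* Models of the kernel primitives.  This clear_bit() is NOT
         * atomic; the real one is an ll/sc or irq-protected sequence. */
        #define smp_mb__before_clear_bit() __asm__ __volatile__("" : : : "memory")

        static void clear_bit(int nr, unsigned long *addr)
        {
                *addr &= ~(1UL << nr);
        }

        static unsigned long flags = 1UL << BUSY_BIT;
        static int result;

        /* The result must be globally visible before the busy bit drops,
         * or a waiter that sees the bit clear could read stale data. */
        static void finish_work(int value)
        {
                result = value;
                smp_mb__before_clear_bit();
                clear_bit(BUSY_BIT, &flags);
        }

        int main(void)
        {
                finish_work(42);
                printf("flags=%#lx result=%d\n", flags, result);
                return 0;
        }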
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 47e5679c2353..b623882bce19 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -29,7 +29,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqzl	$1, 1b				\n"	\
-		__WEAK_ORDERING_MB					\
+		__WEAK_LLSC_MB						\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -55,7 +55,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqz	$1, 1b				\n"	\
-		__WEAK_ORDERING_MB					\
+		__WEAK_LLSC_MB						\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -152,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3				\n"
 		"2:	sc	$1, %1				\n"
 		"	beqzl	$1, 1b				\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"3:						\n"
 		"	.set	pop				\n"
 		"	.section .fixup,\"ax\"			\n"
@@ -179,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 		"	.set	mips3				\n"
 		"2:	sc	$1, %1				\n"
 		"	beqz	$1, 1b				\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"3:						\n"
 		"	.set	pop				\n"
 		"	.section .fixup,\"ax\"			\n"
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 35e431cd796b..bb897016c491 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -67,7 +67,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res == 0;
 }
@@ -183,7 +183,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -193,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
@@ -262,7 +262,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
@@ -293,7 +293,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	.set	reorder	\n"
 		"	beqzl	%1, 1b	\n"
 		"	 nop		\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"2:			\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -310,7 +310,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	beqz	%1, 1b	\n"
 		"	 nop		\n"
 		"	.set	reorder	\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"2:			\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -336,7 +336,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	sc	%1, %0	\n"
 		"	beqzl	%1, 1b	\n"
 		"	 nop		\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	li	%2, 1	\n"
 		"	.set	reorder	\n"
 		"2:			\n"
@@ -354,7 +354,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 		"	beqz	%1, 3f	\n"
 		"	 li	%2, 1	\n"
 		"2:			\n"
-		__WEAK_ORDERING_MB
+		__WEAK_LLSC_MB
 		"	.subsection 2	\n"
 		"3:	b	1b	\n"
 		"	 li	%2, 0	\n"
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 76339165bc20..eba2e3da9abe 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -117,7 +117,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -165,7 +165,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -246,7 +246,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -352,7 +352,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
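system.h rounds the set out: __xchg_u32/__xchg_u64 and __cmpxchg_u32/__cmpxchg_u64 get the same trailing smp_llsc_mb() after their LL/SC loops, while the irq-disabling fallback already orders via raw_local_irq_restore(), as the context comments note. The externally visible contract of xchg(), sketched with a GCC builtin (illustrative; the real code is an ll/sc loop plus the barrier above):

        #include <stdio.h>

        /* xchg() contract: atomically swap in val and return the old
         * value, with full ordering on both sides. */
        static unsigned int xchg_u32_sketch(volatile unsigned int *m,
                                            unsigned int val)
        {
                return __atomic_exchange_n(m, val, __ATOMIC_SEQ_CST);
        }

        int main(void)
        {
                unsigned int word = 0xdead;

                printf("old = %#x\n", xchg_u32_sketch(&word, 0xbeef));
                printf("new = %#x\n", word);
                return 0;
        }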