summaryrefslogtreecommitdiff
path: root/arch/parisc/include/asm/spinlock.h
diff options
context:
space:
mode:
authorJohn David Anglin <dave.anglin@bell.net>2018-10-17 04:07:59 +0300
committerHelge Deller <deller@gmx.de>2018-10-17 09:18:01 +0300
commitd27dfa13b9f77ae7e6ed09d70a0426ed26c1a8f9 (patch)
treec5cf891ba797b92a699bc6c3da95016ffc057c88 /arch/parisc/include/asm/spinlock.h
parent2e37787df0ed0047848dba2eba53510a9185b991 (diff)
downloadlinux-d27dfa13b9f77ae7e6ed09d70a0426ed26c1a8f9.tar.xz
parisc: Release spinlocks using ordered store
This patch updates the spin unlock code to use an ordered store with release semantics. All prior accesses are guaranteed to be performed before an ordered store is performed. Using an ordered store is significantly faster than using the sync memory barrier. Signed-off-by: John David Anglin <dave.anglin@bell.net> Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch/parisc/include/asm/spinlock.h')
-rw-r--r--arch/parisc/include/asm/spinlock.h4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 8a63515f03bf..16aec9ba2580 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
- mb();
- *a = 1;
+ /* Release with ordered store. */
+ __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *x)