| author | Mark Brown <broonie@linaro.org> | 2013-08-22 14:09:03 +0400 |
|---|---|---|
| committer | Mark Brown <broonie@linaro.org> | 2013-08-22 14:09:03 +0400 |
| commit | 64393c6e647c8a791243063d282c787b46721be7 (patch) | |
| tree | 89ca12bd5d52e8df24d2bc13a36d30479a4e825d /include/linux/spinlock.h | |
| parent | a2388a498ad2f85be01aca29e364abf427d9b53c (diff) | |
| parent | 741a509f34d8d702f70d0ad99b8152c57d76961e (diff) | |
| download | linux-64393c6e647c8a791243063d282c787b46721be7.tar.xz | |
Merge remote-tracking branch 'asoc/topic/ac97' into asoc-fsl
Diffstat (limited to 'include/linux/spinlock.h')
| -rw-r--r-- | include/linux/spinlock.h | 14 |
1 file changed, 11 insertions(+), 3 deletions(-)
```diff
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily has to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
 /**
```
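For context, the usage pattern this barrier supports looks roughly like the sketch below. This is a minimal illustration, not code from this commit: the lock, flag, and function names are hypothetical, modeled on the try_to_wake_up()-style "STORE, then lock, then LOAD" sequence the new comment describes.

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static int demo_flag;			/* hypothetical shared flag */

void demo_store_then_lock(void)
{
	demo_flag = 1;			/* STORE before the critical section */

	/*
	 * spin_lock() is only a one-way (acquire) barrier: LOADs cannot
	 * escape the critical section, but the STORE above could still
	 * sink into it. smp_mb__before_spinlock() prevents that; the
	 * default smp_wmb() serializes the STORE against the STORE done
	 * by spin_lock() itself.
	 */
	smp_mb__before_spinlock();
	spin_lock(&demo_lock);
	/* LOADs in here are ordered after the STORE to demo_flag. */
	spin_unlock(&demo_lock);
}
```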