author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-19 03:44:24 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-19 03:44:24 +0300
commit    | a200dcb34693084e56496960d855afdeaaf9578f
tree      | bf65e4350460b7f98247278469f7600d1808c3fc /arch/mips
parent    | d05d82f7110b08fd36178a641b69a1f206e1142b
parent    | 43e361f23c49dbddf74f56ddf6cdd85c5dbff6da
download  | linux-a200dcb34693084e56496960d855afdeaaf9578f.tar.xz
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio barrier rework+fixes from Michael Tsirkin:
"This adds a new kind of barrier, and reworks virtio and xen to use it.
Plus some fixes here and there"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
checkpatch: add virt barriers
checkpatch: check for __smp outside barrier.h
checkpatch.pl: add missing memory barriers
virtio: make find_vqs() checkpatch.pl-friendly
virtio_balloon: fix race between migration and ballooning
virtio_balloon: fix race by fill and leak
s390: more efficient smp barriers
s390: use generic memory barriers
xen/events: use virt_xxx barriers
xen/io: use virt_xxx barriers
xenbus: use virt_xxx barriers
virtio_ring: use virt_store_mb
sh: move xchg_cmpxchg to a header by itself
sh: support 1 and 2 byte xchg
virtio_ring: update weak barriers to use virt_xxx
Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
asm-generic: implement virt_xxx memory barriers
x86: define __smp_xxx
xtensa: define __smp_xxx
tile: define __smp_xxx
...
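The core idea of the rework, as a condensed sketch of the layering that include/asm-generic/barrier.h follows after this series (simplified, not a verbatim copy of the header): each architecture supplies __smp_xxx() primitives, SMP kernels map smp_xxx() onto them, UP kernels collapse smp_xxx() to compiler barriers, and the new virt_xxx() macros always expand to __smp_xxx() so a uniprocessor guest still orders its accesses against a hypervisor or device that may run on another physical CPU:

/* Condensed sketch of the asm-generic/barrier.h layering after this series.
 * Simplified: fallback chains and the full set of barriers are trimmed.
 */
#ifndef __smp_mb
#define __smp_mb()	mb()		/* arch may override with a cheaper SMP-only barrier */
#endif

#ifdef CONFIG_SMP
#define smp_mb()	__smp_mb()	/* ordering against other CPUs is needed */
#else
#define smp_mb()	barrier()	/* UP kernel: a compiler barrier is enough */
#endif

/* virt_xxx: for code talking to a hypervisor or paravirtual device (virtio,
 * xen). Always use the real barrier, even on a UP guest, because the other
 * side may be running on a different physical CPU.
 */
#define virt_mb()			__smp_mb()
#define virt_store_mb(var, value)	__smp_store_mb(var, value)

With this split, drivers such as virtio_ring and the xen ring code switch from conditional smp_xxx() barriers to virt_xxx(), which is what the "use virt_xxx barriers" commits above do.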
Diffstat (limited to 'arch/mips')
-rw-r--r-- | arch/mips/include/asm/barrier.h | 51
1 file changed, 16 insertions(+), 35 deletions(-)
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 752e0b86c171..d296633d890e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,9 +10,6 @@
 #include <asm/addrspace.h>
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
 	__asm__ __volatile__(			\
@@ -87,23 +84,21 @@
 #define wmb()		fast_wmb()
 #define rmb()		fast_rmb()
-#define dma_wmb()	fast_wmb()
-#define dma_rmb()	fast_rmb()
 
-#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#if defined(CONFIG_WEAK_ORDERING)
 # ifdef CONFIG_CPU_CAVIUM_OCTEON
-#  define smp_mb()	__sync()
-#  define smp_rmb()	barrier()
-#  define smp_wmb()	__syncw()
+#  define __smp_mb()	__sync()
+#  define __smp_rmb()	barrier()
+#  define __smp_wmb()	__syncw()
 # else
-#  define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
-#  define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+#  define __smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
 # endif
 #else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
+#define __smp_mb()	barrier()
+#define __smp_rmb()	barrier()
+#define __smp_wmb()	barrier()
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
@@ -112,13 +107,11 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_store_mb(var, value) \
-	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
 /* Cause previous writes to become visible on all CPUs as soon as possible */
 #define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
 					    ".set arch=octeon\n\t"	\
@@ -126,25 +119,13 @@
 					    ".set pop" : : : "memory")
 #else
 #define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
 #define nudge_writes() mb()
 #endif
 
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	smp_mb();							\
-	___p1;								\
-})
-
-#define smp_mb__before_atomic()	smp_mb__before_llsc()
-#define smp_mb__after_atomic()	smp_llsc_mb()
+#define __smp_mb__before_atomic()	__smp_mb__before_llsc()
+#define __smp_mb__after_atomic()	smp_llsc_mb()
+
+#include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
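The deletions above do not drop functionality: once the file includes <asm-generic/barrier.h>, the generic header rebuilds smp_store_mb(), smp_store_release(), smp_load_acquire() and the smp_mb__before/after_atomic() pair on top of the arch-supplied __smp_xxx() hooks. A simplified sketch of that fallback (not a verbatim copy of the generic header; UP details and extra fallbacks trimmed):

/* Simplified sketch of how asm-generic/barrier.h recreates the macros the
 * MIPS header just dropped, using the arch-provided __smp_mb().
 */
#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();	/* order earlier accesses before the store */	\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();	/* order the load before later accesses */	\
	___p1;								\
})
#endif

#ifdef CONFIG_SMP
#define smp_store_release(p, v)	__smp_store_release(p, v)
#define smp_load_acquire(p)	__smp_load_acquire(p)
#else	/* UP: compiler ordering is sufficient (sketch; real header keeps the type checks) */
#define smp_store_release(p, v)	do { barrier(); WRITE_ONCE(*p, v); } while (0)
#define smp_load_acquire(p)	({ typeof(*p) ___p1 = READ_ONCE(*p); barrier(); ___p1; })
#endif

With those definitions centralized, an architecture like MIPS only needs to override the pieces where it can do better than a full __smp_mb(), which is why the per-arch header shrinks to the __smp_xxx() primitives plus the final include.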