| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-11 20:51:26 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-11 20:51:26 +0300 |
| commit | 1be5bdf8cd5a194d981e65687367b0828c839c37 (patch) | |
| tree | d6560826c211fd7a4ddcc1d45040dab123271df8 /include/asm-generic/barrier.h | |
| parent | 1c824bf768d69fce36de748c60c7197a2b838944 (diff) | |
| parent | b473a3891c46393e9c4ccb4e3197d7fb259c7100 (diff) | |
| download | linux-1be5bdf8cd5a194d981e65687367b0828c839c37.tar.xz | |
Merge tag 'kcsan.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull KCSAN updates from Paul McKenney:
"This provides KCSAN fixes and also the ability to take memory barriers
into account for weakly-ordered systems. This last can increase the
probability of detecting certain types of data races"
* tag 'kcsan.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (29 commits)
kcsan: Only test clear_bit_unlock_is_negative_byte if arch defines it
kcsan: Avoid nested contexts reading inconsistent reorder_access
kcsan: Turn barrier instrumentation into macros
kcsan: Make barrier tests compatible with lockdep
kcsan: Support WEAK_MEMORY with Clang where no objtool support exists
compiler_attributes.h: Add __disable_sanitizer_instrumentation
objtool, kcsan: Remove memory barrier instrumentation from noinstr
objtool, kcsan: Add memory barrier instrumentation to whitelist
sched, kcsan: Enable memory barrier instrumentation
mm, kcsan: Enable barrier instrumentation
x86/qspinlock, kcsan: Instrument barrier of pv_queued_spin_unlock()
x86/barriers, kcsan: Use generic instrumentation for non-smp barriers
asm-generic/bitops, kcsan: Add instrumentation for barriers
locking/atomics, kcsan: Add instrumentation for barriers
locking/barriers, kcsan: Support generic instrumentation
locking/barriers, kcsan: Add instrumentation for barriers
kcsan: selftest: Add test case to check memory barrier instrumentation
kcsan: Ignore GCC 11+ warnings about TSan runtime support
kcsan: test: Add test cases for memory barrier instrumentation
kcsan: test: Match reordered or normal accesses
...
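As a gloss on the "take memory barriers into account" point in the pull message above, here is a minimal, hypothetical sketch of the publish/consume pattern whose missing barriers the weak-memory modeling is designed to flag. The names (my_data, my_data_ready, publish_data(), consume_data()) are invented for illustration and are not code from this series:

```c
/*
 * Hypothetical sketch (not from this commit series): the class of
 * ordering bug that KCSAN's barrier/weak-memory modeling targets.
 */
#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_wmb()/smp_rmb() */

static int my_data;
static int my_data_ready;

static void publish_data(void)
{
	my_data = 42;
	smp_wmb();			/* order the data store before the flag store */
	WRITE_ONCE(my_data_ready, 1);
}

static int consume_data(void)
{
	if (READ_ONCE(my_data_ready)) {
		smp_rmb();		/* pairs with the producer's smp_wmb() */
		return my_data;
	}
	return -1;
}
```

Roughly speaking, if the smp_wmb()/smp_rmb() pair is removed, a KCSAN build with the weak-memory support added here (the WEAK_MEMORY commit in the list above) can simulate the resulting reordering and report a data race on my_data, even when the test machine's hardware never exhibits the reordering itself.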
Diffstat (limited to 'include/asm-generic/barrier.h')
-rw-r--r-- | include/asm-generic/barrier.h | 54 |
1 file changed, 40 insertions, 14 deletions
```diff
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 4c2c1b830344..3d503e74037f 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,6 +14,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
 #include <asm/rwonce.h>
 
 #ifndef nop
@@ -21,6 +22,31 @@
 #endif
 
 /*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
  * Force strict CPU ordering. And yes, this is required on UP too when we're
  * talking to devices.
  *
@@ -62,15 +88,15 @@
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
 #endif
 
 #else /* !CONFIG_SMP */
@@ -123,19 +149,19 @@ do { \
 #ifdef CONFIG_SMP
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #endif
 
 #ifndef smp_load_acquire
@@ -178,13 +204,13 @@ do { \
 #endif /* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
 /**
```
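For illustration, a hypothetical architecture header showing how the new "#ifdef __mb" blocks above are intended to be used: the architecture provides only the raw __-prefixed primitives and includes asm-generic/barrier.h, which then wraps them with the kcsan_*() calls. The header name, include guard, and fence mnemonics below are invented placeholders, not code from this series:

```c
/*
 * Hypothetical arch/<arch>/include/asm/barrier.h sketch.  By defining the
 * __ prefixed variants before including asm-generic/barrier.h, the
 * architecture picks up instrumented mb()/rmb()/wmb() definitions of the
 * form "do { kcsan_*(); __*(); } while (0)" from the generic header.
 */
#ifndef _ASM_EXAMPLE_BARRIER_H
#define _ASM_EXAMPLE_BARRIER_H

/* Raw hardware barriers; the instruction mnemonics are placeholders. */
#define __mb()	asm volatile("fence rw,rw" ::: "memory")
#define __rmb()	asm volatile("fence r,r"   ::: "memory")
#define __wmb()	asm volatile("fence w,w"   ::: "memory")

/* Pull in the instrumented mb()/rmb()/wmb() and the smp_*() fallbacks. */
#include <asm-generic/barrier.h>

#endif /* _ASM_EXAMPLE_BARRIER_H */
```

Per the shortlog above, the x86 barrier and asm-generic/bitops commits in this series apply the same generic-instrumentation pattern to existing architecture-specific and bitops barriers.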