| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-07-07 03:07:56 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-07-07 03:07:56 +0300 |
| commit | c7e9ad7da219cf3f8a7cc45eb1c02fdd91199e8d (patch) | |
| tree | 3c4667adbb13c479ed7f65e0349d4f1f507d0082 /tools/include/linux/compiler.h | |
| parent | 1c4c7159ed2468f3ac4ce5a7f08d79663d381a93 (diff) | |
| parent | ebf2d2689de551d90965090bb991fc640a0c0d41 (diff) | |
| download | linux-c7e9ad7da219cf3f8a7cc45eb1c02fdd91199e8d.tar.xz | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
- fix the perf build, by fixing the rbtree.c sharing bug between kernel
and tools/perf by creating a local copy of rbtree.c (more will be
done for v4.3)
- fix an AUX buffer (Intel-PT support) refcounting bug
- fix copy_from_user_nmi() return value
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86: Fix copy_from_user_nmi() return if range is not ok
perf: Fix AUX buffer refcounting
tools: Copy rbtree_augmented.h from the kernel
tools: Move rbtree.h from tools/perf/
tools: Copy lib/rbtree.c to tools/lib/
perf tools: Copy rbtree.h from the kernel
tools: Adopt {READ,WRITE}_ONCE from the kernel
Diffstat (limited to 'tools/include/linux/compiler.h')
| -rw-r--r-- | tools/include/linux/compiler.h | 58 |
1 file changed, 58 insertions, 0 deletions
```diff
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index f0e72674c52d..9098083869c8 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -41,4 +41,62 @@
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+#include <linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering. One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
```
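For context (not part of the commit), here is a minimal user-space sketch of the failure mode these macros exist to prevent. The `ready`/`payload` variables, the `writer` thread, and the one-line volatile-cast stand-ins are hypothetical simplifications: they cover only the scalar cases, whereas the macros added above dispatch on sizeof() and fall back to a barrier()-fenced __builtin_memcpy() for other sizes.

```c
/*
 * Illustrative sketch only -- not the patch's implementation.
 * These stand-ins handle scalars; the real READ_ONCE()/WRITE_ONCE()
 * above also work on aggregates via a size switch + memcpy fallback.
 */
#include <stdio.h>
#include <pthread.h>

#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int ready;	/* hypothetical flag published by the writer */
static int payload;	/* hypothetical data the flag guards */

static void *writer(void *arg)
{
	payload = 42;
	WRITE_ONCE(ready, 1);	/* store cannot be torn, elided, or merged */
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/*
	 * With a plain load the compiler may hoist `ready` into a
	 * register and spin on a stale zero forever; READ_ONCE()
	 * forces a fresh load on every iteration. Note it provides
	 * no CPU-level ordering by itself: real producer/consumer
	 * code pairs these macros with memory barriers as needed.
	 */
	while (!READ_ONCE(ready))
		;

	pthread_join(t, NULL);
	printf("payload = %d\n", payload);
	return 0;
}
```

Built with something like `gcc -O2 -pthread sketch.c`, dropping READ_ONCE() from the loop lets the optimizer cache `ready` in a register, and the loop can hang. The union `{ typeof(x) __val; char __c[1]; }` in the real macros is what lets the same interface copy structs and unions in and out as well.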