summaryrefslogtreecommitdiff
path: root/arch/x86/include/asm/percpu.h
diff options
context:
space:
mode:
author    Brian Gerst <brgerst@gmail.com>  2009-01-18 18:38:59 +0300
committer Tejun Heo <tj@kernel.org>        2009-01-18 18:38:59 +0300
commit    87b264065880fa696c121dad8498a60524e0f6de (patch)
tree      5a40a9ca966e5feda1040f9ef2c7798ac4d1e234 /arch/x86/include/asm/percpu.h
parent    c2558e0eba66b49993e619da66c95a50a97830a3 (diff)
download  linux-87b264065880fa696c121dad8498a60524e0f6de.tar.xz
x86-64: Use absolute displacements for per-cpu accesses.
Accessing memory through %gs should not use rip-relative addressing. Adding a "P" prefix for the argument tells gcc not to add (%rip) to the memory references.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--  arch/x86/include/asm/percpu.h  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 03aa4b00a1c3..165d5272ece1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -39,10 +39,10 @@
#include <linux/stringify.h>
#ifdef CONFIG_SMP
-#define __percpu_seg_str "%%"__stringify(__percpu_seg)":"
+#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset percpu_read(this_cpu_off)
#else
-#define __percpu_seg_str
+#define __percpu_arg(x) "%" #x
#endif
/* For arch-specific code, we can use direct single-insn ops (they
@@ -58,22 +58,22 @@ do { \
} \
switch (sizeof(var)) { \
case 1: \
- asm(op "b %1,"__percpu_seg_str"%0" \
+ asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 2: \
- asm(op "w %1,"__percpu_seg_str"%0" \
+ asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 4: \
- asm(op "l %1,"__percpu_seg_str"%0" \
+ asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 8: \
- asm(op "q %1,"__percpu_seg_str"%0" \
+ asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
: "r" ((T__)val)); \
break; \
@@ -86,22 +86,22 @@ do { \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_seg_str"%1,%0" \
+ asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_seg_str"%1,%0" \
+ asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_seg_str"%1,%0" \
+ asm(op "l "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_seg_str"%1,%0" \
+ asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
@@ -122,9 +122,9 @@ do { \
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
int old__; \
- asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0" \
- : "=r" (old__) \
- : "dIr" (bit), "i" (&per_cpu__##var) : "memory"); \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (per_cpu__##var) \
+ : "dIr" (bit)); \
old__; \
})