author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-04 19:39:38 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-04 19:39:38 +0400
commit    3d7e5fc37f91c3ad4974262e173d9ba36139652a (patch)
tree      471484562a2c0341dbee0b4d2e10fdfd5d049c58 /arch/x86/include/asm/mutex_64.h
parent    6924a4672dd07dbe11d76fe597d17a092434232f (diff)
parent    f69fa9a91f60fff6f2d8b658b7d84d235d9d89b7 (diff)
download  linux-3d7e5fc37f91c3ad4974262e173d9ba36139652a.tar.xz
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/asm changes from Ingo Molnar:
 "Main changes:

   - Apply low level mutex optimization on x86-64, by Wedson Almeida Filho.

   - Change bitops to be naturally 'long', by H. Peter Anvin.

   - Add TSX-NI opcodes support to the x86 (instrumentation) decoder, by Masami Hiramatsu.

   - Add clang compatibility adjustments/workarounds, by Jan-Simon Möller"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, doc: Update uaccess.h comment to reflect clang changes
  x86, asm: Fix a compilation issue with clang
  x86, asm: Extend definitions of _ASM_* with a raw format
  x86, insn: Add new opcodes as of June, 2013
  x86/ia32/asm: Remove unused argument in macro
  x86, bitops: Change bitops to be native operand size
  x86: Use asm-goto to implement mutex fast path on x86-64
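The last item in that list is what this page's diff shows: with a compiler that supports asm goto (detected by the kernel build as CC_HAVE_ASM_GOTO), inline assembly may branch directly to a C label, so the slowpath call stays in plain C. Below is a minimal user-space sketch of the pattern; the names counter_t, slowpath and fastpath_lock are illustrative, not kernel identifiers.

	/*
	 * Sketch of the asm-goto fast path (assumes GCC 4.5+ or a
	 * compatible compiler on x86-64). Not kernel code.
	 */
	#include <stdio.h>

	typedef struct { int counter; } counter_t;

	static void slowpath(counter_t *v)
	{
		printf("contended: counter = %d\n", v->counter);
	}

	static inline void fastpath_lock(counter_t *v)
	{
		/*
		 * Decrement the counter; if the result is still
		 * non-negative, branch straight to the C label 'exit'.
		 * No call instruction, no register spills on the
		 * uncontended path.
		 */
		asm volatile goto("lock; decl %0\n\t"
				  "jns %l[exit]\n"
				  : /* asm goto allows no outputs */
				  : "m" (v->counter)
				  : "memory", "cc"
				  : exit);
		slowpath(v);	/* ordinary C call; the compiler tracks clobbers */
	exit:
		return;
	}

	int main(void)
	{
		counter_t lock = { .counter = 1 };

		fastpath_lock(&lock);	/* 1 -> 0: fast path taken */
		fastpath_lock(&lock);	/* 0 -> -1: slowpath runs */
		return 0;
	}

Compiled with gcc -O2, the fast path is one locked decrement plus a conditional jump; only the contended case pays for a function call.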
Diffstat (limited to 'arch/x86/include/asm/mutex_64.h')
-rw-r--r--  arch/x86/include/asm/mutex_64.h  |  30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 2c543fff241b..e7e6751648ed 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -16,6 +16,20 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_lock(atomic_t *v,
+					 void (*fail_fn)(atomic_t *))
+{
+	asm volatile goto(LOCK_PREFIX " decl %0\n"
+			  " jns %l[exit]\n"
+			  : : "m" (v->counter)
+			  : "memory", "cc"
+			  : exit);
+	fail_fn(v);
+exit:
+	return;
+}
+#else
#define __mutex_fastpath_lock(v, fail_fn) \
do { \
	unsigned long dummy; \
@@ -32,6 +46,7 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -56,6 +71,20 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_unlock(atomic_t *v,
+					   void (*fail_fn)(atomic_t *))
+{
+	asm volatile goto(LOCK_PREFIX " incl %0\n"
+			  " jg %l[exit]\n"
+			  : : "m" (v->counter)
+			  : "memory", "cc"
+			  : exit);
+	fail_fn(v);
+exit:
+	return;
+}
+#else
#define __mutex_fastpath_unlock(v, fail_fn) \
do { \
	unsigned long dummy; \
@@ -72,6 +101,7 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
#define __mutex_slowpath_needs_to_unlock() 1
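
Both fast paths rely on the x86-64 mutex counter convention: 1 means unlocked, 0 locked with no waiters, and a negative value locked with possible waiters. That is why decl + jns (result still non-negative: the lock was free) and incl + jg (result positive: nobody to wake) select the uncontended cases, and why the asm-goto form lets the compiler emit fail_fn() as an ordinary C call instead of a call from inline assembly with the long clobber list visible in the #else branches. A user-space model of that protocol, with GCC __atomic builtins standing in for the locked instructions (illustrative only, not kernel code):

	#include <stdio.h>

	static int counter = 1;	/* 1 = unlocked, 0 = locked, <0 = contended */

	static void lock(void)
	{
		/* decl + jns: a non-negative result means the lock was free */
		if (__atomic_sub_fetch(&counter, 1, __ATOMIC_ACQUIRE) < 0)
			printf("lock slowpath: wait for the owner\n");
	}

	static void unlock(void)
	{
		/* incl + jg: a positive result means there was nobody to wake */
		if (__atomic_add_fetch(&counter, 1, __ATOMIC_RELEASE) <= 0)
			printf("unlock slowpath: wake a waiter\n");
	}

	int main(void)
	{
		lock();		/* 1 -> 0:  fast path */
		lock();		/* 0 -> -1: contended, slowpath */
		unlock();	/* -1 -> 0: result <= 0, wake someone */
		unlock();	/* 0 -> 1:  fast path */
		return 0;
	}

The nonpositive result in unlock() also explains __mutex_slowpath_needs_to_unlock() returning 1 here: on this implementation the slowpath still has to release the lock itself.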