| author | Philipp Hachtmann <phacht@linux.vnet.ibm.com> | 2014-04-07 20:25:23 +0400 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2014-05-20 10:58:42 +0400 |
| commit | 6c8cd5bbda7e6be166cf2e2dd4be5890193e17ac | |
| tree | 0245d1a206b04c2cd2b5b4914dfb696205673861 /arch/s390/include/asm/spinlock.h | |
| parent | 5b3f683e694a835f5dfdab06102be1a50604c3b7 | |
s390/spinlock: optimize spinlock code sequence
Use lowcore constant to improve the code generated for spinlocks.
[ Martin Schwidefsky: patch breakdown and code beautification ]
Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
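
Why this helps: before the change, every trylock/tryrelease recomputed the owner value with ~smp_processor_id(); afterwards the fast path reads a per-CPU constant, SPINLOCK_LOCKVAL, straight out of the lowcore (S390_lowcore.spinlock_lockval), which the compiler can typically turn into a single load from a fixed lowcore offset. The encoding itself is simple: 0 means unlocked, and a held lock stores the bitwise complement of the owner's CPU number, which cannot collide with 0 for any realistic CPU count. A minimal standalone sketch of that encoding (plain user-space C; the model_ name is invented for illustration, this is not kernel code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Lock value encoding used by the patch: 0 = unlocked, ~cpu = held
 * by that cpu. For any realistic CPU number ~cpu is nonzero, so an
 * owner value can never be mistaken for the unlocked state. */
static uint32_t model_spin_lockval(int cpu)
{
	return ~(uint32_t)cpu;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d -> lockval 0x%08" PRIx32 "\n",
		       cpu, model_spin_lockval(cpu));
	/* cpu 0 -> 0xffffffff, cpu 1 -> 0xfffffffe, ...: never 0 */
	return 0;
}
```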
Diffstat (limited to 'arch/s390/include/asm/spinlock.h')
-rw-r--r-- | arch/s390/include/asm/spinlock.h | 15 |
1 file changed, 9 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index b60212a02d08..5a0b2882ad48 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -11,6 +11,8 @@
 
 #include <linux/smp.h>
 
+#define SPINLOCK_LOCKVAL	(S390_lowcore.spinlock_lockval)
+
 extern int spin_retry;
 
 static inline int
@@ -40,6 +42,11 @@ int arch_spin_trylock_retry(arch_spinlock_t *);
 void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline u32 arch_spin_lockval(int cpu)
+{
+	return ~cpu;
+}
+
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
 	return lock.lock == 0;
@@ -52,16 +59,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-	unsigned int new = ~smp_processor_id();
-
-	return _raw_compare_and_swap(&lp->lock, 0, new);
+	return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
 }
 
 static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	unsigned int old = ~smp_processor_id();
-
-	return _raw_compare_and_swap(&lp->lock, old, 0);
+	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
```
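
For readers without an s390 toolchain, the compare-and-swap protocol in the hunks above can be modeled in portable C11. This is a sketch under stated assumptions, not the kernel implementation: _raw_compare_and_swap is stood in for by C11 atomic_compare_exchange_strong, SPINLOCK_LOCKVAL becomes an explicit parameter, and all model_ names are invented for the example:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { _Atomic uint32_t lock; } model_spinlock_t;

/* Acquire only if the lock is currently unlocked (0). */
static int model_spin_trylock_once(model_spinlock_t *lp, uint32_t lockval)
{
	uint32_t unlocked = 0;

	return atomic_compare_exchange_strong(&lp->lock, &unlocked, lockval);
}

/* Release only if the caller's lockval is the stored owner value;
 * anyone else's lockval will not match and the CAS fails. */
static int model_spin_tryrelease_once(model_spinlock_t *lp, uint32_t lockval)
{
	uint32_t owner = lockval;

	return atomic_compare_exchange_strong(&lp->lock, &owner, 0);
}

int main(void)
{
	model_spinlock_t lock = { 0 };
	uint32_t lv = ~(uint32_t)3;	/* pretend to be CPU 3 */

	printf("trylock: %d\n", model_spin_trylock_once(&lock, lv));    /* 1 */
	printf("retry:   %d\n", model_spin_trylock_once(&lock, lv));    /* 0: held */
	printf("release: %d\n", model_spin_tryrelease_once(&lock, lv)); /* 1 */
	return 0;
}
```

Note the design choice visible in the diff: release is a CAS rather than a plain store, so an unlock only succeeds when the caller's lock value is the one actually stored, i.e. when the caller really owns the lock.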