author		Philipp Hachtmann <phacht@linux.vnet.ibm.com>	2014-04-07 20:25:23 +0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 10:58:41 +0400
commit		5b3f683e694a835f5dfdab06102be1a50604c3b7 (patch)
tree		d9f1b030cd5d211c2a014194a288ad807f20308e /arch/s390/include
parent		50be634507284eea38df78154d22615d21200b42 (diff)
download	linux-5b3f683e694a835f5dfdab06102be1a50604c3b7.tar.xz
s390/spinlock: cleanup spinlock code
Improve the spinlock code in several aspects:
- Have _raw_compare_and_swap return true if the operation has been
successful instead of returning the old value.
- Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
- Rename 'owner_cpu' to 'lock'
- Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once
[ Martin Schwidefsky: patch breakdown and code beautification ]
Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
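To illustrate the first and last points above, here is a minimal user-space sketch (not the kernel code itself) of a compare-and-swap that reports success as a boolean and of *_once helpers built on top of it. __atomic_compare_exchange_n stands in for the s390 "cs" instruction, and fake_processor_id(), sketch_spinlock_t and the sketch_* helpers are invented names for this example only.

/*
 * Sketch of the new locking pattern: the CAS primitive returns true on
 * success, and the lock fast paths are built from *_once helpers.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned int lock; } sketch_spinlock_t;

static unsigned int fake_processor_id(void) { return 1; }

/* Returns true when the CAS succeeded, mirroring the patched semantics. */
static bool sketch_compare_and_swap(unsigned int *lock,
				    unsigned int old, unsigned int new)
{
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

static bool sketch_trylock_once(sketch_spinlock_t *lp)
{
	/* 0 means unlocked; a held lock stores the owner's ~CPU-id. */
	return sketch_compare_and_swap(&lp->lock, 0, ~fake_processor_id());
}

static bool sketch_tryrelease_once(sketch_spinlock_t *lp)
{
	/* Only the owner's ~CPU-id value unlocks back to 0. */
	return sketch_compare_and_swap(&lp->lock, ~fake_processor_id(), 0);
}

int main(void)
{
	sketch_spinlock_t lp = { .lock = 0 };

	printf("trylock: %d\n", sketch_trylock_once(&lp));    /* 1: acquired     */
	printf("trylock: %d\n", sketch_trylock_once(&lp));    /* 0: already held */
	printf("release: %d\n", sketch_tryrelease_once(&lp)); /* 1: released     */
	return 0;
}

With the boolean return, the lock fast path collapses to a single test of the trylock helper and falls back to the wait routine on failure, instead of each caller comparing the returned old value against zero, which is exactly the shape the diff below gives arch_spin_lock, arch_spin_lock_flags and arch_spin_trylock.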
Diffstat (limited to 'arch/s390/include')
-rw-r--r--	arch/s390/include/asm/spinlock.h	98
-rw-r--r--	arch/s390/include/asm/spinlock_types.h	6
2 files changed, 56 insertions, 48 deletions
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 83e5d216105e..b60212a02d08 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,15 +14,16 @@
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
@@ -34,57 +35,66 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		arch_spin_relax(lock); } while (0)
-
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+	return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int new = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+	return _raw_compare_and_swap(&lp->lock, 0, new);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	int old;
+	unsigned int old = ~smp_processor_id();
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+	return _raw_compare_and_swap(&lp->lock, old, 0);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
-	int old;
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait(lp);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+					unsigned long flags)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait_flags(lp, flags);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
 }
-
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
+}
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -119,7 +129,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait(rw);
 }
 
@@ -127,30 +137,28 @@ static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
@@ -163,14 +171,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
 		return 1;
 	return _raw_read_trylock_retry(rw);
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 9c76656a0af0..b2cd6ff7c2c5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int owner_cpu;
+	unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
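The reworked arch_read_unlock above relies on the same boolean CAS to form a compact retry loop: re-read the lock word each iteration and retry until the CAS succeeds. Below is a user-space model of that pattern only, not the kernel code; the __atomic builtins stand in for ACCESS_ONCE and the s390 "cs" instruction, and sketch_read_unlock/sketch_cas are invented names.

/* Model of the read-unlock retry loop: decrement the reader count with CAS. */
#include <stdbool.h>
#include <stdio.h>

static bool sketch_cas(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

static void sketch_read_unlock(unsigned int *lock)
{
	unsigned int old;

	do {
		/* Re-read the current value each iteration before retrying. */
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
	} while (!sketch_cas(lock, old, old - 1));
}

int main(void)
{
	unsigned int lock = 3;	/* three readers currently hold the lock */

	sketch_read_unlock(&lock);
	printf("readers left: %u\n", lock);	/* prints 2 */
	return 0;
}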