| author | Matthew Wilcox <willy@infradead.org> | 2018-08-20 17:19:14 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-10-02 10:49:42 +0300 |
| commit | 27df89689e257cccb604fdf56c91a75a25aa554a (patch) | |
| tree | 783e97bf76ae8c269f28e59f21ec62c6dc3dfa3f | |
| parent | cb92173d1f0474784c6171a9d3fdbbca0ee53554 (diff) | |
| download | linux-27df89689e257cccb604fdf56c91a75a25aa554a.tar.xz | |
locking/spinlocks: Remove an instruction from spin and write locks
Both spin locks and write locks currently do:
```
 f0 0f b1 17             lock cmpxchg %edx,(%rdi)
 85 c0                   test   %eax,%eax
 75 05                   jne    [slowpath]
```
This 'test' insn is superfluous; the cmpxchg insn sets the Z flag
appropriately.  Peter pointed out that using atomic_try_cmpxchg_acquire()
will let the compiler know this is true.  Comparing before/after
disassemblies shows that the only effect is the removal of this insn.
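With the test gone, the same fast path should assemble to just the two
remaining instructions (inferred from the claim above rather than taken
from an actual disassembly; opcode bytes omitted because the jne
displacement changes once the insn is dropped):

```
 lock cmpxchg %edx,(%rdi)
 jne    [slowpath]
```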
Take this opportunity to make the spin & write lock code resemble each
other more closely and have similar likely() hints.
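For readers outside the kernel tree, the difference between the two
calling conventions can be sketched with C11 atomics.  This is a
user-space analogue for illustration only: the kernel's
atomic_cmpxchg_acquire()/atomic_try_cmpxchg_acquire() additionally
provide acquire ordering, which is omitted here.

```c
#include <stdatomic.h>
#include <stdbool.h>

/*
 * cmpxchg style: returns the value observed in *v.  The caller must
 * compare the return value against the expected one to learn whether
 * the swap happened -- that comparison is the extra 'test' insn.
 */
static inline int cmpxchg_style(atomic_int *v, int old, int new)
{
	atomic_compare_exchange_strong(v, &old, new);
	return old;	/* rewritten to the observed value on failure */
}

/*
 * try_cmpxchg style: returns success directly and writes the observed
 * value back through 'old' on failure.  The boolean result maps
 * straight onto the Z flag that x86's cmpxchg already sets, so the
 * compiler can branch on it without re-testing.
 */
static inline bool try_cmpxchg_style(atomic_int *v, int *old, int new)
{
	return atomic_compare_exchange_strong(v, old, new);
}
```

A lock fast path in the second style is then a single branch --
`int expected = 0; if (try_cmpxchg_style(&v, &expected, 1)) /* locked */;`
-- which is exactly the shape queued_spin_lock() and queued_write_lock()
take in the diff below.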
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Link: http://lkml.kernel.org/r/20180820162639.GC25153@bombadil.infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/asm-generic/qrwlock.h   |  7
-rw-r--r--  include/asm-generic/qspinlock.h | 16

2 files changed, 13 insertions, 10 deletions
```diff
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-					     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
-	return 0;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
+		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
+
 	queued_spin_lock_slowpath(lock, val);
 }
 
```
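One subtlety worth noting from the diff: on failure,
atomic_try_cmpxchg_acquire() writes the value it observed back into the
"expected" variable, so the slowpath still receives the freshly read
lock word, just as the old return-value form did.  The patched
queued_spin_lock(), reassembled from the hunks above with comments
added:

```c
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Fast path: swap 0 -> _Q_LOCKED_VAL with acquire ordering. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* 'val' now holds the value the failed cmpxchg observed. */
	queued_spin_lock_slowpath(lock, val);
}
```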
