author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-06-30 01:47:44 +0300
---|---|---
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-08-17 18:08:58 +0300
commit | d3a024abbc438277851c510b51ec9b158821488b (patch) |
tree | e9b6f5c09bfd6825629bcbd20f0a17371ce29533 /include/asm-generic |
parent | a4f08141d0b876407ca4e4b0ae1b2dec35851532 (diff) |
download | linux-d3a024abbc438277851c510b51ec9b158821488b.tar.xz |
locking: Remove spin_unlock_wait() generic definitions
There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair. This commit therefore removes spin_unlock_wait() and related
definitions from core code.
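For illustration only (not part of the commit text): the replacement idiom is an acquire/release pair on the same lock, which guarantees that any critical section that began before the acquire has completed by the time the pair returns. Below is a minimal userspace sketch of the before/after pattern, using POSIX threads rather than the kernel spinlock API; all names here are hypothetical.

```c
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: spin_unlock_wait(&lock) -- wait for the current holder to
 * release the lock, with no agreed-upon ordering guarantees.
 *
 * After: take and immediately release the lock. This serializes with
 * the holder, so any critical section that started before this call
 * is guaranteed to have completed by the time it returns.
 */
static void wait_for_lock_holder(void)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}
```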
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/qspinlock.h | 14 |
1 file changed, 0 insertions, 14 deletions
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9f0681bf1e87..66260777d644 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,17 +22,6 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
- * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-#ifndef queued_spin_unlock_wait
-extern void queued_spin_unlock_wait(struct qspinlock *lock);
-#endif
-
-/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
@@ -41,8 +30,6 @@ extern void queued_spin_unlock_wait(struct qspinlock *lock);
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
-	 * See queued_spin_unlock_wait().
-	 *
 	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
 	 * isn't immediately observable.
 	 */
@@ -135,6 +122,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_trylock(l)	queued_spin_trylock(l)
 #define arch_spin_unlock(l)	queued_spin_unlock(l)
 #define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
-#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
 
 #endif /* __ASM_GENERIC_QSPINLOCK_H */
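For context, here is a hedged userspace analogue (illustrative only, not the kernel's actual implementation) of the semantics being removed: poll the lock word until it is observed unlocked at least once. It makes the livelock risk described in the deleted kernel-doc visible, since a steady stream of new lockers can keep the waiter from ever observing an unlocked value.

```c
#include <stdatomic.h>
#include <sched.h>

struct toy_spinlock {
	atomic_int val;	/* nonzero while held */
};

/* Analogue of the removed queued_spin_unlock_wait(): spin until the
 * lock is seen free at least once. If lockers keep re-acquiring the
 * lock back to back, this loop may never observe val == 0 -- the
 * "very slight possibility of live-lock" noted in the deleted comment.
 */
static void toy_spin_unlock_wait(struct toy_spinlock *lock)
{
	while (atomic_load_explicit(&lock->val, memory_order_acquire))
		sched_yield();	/* give the lock holder a chance to run */
}
```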