From 626e5fbc14358901ddaa90ce510e0fbeab310432 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 Apr 2018 11:34:24 +0100
Subject: locking/qspinlock: Use smp_store_release() in queued_spin_unlock()

A qspinlock can be unlocked simply by writing zero to the locked byte.
This can be implemented in the generic code, so do that and remove the
arch-specific override for x86 in the !PV case.

Signed-off-by: Will Deacon
Acked-by: Peter Zijlstra (Intel)
Acked-by: Waiman Long
Cc: Linus Torvalds
Cc: Thomas Gleixner
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-11-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar
---
 include/asm-generic/qspinlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index b37b4ad7eb94..a8ed0a352d75 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -100,7 +100,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * unlock() needs release semantics:
 	 */
-	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+	smp_store_release(&lock->locked, 0);
 }
 
 #endif
-- 
cgit v1.2.3
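
Editor's note, not part of the patch: the sketch below mimics the same unlock pattern in user space with C11 atomics, to illustrate why a single release store of zero to the locked byte is sufficient. The names (toy_spinlock, toy_lock, toy_unlock) are hypothetical and invented for this example; this is not the kernel's qspinlock implementation, only a minimal analogue of its unlock fast path.

/*
 * Illustrative user-space analogue of the unlock fast path, using C11
 * atomics. NOT the kernel's qspinlock; names here are made up.
 */
#include <stdatomic.h>

struct toy_spinlock {
	atomic_uchar locked;	/* 0 = unlocked, 1 = locked */
};

static inline void toy_lock(struct toy_spinlock *lock)
{
	/*
	 * The acquire ordering on the winning exchange pairs with the
	 * release store in toy_unlock(), so the new owner observes all
	 * writes made under the previous critical section.
	 */
	while (atomic_exchange_explicit(&lock->locked, 1,
					memory_order_acquire))
		;	/* spin until we observe 0 and set it to 1 */
}

static inline void toy_unlock(struct toy_spinlock *lock)
{
	/*
	 * A plain release store of zero hands the lock over, mirroring
	 * smp_store_release(&lock->locked, 0) in the patch above; no
	 * read-modify-write is needed on the unlock side.
	 */
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}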