author     Arnd Bergmann <arnd@arndb.de>  2020-10-19 10:09:21 +0300
committer  Arnd Bergmann <arnd@arndb.de>  2020-10-26 22:19:48 +0300
commit     f44ca0871b7a98b075560711d48849914a102221 (patch)
tree       40dbcc672bdb5d9248133cd637653ff812a6563b /include/asm-generic/qspinlock.h
parent     6f6573a4044adefbd07f1bd951a2041150e888d7 (diff)
download   linux-f44ca0871b7a98b075560711d48849914a102221.tar.xz
qspinlock: use signed temporaries for cmpxchg
When building with W=2, the build log is flooded with warnings like:
include/asm-generic/qrwlock.h:65:56: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qrwlock.h:92:53: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qspinlock.h:68:55: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
include/asm-generic/qspinlock.h:82:52: warning: pointer targets in passing argument 2 of 'atomic_try_cmpxchg_acquire' differ in signedness [-Wpointer-sign]
The atomics are built on top of signed integers (atomic_t wraps a plain
int), but the caller doesn't actually care about the signedness here.
Just use signed temporaries as well.
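
For illustration, a minimal userspace sketch of the same mismatch (my own
stand-in, not kernel code: try_cmpxchg_acquire() below is a hypothetical
mock of the kernel helper, whose "old" argument is an int * because
atomic_t wraps an int):

/* Illustrative stand-in for the kernel API, not the real implementation. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;
typedef struct { int counter; } atomic_t;

/* Compare *old with the current value; on success store new,
 * on failure write the observed value back through old. */
static bool try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	return __atomic_compare_exchange_n(&v->counter, old, new, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_t lock = { 0 };
	int val = 0;	/* signed temporary, as in the patch: no warning */

	if (try_cmpxchg_acquire(&lock, &val, 1))
		puts("acquired");

	/* With the pre-patch "u32 val = 0;" instead, gcc -Wpointer-sign
	 * (enabled by W=2) warns that the pointer targets of argument 2
	 * differ in signedness. */
	return 0;
}

The generated code is identical either way; the patch only makes the
declared type of the temporary match the parameter type it is passed as.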
Fixes: 27df89689e25 ("locking/spinlocks: Remove an instruction from spin and write locks")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'include/asm-generic/qspinlock.h')
-rw-r--r--  include/asm-generic/qspinlock.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 4fe7fd0fe834..d74b13825501 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -60,7 +60,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	u32 val = atomic_read(&lock->val);
+	int val = atomic_read(&lock->val);
 
 	if (unlikely(val))
 		return 0;
@@ -77,7 +77,7 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val = 0;
+	int val = 0;
 
 	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;