author	Will Deacon <will.deacon@arm.com>	2018-09-13 16:28:33 +0300
committer	Will Deacon <will.deacon@arm.com>	2018-12-07 20:28:01 +0300
commit	b4f9209bfcd5964551de434342818334ab9c8c7e (patch)
tree	69578d6a718bb6f9ff0f3f4cad3cecb801a88758 /arch/arm64/include
parent	5ef3fe4cecdf82fdd71ce78988403963d01444d4 (diff)
arm64: Avoid masking "old" for LSE cmpxchg() implementation

The CAS instructions implicitly access only the relevant bits of the
"old" argument, so there is no need for explicit masking via
type-casting as there is in the LL/SC implementation.

Move the casting into the LL/SC code and remove it altogether for the
LSE implementation.

Signed-off-by: Will Deacon <will.deacon@arm.com>
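To see why the masking matters at all, consider a short sketch
(illustration only, not kernel code; names are hypothetical): a sub-word
exclusive load zero-extends into its register, so comparing it against
an unmasked 64-bit "old" can fail even when the low bits match.

	/*
	 * Illustration only: an 8-bit exclusive load (ldxrb)
	 * zero-extends the byte, but "old" may carry non-zero upper
	 * bits (e.g. from sign extension), so a full-width compare
	 * fails spuriously unless "old" is masked somewhere.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t old = (uint64_t)(int64_t)(int8_t)0xaa; /* 0xff..ffaa */
		uint8_t loaded = 0xaa;	/* what ldxrb would return */

		/* Full-width compare: fails despite a matching low byte. */
		printf("unmasked: %d\n", old == (uint64_t)loaded);	/* 0 */

		/* Masked compare, as the LL/SC code now does up front. */
		printf("masked:   %d\n", (uint8_t)old == loaded);	/* 1 */
		return 0;
	}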
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/atomic_ll_sc.h	8
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	4
-rw-r--r--	arch/arm64/include/asm/cmpxchg.h	4
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f02d3bf7b9e6..b53f70dd6e10 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -257,6 +257,14 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long tmp; \
u##sz oldval; \
\
+ /* \
+ * Sub-word sizes require explicit casting so that the compare \
+ * part of the cmpxchg doesn't end up interpreting non-zero \
+ * upper bits of the register containing "old". \
+ */ \
+ if (sz < 32) \
+ old = (u##sz)old; \
+ \
asm volatile( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 4d6f917b654e..a424355240c5 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -448,11 +448,11 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
- unsigned long old, \
+ u##sz old, \
u##sz new) \
{ \
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
- register unsigned long x1 asm ("x1") = old; \
+ register u##sz x1 asm ("x1") = old; \
register u##sz x2 asm ("x2") = new; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
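The LSE path needs no such step because CASB/CASH compare only the
relevant low bits of the register holding "old". A minimal freestanding
sketch (function name hypothetical; assumes an ARMv8.1-A toolchain,
e.g. -march=armv8.1-a+lse):

	#include <stdint.h>

	/*
	 * Illustration only: an 8-bit compare-and-swap built on the LSE
	 * CASALB instruction. CASALB compares just the low byte of "old"
	 * against memory, so any junk in bits 8-63 is ignored - which is
	 * why the (u8) cast can be dropped on this path.
	 */
	static inline uint8_t casalb_sketch(volatile uint8_t *ptr,
					    uint64_t old, uint8_t new)
	{
		uint64_t ret = old;	/* upper bits may be non-zero */

		asm volatile("casalb %w[ret], %w[new], %[v]"
			     : [ret] "+r" (ret), [v] "+Q" (*ptr)
			     : [new] "r" (new)
			     : "memory");

		return (uint8_t)ret;	/* CASALB writes back the loaded byte */
	}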
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 1f0340fc6dad..3f9376f1c409 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -123,9 +123,9 @@ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
{ \
switch (size) { \
case 1: \
- return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \
+ return __cmpxchg_case##sfx##_8(ptr, old, new); \
case 2: \
- return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \
+ return __cmpxchg_case##sfx##_16(ptr, old, new); \
case 4: \
return __cmpxchg_case##sfx##_32(ptr, old, new); \
case 8: \