author      Joel Stanley <joel@jms.id.au>    2020-08-31 03:46:52 +0300
committer   Joel Stanley <joel@jms.id.au>    2020-08-31 03:46:57 +0300
commit      0dd0c8c492fa70707ca4f0d36dcb2e3c64105b16 (patch)
tree        a420abd8f26264544246602c60d161a7cc4de390 /arch/parisc
parent      31d8605658d37d9197a989838508481d5dc1d8bc (diff)
parent      9ece50d8a470ca7235ffd6ac0f9c5f0f201fe2c8 (diff)
Merge tag 'v5.8.5' into dev-5.8

This is the 5.8.5 stable release

Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'arch/parisc')
-rw-r--r--   arch/parisc/include/asm/barrier.h  | 61
-rw-r--r--   arch/parisc/include/asm/spinlock.h | 33
-rw-r--r--   arch/parisc/kernel/entry.S         | 48
-rw-r--r--   arch/parisc/kernel/syscall.S       | 24
4 files changed, 100 insertions, 66 deletions
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
index dbaaca84f27f..640d46edf32e 100644
--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -26,6 +26,67 @@
#define __smp_rmb() mb()
#define __smp_wmb() mb()
+#define __smp_store_release(p, v) \
+do { \
+ typeof(p) __p = (p); \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("stb,ma %0,0(%1)" \
+ : : "r"(*(__u8 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("sth,ma %0,0(%1)" \
+ : : "r"(*(__u16 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("stw,ma %0,0(%1)" \
+ : : "r"(*(__u32 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("std,ma %0,0(%1)" \
+ : : "r"(*(__u64 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ typeof(p) __p = (p); \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("ldb,ma 0(%1),%0" \
+ : "=r"(*(__u8 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("ldh,ma 0(%1),%0" \
+ : "=r"(*(__u16 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("ldw,ma 0(%1),%0" \
+ : "=r"(*(__u32 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("ldd,ma 0(%1),%0" \
+ : "=r"(*(__u64 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+ __u.__val; \
+})
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
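
The new helpers let the generic smp_store_release()/smp_load_acquire() wrappers map onto a single ordered PA-RISC store or load ("stX,ma" / "ldX,ma") instead of falling back to a full mb(). A minimal, hypothetical producer/consumer sketch of how such a pairing is used in kernel code (struct and function names are illustrative, not part of this patch; assumes <asm/barrier.h> via the usual kernel headers):

/* Hypothetical example, not part of the patch. */
struct msg {
	int data;
	int ready;
};

static void publish(struct msg *m, int value)
{
	m->data = value;
	/* Release: the data store above is ordered before the flag store. */
	smp_store_release(&m->ready, 1);
}

static int consume(struct msg *m)
{
	/* Acquire: the flag load is ordered before the data load below. */
	if (smp_load_acquire(&m->ready))
		return m->data;
	return -1;
}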
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 70fecb8dc4e2..51b6c47f802f 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,34 +10,25 @@
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
- smp_mb();
return *a == 0;
}
-static inline void arch_spin_lock(arch_spinlock_t *x)
-{
- volatile unsigned int *a;
-
- a = __ldcw_align(x);
- while (__ldcw(a) == 0)
- while (*a == 0)
- cpu_relax();
-}
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
- unsigned long flags_dis;
a = __ldcw_align(x);
- while (__ldcw(a) == 0) {
- local_save_flags(flags_dis);
- local_irq_restore(flags);
+ while (__ldcw(a) == 0)
while (*a == 0)
- cpu_relax();
- local_irq_restore(flags_dis);
- }
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
}
#define arch_spin_lock_flags arch_spin_lock_flags
@@ -46,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
-#ifdef CONFIG_SMP
- (void) __ldcw(a);
-#else
- mb();
-#endif
- *a = 1;
+ /* Release with ordered store. */
+ __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
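
Two things change here: arch_spin_lock_flags() now re-enables interrupts while spinning only when PSW_SM_I was set in the caller's saved flags, and arch_spin_unlock() drops the LDCW barrier (or UP-only mb()) in favour of a single ordered store. In C terms the new unlock is roughly a release store of 1 to the aligned lock word; a hedged sketch (function name illustrative, the real code issues "stw,ma" inline):

/* Sketch only: C-level reading of the new arch_spin_unlock(). */
static inline void spin_unlock_sketch(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	/* Release: everything done inside the critical section is ordered
	 * before the lock word becomes nonzero (unlocked) again. */
	smp_store_release(a, 1);
}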
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4b484ec7c7da..519f9056fd00 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -454,7 +454,6 @@
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
- LDCW 0(\tmp),\tmp1
b \fault
stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
@@ -464,23 +463,26 @@
3:
.endm
- /* Release pa_tlb_lock lock without reloading lock address. */
- .macro tlb_unlock0 spc,tmp,tmp1
+ /* Release pa_tlb_lock lock without reloading lock address.
+ Note that the values in the register spc are limited to
+ NR_SPACE_IDS (262144). Thus, the stw instruction always
+ stores a nonzero value even when register spc is 64 bits.
+ We use an ordered store to ensure all prior accesses are
+ performed prior to releasing the lock. */
+ .macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
98: or,COND(=) %r0,\spc,%r0
- LDCW 0(\tmp),\tmp1
- or,COND(=) %r0,\spc,%r0
- stw \spc,0(\tmp)
+ stw,ma \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
- .macro tlb_unlock1 spc,tmp,tmp1
+ .macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
98: load_pa_tlb_lock \tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
- tlb_unlock0 \spc,\tmp,\tmp1
+ tlb_unlock0 \spc,\tmp
#endif
.endm
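
The comment above explains why the scratch register argument disappears: the space id written back is below NR_SPACE_IDS, so a single ordered store both releases pa_tlb_lock and orders the preceding TLB updates, and every call site below loses its \tmp1 operand. A hedged C rendering of the new tlb_unlock0 (CONFIG_SMP case; function name illustrative):

/* Sketch only: the real code is the assembly macro above. */
static inline void tlb_unlock0_sketch(unsigned long spc, volatile unsigned int *lock)
{
	if (spc == 0)	/* the or,COND(=) guard nullifies the store for space id 0 */
		return;
	/* spc is below NR_SPACE_IDS (262144), so the stored low word is
	 * nonzero: one release store frees the lock and orders prior accesses. */
	smp_store_release(lock, (unsigned int)spc);
}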
@@ -1163,7 +1165,7 @@ dtlb_miss_20w:
idtlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1223,7 +1225,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1256,7 +1258,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1285,7 +1287,7 @@ dtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1313,7 +1315,7 @@ nadtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1420,7 +1422,7 @@ itlb_miss_20w:
iitlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1444,7 +1446,7 @@ naitlb_miss_20w:
iitlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1478,7 +1480,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1502,7 +1504,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1532,7 +1534,7 @@ itlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1552,7 +1554,7 @@ naitlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0,t1
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1582,7 +1584,7 @@ dbit_trap_20w:
idtlbt pte,prot
- tlb_unlock0 spc,t0,t1
+ tlb_unlock0 spc,t0
rfir
nop
#else
@@ -1608,7 +1610,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock0 spc,t0,t1
+ tlb_unlock0 spc,t0
rfir
nop
@@ -1628,7 +1630,7 @@ dbit_trap_20:
idtlbt pte,prot
- tlb_unlock0 spc,t0,t1
+ tlb_unlock0 spc,t0
rfir
nop
#endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f05c9d5b6b9e..3ad61a177f5b 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -640,11 +640,7 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
-#ifdef CONFIG_SMP
-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -658,11 +654,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
-#ifdef CONFIG_SMP
-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
@@ -863,11 +855,7 @@ cas2_action:
cas2_end:
/* Free lock */
-#ifdef CONFIG_SMP
-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -877,11 +865,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
-#ifdef CONFIG_SMP
-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
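
In the LWS compare-and-swap paths the same idea applies: the lock is freed by storing the (nonzero) lock address with "stw,ma", so the separate LDCW barrier before the plain store is no longer needed. A rough, hedged C shape of the cas_action fast path (error handling and the exact userspace return convention are omitted; names are illustrative):

/* Sketch only: 'lock' is the per-CPU LWS lock word; addr/old/new mirror
 * the %r26/%r25/%r24 arguments handled by the assembly above. */
static unsigned int lws_cas_sketch(volatile unsigned int *lock,
				   unsigned int *addr,
				   unsigned int old, unsigned int new)
{
	unsigned int prev;

	while (__ldcw(lock) == 0)	/* take the lock: ldcw zeroes the word */
		cpu_relax();

	prev = *addr;
	if (prev == old)		/* sub,<> nullifies the store on mismatch */
		*addr = new;

	/* Free the lock: the assembly now does a single ordered store of the
	 * nonzero lock address; a release store of 1 plays the same role here. */
	smp_store_release(lock, 1);

	return prev;
}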