| author | Will Deacon <will.deacon@arm.com> | 2015-05-29 15:31:10 +0300 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2015-07-27 17:28:53 +0300 |
| commit | 0ea366f5e1b6413a6095dce60ea49ae51e468b61 (patch) | |
| tree | fce4fc690edf16784d21a714415a74a8ce53eb2b /arch/arm64/include/asm/atomic_ll_sc.h | |
| parent | a82e62382fcbbf5c3348e802af73583e0cac39c0 (diff) | |
| download | linux-0ea366f5e1b6413a6095dce60ea49ae51e468b61.tar.xz | |
arm64: atomics: prefetch the destination word for write prior to stxr
The cost of changing a cacheline from shared to exclusive state can be
significant, especially when this is triggered by an exclusive store,
since it may result in having to retry the transaction.
This patch makes use of prfm to prefetch cachelines for write prior to
ldxr/stxr loops when using the ll/sc atomic routines.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
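
To make the change concrete outside the macro machinery, here is a minimal standalone sketch of the pattern this patch establishes, roughly what the ll/sc ATOMIC_OP expansion looks like for an add, with the new prefetch ahead of the ldxr/stxr loop. The function name and the bare `int *` argument are illustrative, not kernel code:

```c
/*
 * Minimal sketch (not the kernel macro itself): a pstl1strm
 * prefetch-for-write ahead of the ldxr/stxr loop, so the cacheline
 * is already in exclusive state when the store-exclusive executes.
 */
static inline void ll_sc_atomic_add(int i, int *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// ll_sc_atomic_add\n"
"	prfm	pstl1strm, %2\n"	/* prefetch for store, L1, streaming */
"1:	ldxr	%w0, %2\n"		/* load-exclusive the counter */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"		/* fails (tmp != 0) if exclusivity was lost */
"	cbnz	%w1, 1b\n"		/* retry the transaction on failure */
	: "=&r" (result), "=&r" (tmp), "+Q" (*v)
	: "Ir" (i));
}
```

PSTL1STRM encodes prefetch-for-store, targeting the L1 cache, with the streaming (non-temporal) hint. The point of issuing it up front is that the shared-to-exclusive upgrade happens before the loop rather than being triggered by the stxr itself, where a lost cacheline would force the whole transaction to retry.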
Diffstat (limited to 'arch/arm64/include/asm/atomic_ll_sc.h')
-rw-r--r-- | arch/arm64/include/asm/atomic_ll_sc.h | 9 |
1 file changed, 9 insertions, 0 deletions
```diff
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 5a9fb37272d4..50d6abd3c439 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -45,6 +45,7 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
 	int result; \
 	\
 	asm volatile("// atomic_" #op "\n" \
+"	prfm	pstl1strm, %2\n" \
 "1:	ldxr	%w0, %2\n" \
 "	" #asm_op "	%w0, %w0, %w3\n" \
 "	stxr	%w1, %w0, %2\n" \
@@ -62,6 +63,7 @@ __LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
 	int result; \
 	\
 	asm volatile("// atomic_" #op "_return\n" \
+"	prfm	pstl1strm, %2\n" \
 "1:	ldxr	%w0, %2\n" \
 "	" #asm_op "	%w0, %w0, %w3\n" \
 "	stlxr	%w1, %w0, %2\n" \
@@ -98,6 +100,7 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
 	int oldval;
 
 	asm volatile("// atomic_cmpxchg\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	eor	%w0, %w1, %w3\n"
 "	cbnz	%w0, 2f\n"
@@ -121,6 +124,7 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
 	unsigned long tmp; \
 	\
 	asm volatile("// atomic64_" #op "\n" \
+"	prfm	pstl1strm, %2\n" \
 "1:	ldxr	%0, %2\n" \
 "	" #asm_op "	%0, %0, %3\n" \
 "	stxr	%w1, %0, %2\n" \
@@ -138,6 +142,7 @@ __LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
 	unsigned long tmp; \
 	\
 	asm volatile("// atomic64_" #op "_return\n" \
+"	prfm	pstl1strm, %2\n" \
 "1:	ldxr	%0, %2\n" \
 "	" #asm_op "	%0, %0, %3\n" \
 "	stlxr	%w1, %0, %2\n" \
@@ -174,6 +179,7 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
 	unsigned long res;
 
 	asm volatile("// atomic64_cmpxchg\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%1, %2\n"
 "	eor	%0, %1, %3\n"
 "	cbnz	%w0, 2f\n"
@@ -196,6 +202,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
@@ -220,6 +227,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
 	unsigned long tmp, oldval; \
 	\
 	asm volatile( \
+	"	prfm	pstl1strm, %2\n" \
 	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
 	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
 	"	cbnz	%" #w "[tmp], 2f\n" \
@@ -259,6 +267,7 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
 	unsigned long tmp, ret; \
 	\
 	asm volatile("// __cmpxchg_double" #name "\n" \
+	"	prfm	pstl1strm, %2\n" \
 "1:	ldxp	%0, %1, %2\n" \
 "	eor	%0, %0, %3\n" \
 "	eor	%1, %1, %4\n" \
```