summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGuo Ren <guoren@linux.alibaba.com>2023-12-31 11:29:53 +0300
committerJi Sheng Teoh <jisheng.teoh@starfivetech.com>2024-02-21 05:00:54 +0300
commit62033cdf2cc2d63f3ca9db3d6cfe376115383e9a (patch)
tree4afe69c5469df25d4c8cc8abfa0b303534dc89e8
parenta7074873b0a164e4defb8a664007c76988419f3c (diff)
downloadlinux-62033cdf2cc2d63f3ca9db3d6cfe376115383e9a.tar.xz
riscv: xchg: Prefetch the destination word for sc.w
The cost of changing a cacheline from shared to exclusive state can be significant, especially when this is triggered by an exclusive store, since it may result in having to retry the transaction. This patch makes use of prefetch.w to prefetch cachelines for write prior to lr/sc loops when using the xchg_small atomic routine. This patch is inspired by commit: 0ea366f5e1b6 ("arm64: atomics: prefetch the destination word for write prior to stxr"). Signed-off-by: Guo Ren <guoren@linux.alibaba.com> Signed-off-by: Guo Ren <guoren@kernel.org>
-rw-r--r--arch/riscv/include/asm/cmpxchg.h4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 26cea2395aae..d7b9d7951f08 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,6 +10,7 @@
#include <asm/barrier.h>
#include <asm/fence.h>
+#include <asm/processor.h>
#define __arch_xchg_masked(prepend, append, r, p, n) \
({ \
@@ -23,6 +24,7 @@
\
__asm__ __volatile__ ( \
prepend \
+ PREFETCHW_ASM(%5) \
"0: lr.w %0, %2\n" \
" and %1, %0, %z4\n" \
" or %1, %1, %z3\n" \
@@ -30,7 +32,7 @@
" bnez %1, 0b\n" \
append \
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
- : "rJ" (__newx), "rJ" (~__mask) \
+ : "rJ" (__newx), "rJ" (~__mask), "rJ" (__ptr32b) \
: "memory"); \
\
r = (__typeof__(*(p)))((__retx & __mask) >> __s); \