author     Mayuresh Chitale <mchitale@ventanamicro.com>    2023-06-23 15:38:49 +0300
committer  Ji Sheng Teoh <jisheng.teoh@starfivetech.com>   2024-01-31 15:00:10 +0300
commit     48567af56333f93bbb168a2ac4ffe5177061e8f5 (patch)
tree       a0248a71181289d1ba6267a93e95c48427b87e98
parent     c657db3b77ea6a5406ff86fd0b1aa4ce26b45c7e (diff)
riscv: mm: use svinval instructions instead of sfence.vma
When svinval is supported, the local_flush_tlb_page* functions prefer the
following sequence to optimize TLB flushes, instead of a simple sfence.vma:

    sfence.w.inval
    sinval.vma
      .
      .
    sinval.vma
    sfence.inval.ir

The maximum number of consecutive sinval.vma instructions that can be
executed in the local_flush_tlb_page* functions is limited to 64. This is
required to avoid soft lockups, and the approach is similar to that used
in arm64.

Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
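For illustration, flushing a three-page range on a CPU that implements
Svinval would emit roughly the instruction stream below. This is a sketch
only: the register choices are arbitrary and ASID handling is omitted, so
it is not the exact code generated by the patch.

    sfence.w.inval         # order earlier page-table stores before the invalidations
    sinval.vma  a0, zero   # invalidate the translation for the page at (a0), any ASID
    sinval.vma  a1, zero
    sinval.vma  a2, zero
    sfence.inval.ir        # order the invalidations before later implicit references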
-rw-r--r--  arch/riscv/include/asm/tlbflush.h |  1
-rw-r--r--  arch/riscv/mm/tlbflush.c          | 66
2 files changed, 59 insertions(+), 8 deletions(-)
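Before the patch body, here is a standalone sketch of the flush-or-invalidate
policy that the new local_flush_tlb_range()/local_flush_tlb_range_asid()
implement below. It is illustrative userspace C rather than kernel code; the
helper name flush_decision() and the sample ranges are made up for this
example, only the threshold value mirrors the patch:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Mirrors the default threshold added by the patch. */
    static const unsigned long tlb_flush_all_threshold = 64;

    /* Decide between a full TLB flush and a per-entry sinval.vma loop. */
    static const char *flush_decision(unsigned long size, unsigned long stride)
    {
            unsigned long num_entries = DIV_ROUND_UP(size, stride);

            if (!num_entries || num_entries > tlb_flush_all_threshold)
                    return "full TLB flush";
            return "per-entry sinval.vma loop";
    }

    int main(void)
    {
            /* 16 pages at 4 KiB stride -> 16 entries -> per-entry loop */
            printf("%s\n", flush_decision(16 * 4096UL, 4096UL));
            /* 1 MiB at 4 KiB stride -> 256 entries -> full flush */
            printf("%s\n", flush_decision(1UL << 20, 4096UL));
            return 0;
    }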
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..56490c04b0bd 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
#endif /* CONFIG_MMU */
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+extern unsigned long tlb_flush_all_threshold;
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..f63cdf8644f3 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,6 +5,17 @@
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
+#include <asm/hwcap.h>
+#include <asm/insn-def.h>
+
+#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
+
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below. Platforms may override the threshold
+ * value based on marchid, mvendorid, and mimpid.
+ */
+unsigned long tlb_flush_all_threshold __read_mostly = 64;
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
@@ -24,21 +35,60 @@ static inline void local_flush_tlb_page_asid(unsigned long addr,
}
static inline void local_flush_tlb_range(unsigned long start,
- unsigned long size, unsigned long stride)
+ unsigned long size,
+ unsigned long stride)
{
- if (size <= stride)
- local_flush_tlb_page(start);
- else
+ unsigned long end = start + size;
+ unsigned long num_entries = DIV_ROUND_UP(size, stride);
+
+ if (!num_entries || num_entries > tlb_flush_all_threshold) {
local_flush_tlb_all();
+ return;
+ }
+
+ if (has_svinval())
+ asm volatile(SFENCE_W_INVAL() ::: "memory");
+
+ while (start < end) {
+ if (has_svinval())
+ asm volatile(SINVAL_VMA(%0, zero)
+ : : "r" (start) : "memory");
+ else
+ local_flush_tlb_page(start);
+ start += stride;
+ }
+
+ if (has_svinval())
+ asm volatile(SFENCE_INVAL_IR() ::: "memory");
}
static inline void local_flush_tlb_range_asid(unsigned long start,
- unsigned long size, unsigned long stride, unsigned long asid)
+ unsigned long size,
+ unsigned long stride,
+ unsigned long asid)
{
- if (size <= stride)
- local_flush_tlb_page_asid(start, asid);
- else
+ unsigned long end = start + size;
+ unsigned long num_entries = DIV_ROUND_UP(size, stride);
+
+ if (!num_entries || num_entries > tlb_flush_all_threshold) {
local_flush_tlb_all_asid(asid);
+ return;
+ }
+
+ if (has_svinval())
+ asm volatile(SFENCE_W_INVAL() ::: "memory");
+
+ while (start < end) {
+ if (has_svinval())
+ asm volatile(SINVAL_VMA(%0, %1) : : "r" (start),
+ "r" (asid) : "memory");
+ else
+ local_flush_tlb_page_asid(start, asid);
+ start += stride;
+ }
+
+ if (has_svinval())
+ asm volatile(SFENCE_INVAL_IR() ::: "memory");
}
static void __ipi_flush_tlb_all(void *info)