author    Ryan Roberts <ryan.roberts@arm.com>        2026-03-02 16:55:49 +0300
committer Catalin Marinas <catalin.marinas@arm.com>  2026-03-13 20:23:03 +0300
commit    d2bf3226952c64d1c2ce4995cce60b3fb8ae5f33 (patch)
tree      37a2bf9b4fe4442d024eacf10e9a067bfe12249a
parent    5b3fb8a6b429c33ee669d08b1a883d881e9614a1 (diff)
arm64: mm: Introduce a C wrapper for by-range TLB invalidation
As part of efforts to reduce our reliance on complex preprocessor macros
for TLB invalidation routines, introduce a new C wrapper for by-range
TLB invalidation which can be used instead of the __tlbi() macro and can
additionally be called from C code.

Each specific tlbi range op is implemented as a C function, and the
appropriate function pointer is passed to __tlbi_range(). Since
everything is declared inline and is statically resolvable, the compiler
converts the indirect function call into a direct, inlined call.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
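The central claim above — that a statically-known function pointer collapses to a direct, inlined call — can be checked outside the kernel. Below is a minimal userspace sketch of the same dispatch pattern; all names (demo_op, demo_dispatch, op_a, op_b) are invented for illustration and do not appear in the patch.

#include <stdint.h>
#include <stdio.h>

/* Each "op" is a small inline function, mirroring the tlbi wrappers. */
typedef void (*demo_op)(uint64_t arg);

static inline void op_a(uint64_t arg)
{
	printf("op_a(%#llx)\n", (unsigned long long)arg);
}

static inline void op_b(uint64_t arg)
{
	printf("op_b(%#llx)\n", (unsigned long long)arg);
}

/*
 * Because 'op' is a compile-time constant at every call site and the
 * callees are inline, the compiler resolves this indirect call into a
 * direct, typically inlined, call (inspect with e.g. gcc -O2 -S: no
 * branch-to-register remains in the output).
 */
static inline void demo_dispatch(demo_op op, uint64_t arg)
{
	op(arg);
}

int main(void)
{
	demo_dispatch(op_a, 0x1000);
	demo_dispatch(op_b, 0x2000);
	return 0;
}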
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a0e3ebe29986..b3b86e5f7034 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -468,6 +468,36 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
*/
+static __always_inline void rvae1is(u64 arg)
+{
+ __tlbi(rvae1is, arg);
+}
+
+static __always_inline void rvale1(u64 arg)
+{
+ __tlbi(rvale1, arg);
+}
+
+static __always_inline void rvale1is(u64 arg)
+{
+ __tlbi(rvale1is, arg);
+}
+
+static __always_inline void rvaale1is(u64 arg)
+{
+ __tlbi(rvaale1is, arg);
+}
+
+static __always_inline void ripas2e1is(u64 arg)
+{
+ __tlbi(ripas2e1is, arg);
+}
+
+static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
+{
+ op(arg);
+}
+
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
do { \
@@ -495,7 +525,7 @@ do { \
if (num >= 0) { \
addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
scale, num, tlb_level); \
- __tlbi(r##op, addr); \
+ __tlbi_range(r##op, addr); \
if (tlbi_user) \
__tlbi_user(r##op, addr); \
__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
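
For context on the hunk above: after this change, the r##op token paste in __flush_tlb_range_op() produces an identifier such as rvale1is that now names one of the new C wrappers, so the range op is dispatched through __tlbi_range() instead of being expanded directly inside __tlbi(). The following self-contained mock shows that interaction under stated assumptions: the demo_ names are invented, and the real __tlbi() emits a TLBI instruction, which is mocked here with a printf so the example runs in userspace.

#include <stdint.h>
#include <stdio.h>

/* Mock of __tlbi(): the real macro emits "tlbi <op>, <arg>". */
#define demo_tlbi(op, arg) \
	printf("tlbi " #op ", %#llx\n", (unsigned long long)(arg))

typedef void (*demo_tlbi_op)(uint64_t arg);

/* Counterpart of the rvale1is() wrapper added by the patch. */
static inline void rvale1is(uint64_t arg)
{
	demo_tlbi(rvale1is, arg);
}

/* Counterpart of __tlbi_range(): indirect in form, direct after inlining. */
static inline void demo_tlbi_range(demo_tlbi_op op, uint64_t arg)
{
	op(arg);
}

/*
 * Counterpart of the patched line in __flush_tlb_range_op(): r##op pastes
 * the 'r' prefix onto the op name, and the resulting identifier resolves
 * to the C wrapper rather than a bare instruction mnemonic.
 */
#define demo_flush_range_op(op, arg)	demo_tlbi_range(r##op, arg)

int main(void)
{
	/* Expands to demo_tlbi_range(rvale1is, 0x1234). */
	demo_flush_range_op(vale1is, 0x1234);
	return 0;
}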