git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
arm64: mm: Introduce a C wrapper for by-range TLB invalidation
author: Ryan Roberts <ryan.roberts@arm.com>
Mon, 2 Mar 2026 13:55:49 +0000 (13:55 +0000)
committer: Catalin Marinas <catalin.marinas@arm.com>
Fri, 13 Mar 2026 17:23:03 +0000 (17:23 +0000)
As part of efforts to reduce our reliance on complex preprocessor macros
for TLB invalidation routines, introduce a new C wrapper for by-range
TLB invalidation which can be used instead of the __tlbi() macro and can
additionally be called from C code.

Each specific tlbi range op is implemented as a C function and the
appropriate function pointer is passed to __tlbi_range(). Since
everything is declared inline and is statically resolvable, the compiler
will convert the indirect function call to a direct inline execution.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/tlbflush.h

index a0e3ebe2998647b5f994e3bccb2983364fbdaa75..b3b86e5f7034e4146eaa6d5f1b67d6d6480558ac 100644 (file)
@@ -468,6 +468,36 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  *    operations can only span an even number of pages. We save this for last to
  *    ensure 64KB start alignment is maintained for the LPA2 case.
  */
+/* Issue TLBI RVAE1IS (range invalidate by VA, EL1, Inner Shareable) via __tlbi(). */
+static __always_inline void rvae1is(u64 arg)
+{
+       __tlbi(rvae1is, arg);
+}
+
+/* Issue TLBI RVALE1 (range invalidate by VA, last level only, EL1, local) via __tlbi(). */
+static __always_inline void rvale1(u64 arg)
+{
+       __tlbi(rvale1, arg);
+}
+
+/* Issue TLBI RVALE1IS (range invalidate by VA, last level only, EL1, Inner Shareable) via __tlbi(). */
+static __always_inline void rvale1is(u64 arg)
+{
+       __tlbi(rvale1is, arg);
+}
+
+/* Issue TLBI RVAALE1IS (range invalidate by VA, all ASIDs, last level only, EL1, Inner Shareable) via __tlbi(). */
+static __always_inline void rvaale1is(u64 arg)
+{
+       __tlbi(rvaale1is, arg);
+}
+
+/* Issue TLBI RIPAS2E1IS (range invalidate by IPA, stage 2, EL1, Inner Shareable) via __tlbi(). */
+static __always_inline void ripas2e1is(u64 arg)
+{
+       __tlbi(ripas2e1is, arg);
+}
+
+/*
+ * C wrapper for by-range TLB invalidation: invoke the given range op on
+ * the pre-encoded range argument. Since everything is __always_inline and
+ * @op is statically resolvable at each call site, the compiler converts
+ * the indirect call into a direct inline execution (per commit message).
+ * NOTE(review): tlbi_op is declared elsewhere in this header — presumably
+ * a function pointer type taking u64; confirm against the full file.
+ */
+static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
+{
+       op(arg);
+}
+
 #define __flush_tlb_range_op(op, start, pages, stride,                 \
                                asid, tlb_level, tlbi_user, lpa2)       \
 do {                                                                   \
@@ -495,7 +525,7 @@ do {                                                                        \
                if (num >= 0) {                                         \
                        addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
                                                scale, num, tlb_level); \
-                       __tlbi(r##op, addr);                            \
+                       __tlbi_range(r##op, addr);                      \
                        if (tlbi_user)                                  \
                                __tlbi_user(r##op, addr);               \
                        __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \