git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
arm64: mm: Push __TLBI_VADDR() into __tlbi_level()
author: Will Deacon <will@kernel.org>
Mon, 2 Mar 2026 13:55:51 +0000 (13:55 +0000)
committer: Catalin Marinas <catalin.marinas@arm.com>
Fri, 13 Mar 2026 17:23:03 +0000 (17:23 +0000)
The __TLBI_VADDR() macro takes an ASID and an address and converts them
into a single argument formatted correctly for a TLB invalidation
instruction.

Rather than have callers worry about this (especially in the case where
the ASID is zero), push the macro down into __tlbi_level() via a new
__tlbi_level_asid() helper.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Linu Cherian <linu.cherian@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/sys_compat.c
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/hyp/vhe/tlb.c

index e586d9b71ea2dd0a560ded566cf7adf585131015..2832305606b72b9a4837aa3030119ed118098f71 100644 (file)
@@ -142,9 +142,10 @@ static __always_inline void ipas2e1is(u64 arg)
        __tlbi(ipas2e1is, arg);
 }
 
-static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+static __always_inline void __tlbi_level_asid(tlbi_op op, u64 addr, u32 level,
+                                             u16 asid)
 {
-       u64 arg = addr;
+       u64 arg = __TLBI_VADDR(addr, asid);
 
        if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
                u64 ttl = level | (get_trans_granule() << 2);
@@ -155,6 +156,11 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
        op(arg);
 }
 
+static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+{
+       __tlbi_level_asid(op, addr, level, 0);
+}
+
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -511,8 +517,7 @@ do {                                                                        \
                if (!system_supports_tlb_range() ||                     \
                    __flush_pages == 1 ||                               \
                    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {  \
-                       addr = __TLBI_VADDR(__flush_start, asid);       \
-                       __tlbi_level(op, addr, tlb_level);              \
+                       __tlbi_level_asid(op, __flush_start, tlb_level, asid);  \
                        __flush_start += stride;                        \
                        __flush_pages -= stride >> PAGE_SHIFT;          \
                        continue;                                       \
@@ -685,6 +690,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
 #undef __tlbi_user
+#undef __TLBI_VADDR
 #endif
 
 #endif
index b9d4998c97efac79bdd32133cf6edbce5f08b208..7e9860143add85f1d465e60830462052f066e601 100644 (file)
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
                         * The workaround requires an inner-shareable tlbi.
                         * We pick the reserved-ASID to minimise the impact.
                         */
-                       __tlbi(aside1is, __TLBI_VADDR(0, 0));
+                       __tlbi(aside1is, 0UL);
                        __tlbi_sync_s1ish();
                }
 
index 218976287d3fe71d8f5048c0dae494034d25e613..4d8fcc7a3a41e062a86672eab432ee75eaecd59a 100644 (file)
@@ -270,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
         * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
         */
        dsb(ishst);
-       __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
+       __tlbi_level(vale2is, addr, level);
        __tlbi_sync_s1ish_hyp();
        isb();
 }
index 3dc1ce0d27fe664469d2cd31b4e675089f976f9d..b29140995d484a66ebb2b5574b406bf57be46c21 100644 (file)
@@ -158,7 +158,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
-       ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);
 
        /*
@@ -188,7 +187,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
-       ipa >>= 12;
        __tlbi_level(ipas2e1, ipa, level);
 
        /*
index 9b480f947da26d0be8b3843b7279250470e87a94..30226f2d5564ac051560d5e2b36ebf06d22e22c0 100644 (file)
@@ -490,14 +490,14 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 
                kvm_clear_pte(ctx->ptep);
                dsb(ishst);
-               __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
+               __tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
        } else {
                if (ctx->end - ctx->addr < granule)
                        return -EINVAL;
 
                kvm_clear_pte(ctx->ptep);
                dsb(ishst);
-               __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+               __tlbi_level(vale2is, ctx->addr, ctx->level);
                *unmapped += granule;
        }
 
index 35855dadfb1b3a5ed6c329c49f8fed564baa7fb7..f7b9dfe3f3a5a9193423bf471181a7aa0db774a8 100644 (file)
@@ -104,7 +104,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
-       ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);
 
        /*
@@ -136,7 +135,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
-       ipa >>= 12;
        __tlbi_level(ipas2e1, ipa, level);
 
        /*