git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
arm64: mm: Implicitly invalidate user ASID based on TLBI operation
author Ryan Roberts <ryan.roberts@arm.com>
Mon, 2 Mar 2026 13:55:50 +0000 (13:55 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 13 Mar 2026 17:23:03 +0000 (17:23 +0000)
When kpti is enabled, separate ASIDs are used for userspace and
kernelspace, requiring ASID-qualified TLB invalidation by virtual
address to invalidate both of them.

Push the logic for invalidating the two ASIDs down into the low-level
tlbi-op-specific functions and remove the burden from the caller to
handle the kpti-specific behaviour.

Co-developed-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/tlbflush.h

index b3b86e5f7034e4146eaa6d5f1b67d6d6480558ac..e586d9b71ea2dd0a560ded566cf7adf585131015 100644 (file)
@@ -102,6 +102,7 @@ typedef void (*tlbi_op)(u64 arg);
 static __always_inline void vae1is(u64 arg)
 {
        __tlbi(vae1is, arg);
+       __tlbi_user(vae1is, arg);
 }
 
 static __always_inline void vae2is(u64 arg)
@@ -112,11 +113,13 @@ static __always_inline void vae2is(u64 arg)
 static __always_inline void vale1(u64 arg)
 {
        __tlbi(vale1, arg);
+       __tlbi_user(vale1, arg);
 }
 
 static __always_inline void vale1is(u64 arg)
 {
        __tlbi(vale1is, arg);
+       __tlbi_user(vale1is, arg);
 }
 
 static __always_inline void vale2is(u64 arg)
@@ -152,11 +155,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
        op(arg);
 }
 
-#define __tlbi_user_level(op, arg, level) do {                         \
-       if (arm64_kernel_unmapped_at_el0())                             \
-               __tlbi_level(op, (arg | USER_ASID_FLAG), level);        \
-} while (0)
-
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -444,8 +442,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * @stride:    Flush granularity
  * @asid:      The ASID of the task (0 for IPA instructions)
  * @tlb_level: Translation Table level hint, if known
- * @tlbi_user: If 'true', call an additional __tlbi_user()
- *              (typically for user ASIDs). 'flase' for IPA instructions
  * @lpa2:      If 'true', the lpa2 scheme is used as set out below
  *
  * When the CPU does not support TLB range operations, flush the TLB
@@ -471,16 +467,19 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 static __always_inline void rvae1is(u64 arg)
 {
        __tlbi(rvae1is, arg);
+       __tlbi_user(rvae1is, arg);
 }
 
 static __always_inline void rvale1(u64 arg)
 {
        __tlbi(rvale1, arg);
+       __tlbi_user(rvale1, arg);
 }
 
 static __always_inline void rvale1is(u64 arg)
 {
        __tlbi(rvale1is, arg);
+       __tlbi_user(rvale1is, arg);
 }
 
 static __always_inline void rvaale1is(u64 arg)
@@ -499,7 +498,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
 }
 
 #define __flush_tlb_range_op(op, start, pages, stride,                 \
-                               asid, tlb_level, tlbi_user, lpa2)       \
+                               asid, tlb_level, lpa2)                  \
 do {                                                                   \
        typeof(start) __flush_start = start;                            \
        typeof(pages) __flush_pages = pages;                            \
@@ -514,8 +513,6 @@ do {                                                                        \
                    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {  \
                        addr = __TLBI_VADDR(__flush_start, asid);       \
                        __tlbi_level(op, addr, tlb_level);              \
-                       if (tlbi_user)                                  \
-                               __tlbi_user_level(op, addr, tlb_level); \
                        __flush_start += stride;                        \
                        __flush_pages -= stride >> PAGE_SHIFT;          \
                        continue;                                       \
@@ -526,8 +523,6 @@ do {                                                                        \
                        addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
                                                scale, num, tlb_level); \
                        __tlbi_range(r##op, addr);                      \
-                       if (tlbi_user)                                  \
-                               __tlbi_user(r##op, addr);               \
                        __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
                        __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
                }                                                       \
@@ -536,7 +531,7 @@ do {                                                                        \
 } while (0)
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
-       __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+       __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
 
 static inline bool __flush_tlb_range_limit_excess(unsigned long start,
                unsigned long end, unsigned long pages, unsigned long stride)
@@ -576,10 +571,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 
        if (last_level)
                __flush_tlb_range_op(vale1is, start, pages, stride, asid,
-                                    tlb_level, true, lpa2_is_enabled());
+                                    tlb_level, lpa2_is_enabled());
        else
                __flush_tlb_range_op(vae1is, start, pages, stride, asid,
-                                    tlb_level, true, lpa2_is_enabled());
+                                    tlb_level, lpa2_is_enabled());
 
        mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
@@ -604,7 +599,7 @@ static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
        dsb(nshst);
        asid = ASID(vma->vm_mm);
        __flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
-                            3, true, lpa2_is_enabled());
+                            3, lpa2_is_enabled());
        mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
                                                    addr + CONT_PTE_SIZE);
        dsb(nsh);
@@ -638,7 +633,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 
        dsb(ishst);
        __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
-                            TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
+                            TLBI_TTL_UNKNOWN, lpa2_is_enabled());
        __tlbi_sync_s1ish();
        isb();
 }
@@ -689,6 +684,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 }
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
+#undef __tlbi_user
 #endif
 
 #endif