From 752a0d1d483e9479f5c59519256fd190139d0b39 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 2 Mar 2026 13:56:00 +0000 Subject: [PATCH] arm64: mm: Provide level hint for flush_tlb_page() Previously TLB invalidations issued by __flush_tlb_page() did not contain a level hint. From the core API documentation, this function is clearly only ever intended to target level 3 (PTE) TLB entries: | 4) ``void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)`` | | This time we need to remove the PAGE_SIZE sized translation | from the TLB. However, the arm64 documentation is more relaxed allowing any last level: | this operation only invalidates a single, last-level page-table | entry and therefore does not affect any walk-caches It turns out that the function was actually being used to invalidate a level 2 mapping via flush_tlb_fix_spurious_fault_pmd(). The bug was benign because the level hint was not set so the HW would still invalidate the PMD mapping, and also because the TLBF_NONOTIFY flag was set, the bounds of the mapping were never used for anything else. Now that we have the new and improved range-invalidation API, it is trivial to fix flush_tlb_fix_spurious_fault_pmd() to explicitly flush the whole range (locally, without notification and last level only). So let's do that, and then update __flush_tlb_page() to hint level 3. 
Reviewed-by: Linu Cherian Signed-off-by: Ryan Roberts [catalin.marinas@arm.com: use "level 3" in the __flush_tlb_page() description] [catalin.marinas@arm.com: tweak the commit message to include the core API text] Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable.h | 5 +++-- arch/arm64/include/asm/tlbflush.h | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7039931df462..b1a96a8f2b17 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -103,8 +103,9 @@ static inline void arch_leave_lazy_mmu_mode(void) #define flush_tlb_fix_spurious_fault(vma, address, ptep) \ __flush_tlb_page(vma, address, TLBF_NOBROADCAST | TLBF_NONOTIFY) -#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \ - __flush_tlb_page(vma, address, TLBF_NOBROADCAST | TLBF_NONOTIFY) +#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \ + __flush_tlb_range(vma, address, address + PMD_SIZE, PMD_SIZE, 2, \ + TLBF_NOBROADCAST | TLBF_NONOTIFY | TLBF_NOWALKCACHE) /* * ZERO_PAGE is a global shared page that is always zero: used diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 5096ec7ab865..47fa4d39a461 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -300,7 +300,7 @@ static inline void __tlbi_sync_s1ish_hyp(void) * __flush_tlb_page(vma, addr, flags) * Invalidate a single user mapping for address 'addr' in the * address space corresponding to 'vma->mm'. Note that this - * operation only invalidates a single, last-level page-table entry + * operation only invalidates a single level 3 page-table entry * and therefore does not affect any walk-caches. 
flags may contain * any combination of TLBF_NONOTIFY (don't call mmu notifiers), * TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST @@ -591,7 +591,7 @@ static inline void __flush_tlb_page(struct vm_area_struct *vma, unsigned long start = round_down(uaddr, PAGE_SIZE); unsigned long end = start + PAGE_SIZE; - __do_flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, + __do_flush_tlb_range(vma, start, end, PAGE_SIZE, 3, TLBF_NOWALKCACHE | flags); } -- 2.47.3