From c3f0778ffeca271b3b221fcdec66784c1eb9440d Mon Sep 17 00:00:00 2001
From: Kevin Brodsky
Date: Mon, 15 Dec 2025 15:03:12 +0000
Subject: [PATCH] powerpc/mm: implement arch_flush_lazy_mmu_mode()

Upcoming changes to the lazy_mmu API will cause
arch_flush_lazy_mmu_mode() to be called when leaving a nested lazy_mmu
section.

Move the relevant logic from arch_leave_lazy_mmu_mode() to
arch_flush_lazy_mmu_mode() and have the former call the latter.  The
radix_enabled() check is required in both as arch_flush_lazy_mmu_mode()
will be called directly from the generic layer in a subsequent patch.

Note: the additional this_cpu_ptr() and radix_enabled() calls on the
arch_leave_lazy_mmu_mode() path will be removed in a subsequent patch.

Link: https://lkml.kernel.org/r/20251215150323.2218608-4-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky
Acked-by: David Hildenbrand
Tested-by: Venkat Rao Bagalkote
Reviewed-by: Yeoreum Yun
Cc: Alexander Gordeev
Cc: Andreas Larsson
Cc: Anshuman Khandual
Cc: Borislav Petkov
Cc: Boris Ostrovsky
Cc: Catalin Marinas
Cc: Christophe Leroy
Cc: David Hildenbrand (Red Hat)
Cc: David S. Miller
Cc: David Woodhouse
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Jann Horn
Cc: Juergen Gross
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Madhavan Srinivasan
Cc: Michael Ellerman
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Nicholas Piggin
Cc: Peter Zijlstra
Cc: Ritesh Harjani (IBM)
Cc: Ryan Roberts
Cc: Suren Baghdasaryan
Cc: Thomas Gleixner
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 .../powerpc/include/asm/book3s/64/tlbflush-hash.h | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 146287d9580f..2d45f57df169 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -41,7 +41,7 @@ static inline void arch_enter_lazy_mmu_mode(void)
 	batch->active = 1;
 }
 
-static inline void arch_leave_lazy_mmu_mode(void)
+static inline void arch_flush_lazy_mmu_mode(void)
 {
 	struct ppc64_tlb_batch *batch;
 
@@ -51,12 +51,21 @@ static inline void arch_leave_lazy_mmu_mode(void)
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch;
+
+	if (radix_enabled())
+		return;
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
+
+	arch_flush_lazy_mmu_mode();
 	batch->active = 0;
 	preempt_enable();
 }
 
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-
 extern void hash__tlbiel_all(unsigned int action);
 
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
-- 
2.47.3
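
For illustration, below is a minimal sketch of the kind of generic-layer
nesting logic the commit message anticipates, showing where the two hooks
would be called from.  The lazy_mmu_nesting counter and the
lazy_mmu_mode_enable()/lazy_mmu_mode_disable() names are assumptions made
up for this sketch, not the API actually introduced by the later patches
(which would also use per-task or per-CPU state rather than a global):

/*
 * Sketch only -- hypothetical generic-layer nesting logic, not the
 * code added by the subsequent patches.
 */
static int lazy_mmu_nesting;

static void lazy_mmu_mode_enable(void)
{
	/* Only the outermost section activates batching. */
	if (lazy_mmu_nesting++ == 0)
		arch_enter_lazy_mmu_mode();
}

static void lazy_mmu_mode_disable(void)
{
	if (--lazy_mmu_nesting == 0) {
		/* Outermost exit: flush and deactivate the batch. */
		arch_leave_lazy_mmu_mode();
	} else {
		/*
		 * Nested exit: flush pending updates but keep the
		 * batch active for the enclosing section.  This is
		 * why arch_flush_lazy_mmu_mode() can no longer be a
		 * no-op, and why it must check radix_enabled()
		 * itself -- here it is reached without going through
		 * arch_leave_lazy_mmu_mode().
		 */
		arch_flush_lazy_mmu_mode();
	}
}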