From 313a05a15a1b29c29c7eb4ae8cf44a7cf0fcf419 Mon Sep 17 00:00:00 2001
From: Kevin Brodsky <kevin.brodsky@arm.com>
Date: Mon, 15 Dec 2025 15:03:20 +0000
Subject: [PATCH] powerpc/mm: replace batch->active with is_lazy_mmu_mode_active()

A per-CPU batch struct is activated when entering lazy MMU mode; its
lifetime is the same as the lazy MMU section (it is deactivated when
leaving the mode).  Preemption is disabled in that interval to ensure
that the per-CPU reference remains valid.

The generic lazy_mmu layer now tracks whether a task is in lazy MMU
mode.  We can therefore use the generic helper
is_lazy_mmu_mode_active() to tell whether a batch struct is active,
instead of tracking it explicitly.

Link: https://lkml.kernel.org/r/20251215150323.2218608-12-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: David Hildenbrand
Reviewed-by: Ritesh Harjani (IBM)
Tested-by: Venkat Rao Bagalkote
Reviewed-by: Yeoreum Yun
Cc: Alexander Gordeev
Cc: Andreas Larsson
Cc: Anshuman Khandual
Cc: Borislav Petkov
Cc: Boris Ostrovsky
Cc: Catalin Marinas
Cc: Christophe Leroy
Cc: David Hildenbrand (Red Hat)
Cc: David S. Miller
Cc: David Woodhouse
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: Jann Horn
Cc: Juergen Gross
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Madhavan Srinivasan
Cc: Michael Ellerman
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Nicholas Piggin
Cc: Peter Zijlstra
Cc: Ryan Roberts
Cc: Suren Baghdasaryan
Cc: Thomas Gleixner
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 9 ---------
 arch/powerpc/mm/book3s64/hash_tlb.c                | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 565c1b7c3eae1..6cc9abcd7b3d2 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -12,7 +12,6 @@
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-	int			active;
 	unsigned long		index;
 	struct mm_struct	*mm;
 	real_pte_t		pte[PPC64_TLB_BATCH_NR];
@@ -26,8 +25,6 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch;
-
 	if (radix_enabled())
 		return;
 	/*
@@ -35,8 +32,6 @@ static inline void arch_enter_lazy_mmu_mode(void)
 	 * operating on kernel page tables.
 	 */
 	preempt_disable();
-	batch = this_cpu_ptr(&ppc64_tlb_batch);
-	batch->active = 1;
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
@@ -53,14 +48,10 @@ static inline void arch_flush_lazy_mmu_mode(void)
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch;
-
 	if (radix_enabled())
 		return;
 
-	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	arch_flush_lazy_mmu_mode();
-	batch->active = 0;
 	preempt_enable();
 }
 
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 787f7a0e27f0c..fbdeb8981ae7b 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -100,7 +100,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 * Check if we have an active batch on this CPU. If not, just
 	 * flush now and return.
 	 */
-	if (!batch->active) {
+	if (!is_lazy_mmu_mode_active()) {
 		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
 		put_cpu_var(ppc64_tlb_batch);
 		return;
-- 
2.47.3
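
For context, a minimal sketch of the kind of per-task tracking the
generic lazy_mmu layer could provide to back is_lazy_mmu_mode_active().
This is an illustrative assumption, not the actual implementation from
the generic lazy_mmu patches earlier in the series; the lazy_mmu_nesting
field and the lazy_mmu_mode_enter()/exit() helper names below are made
up for the example.

	/*
	 * Hypothetical sketch: per-task lazy MMU state, assuming a
	 * lazy_mmu_nesting counter added to task_struct.  NOT the real
	 * generic lazy_mmu API.
	 */
	static inline void lazy_mmu_mode_enter(void)
	{
		/* Mark the task as in lazy MMU mode before the arch hook runs. */
		current->lazy_mmu_nesting++;
		arch_enter_lazy_mmu_mode();
	}

	static inline void lazy_mmu_mode_exit(void)
	{
		arch_leave_lazy_mmu_mode();
		current->lazy_mmu_nesting--;
	}

	static inline bool is_lazy_mmu_mode_active(void)
	{
		/* Non-zero while the task is inside a lazy MMU section. */
		return current->lazy_mmu_nesting != 0;
	}

Under this assumption the conversion is safe: because
arch_enter_lazy_mmu_mode() disables preemption for the whole section on
hash MMUs, hpte_need_flush() can only observe the per-CPU batch from the
same task on the same CPU, so a per-task flag answers the same question
that the removed per-CPU batch->active did.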