git.ipfire.org Git - thirdparty/linux.git/commitdiff
powerpc/mm: replace batch->active with is_lazy_mmu_mode_active()
author: Kevin Brodsky <kevin.brodsky@arm.com>
Mon, 15 Dec 2025 15:03:20 +0000 (15:03 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 21 Jan 2026 03:24:34 +0000 (19:24 -0800)
A per-CPU batch struct is activated when entering lazy MMU mode; its
lifetime is the same as the lazy MMU section (it is deactivated when
leaving the mode).  Preemption is disabled in that interval to ensure that
the per-CPU reference remains valid.

The generic lazy_mmu layer now tracks whether a task is in lazy MMU mode.
We can therefore use the generic helper is_lazy_mmu_mode_active() to tell
whether a batch struct is active instead of tracking it explicitly.

Link: https://lkml.kernel.org/r/20251215150323.2218608-12-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
arch/powerpc/mm/book3s64/hash_tlb.c

index 565c1b7c3eae14009e6269f203e8d77fdfe4f10b..6cc9abcd7b3d285753b10818a735854d199df467 100644 (file)
@@ -12,7 +12,6 @@
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-       int                     active;
        unsigned long           index;
        struct mm_struct        *mm;
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
@@ -26,8 +25,6 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch;
-
        if (radix_enabled())
                return;
        /*
@@ -35,8 +32,6 @@ static inline void arch_enter_lazy_mmu_mode(void)
         * operating on kernel page tables.
         */
        preempt_disable();
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
-       batch->active = 1;
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
@@ -53,14 +48,10 @@ static inline void arch_flush_lazy_mmu_mode(void)
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch;
-
        if (radix_enabled())
                return;
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
 
        arch_flush_lazy_mmu_mode();
-       batch->active = 0;
        preempt_enable();
 }
 
index 787f7a0e27f0c568b981c199b4c8835de5da0761..fbdeb8981ae7b0651b00cc69ae6be41a426a516c 100644 (file)
@@ -100,7 +100,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return.
         */
-       if (!batch->active) {
+       if (!is_lazy_mmu_mode_active()) {
                flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
                put_cpu_var(ppc64_tlb_batch);
                return;