powerpc/64s: do not re-activate batched TLB flush
author Alexander Gordeev <agordeev@linux.ibm.com>
Mon, 15 Dec 2025 15:03:10 +0000 (15:03 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 21 Jan 2026 03:24:32 +0000 (19:24 -0800)
Patch series "Nesting support for lazy MMU mode", v6.

When the lazy MMU mode was introduced eons ago, it wasn't made clear
whether such a sequence was legal:

arch_enter_lazy_mmu_mode()
...
arch_enter_lazy_mmu_mode()
...
arch_leave_lazy_mmu_mode()
...
arch_leave_lazy_mmu_mode()

It seems fair to say that nested calls to
arch_{enter,leave}_lazy_mmu_mode() were not expected, and most
architectures never explicitly supported nesting.

Nesting does in fact occur in certain configurations, and avoiding it has
proved difficult.  This series therefore enables lazy_mmu sections to
nest, on all architectures.

Nesting is handled using a counter in task_struct (patch 8), like other
nestable APIs such as pagefault_{disable,enable}().  This is fully
handled in a new generic layer in <linux/pgtable.h>; the arch_* API
remains unchanged.  A new pair of calls, lazy_mmu_mode_{pause,resume}(),
is also introduced to allow functions that are called with the lazy MMU
mode enabled to temporarily pause it, regardless of nesting.
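
As a rough illustration of the approach, the generic layer could look
like the sketch below.  This is not the code from the series: the
enable/disable entry points and the task_struct field name are
assumptions; only arch_{enter,leave}_lazy_mmu_mode() and
lazy_mmu_mode_{pause,resume}() are named above.

/* Hypothetical sketch of the generic nesting layer described above. */
static inline void lazy_mmu_mode_enable(void)
{
	/* Only the outermost section enters the arch lazy MMU mode. */
	if (current->lazy_mmu_nesting++ == 0)
		arch_enter_lazy_mmu_mode();
}

static inline void lazy_mmu_mode_disable(void)
{
	/* Only the outermost section leaves the arch lazy MMU mode. */
	if (--current->lazy_mmu_nesting == 0)
		arch_leave_lazy_mmu_mode();
}

static inline void lazy_mmu_mode_pause(void)
{
	/* Drop out of the arch mode unconditionally, whatever the depth. */
	arch_leave_lazy_mmu_mode();
}

static inline void lazy_mmu_mode_resume(void)
{
	/* Undo a pause; the nesting counter is untouched by pause/resume. */
	arch_enter_lazy_mmu_mode();
}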

An arch now opts in to using the lazy MMU mode by selecting
CONFIG_ARCH_LAZY_MMU; this is more appropriate now that we have a generic
API, especially with state conditionally added to task_struct.
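
Concretely, the per-task state would only exist on architectures that
select the new option, along these lines (again a sketch; the field
name is an assumption, not taken from the series):

struct task_struct {
	...
#ifdef CONFIG_ARCH_LAZY_MMU
	/* Nesting depth of lazy MMU mode sections for this task. */
	int lazy_mmu_nesting;
#endif
	...
};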

This patch (of 14):

Since commit b9ef323ea168 ("powerpc/64s: Disable preemption in hash lazy
mmu mode") a task cannot be preempted while in lazy MMU mode.  Therefore,
the batch re-activation code in __switch_to() is never called, so remove
it.
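
For reference, since that commit the hash MMU enter/leave helpers behave
roughly as follows (a simplified sketch, not the verbatim powerpc
source):

/* Simplified sketch of the 64s hash behaviour after b9ef323ea168. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	preempt_disable();	/* no context switch until we leave the mode */
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch;

	if (radix_enabled())
		return;
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
	preempt_enable();
}

With preemption disabled for the whole section, __switch_to() can never
run while batch->active is set, so the _TLF_LAZY_MMU save/restore
removed below is dead code.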

Link: https://lkml.kernel.org/r/20251215150323.2218608-1-kevin.brodsky@arm.com
Link: https://lkml.kernel.org/r/20251215150323.2218608-2-kevin.brodsky@arm.com
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: levi.yun <yeoreum.yun@arm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/process.c

diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index b0f200aba2b3df129eae033965db12a07aac3e9c..97f35f9b1a96e5766a7bbb7a61e8a6a562a297ab 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -154,12 +154,10 @@ void arch_setup_new_exec(void);
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
 #define TLF_NAPPING            0       /* idle thread enabled NAP mode */
 #define TLF_SLEEPING           1       /* suspend code enabled SLEEP mode */
-#define TLF_LAZY_MMU           3       /* tlb_batch is active */
 #define TLF_RUNLATCH           4       /* Is the runlatch enabled? */
 
 #define _TLF_NAPPING           (1 << TLF_NAPPING)
 #define _TLF_SLEEPING          (1 << TLF_SLEEPING)
-#define _TLF_LAZY_MMU          (1 << TLF_LAZY_MMU)
 #define _TLF_RUNLATCH          (1 << TLF_RUNLATCH)
 
 #ifndef __ASSEMBLER__
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a45fe147868bc43d4de53c8b5f5c298b46a959c6..a15d0b619b1f1ebea5b1c23c6370ebd978b3f3fd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1281,9 +1281,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 {
        struct thread_struct *new_thread, *old_thread;
        struct task_struct *last;
-#ifdef CONFIG_PPC_64S_HASH_MMU
-       struct ppc64_tlb_batch *batch;
-#endif
 
        new_thread = &new->thread;
        old_thread = &current->thread;
@@ -1291,14 +1288,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
        WARN_ON(!irqs_disabled());
 
 #ifdef CONFIG_PPC_64S_HASH_MMU
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
-       if (batch->active) {
-               current_thread_info()->local_flags |= _TLF_LAZY_MMU;
-               if (batch->index)
-                       __flush_tlb_pending(batch);
-               batch->active = 0;
-       }
-
        /*
         * On POWER9 the copy-paste buffer can only paste into
         * foreign real addresses, so unprivileged processes can not
@@ -1369,20 +1358,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
         */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_PPC_64S_HASH_MMU
-       /*
-        * This applies to a process that was context switched while inside
-        * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
-        * deactivated above, before _switch(). This will never be the case
-        * for new tasks.
-        */
-       if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
-               current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-               batch = this_cpu_ptr(&ppc64_tlb_batch);
-               batch->active = 1;
-       }
-#endif
-
        /*
         * Math facilities are masked out of the child MSR in copy_thread.
         * A new task does not need to restore_math because it will