mm: remove arch_flush_tlb_batched_pending() arch helper
author    Ryan Roberts <ryan.roberts@arm.com>
          Mon, 9 Jun 2025 10:31:30 +0000 (11:31 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 25 Jul 2025 02:12:32 +0000 (19:12 -0700)
Since commit 4b634918384c ("arm64/mm: Close theoretical race where stale
TLB entry remains valid"), all arches that use tlbbatch for reclaim
(arm64, riscv, x86) implement arch_flush_tlb_batched_pending() with a
flush_tlb_mm().

So let's simplify by removing the unnecessary abstraction and doing the
flush_tlb_mm() directly in flush_tlb_batched_pending().  This effectively
reverts commit db6c1f6f236d ("mm/tlbbatch: introduce
arch_flush_tlb_batched_pending()").

Link: https://lkml.kernel.org/r/20250609103132.447370-1-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Suggested-by: Will Deacon <will@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/include/asm/tlbflush.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/tlbflush.c
arch/x86/include/asm/tlbflush.h
mm/rmap.c

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index aa9efee17277d4118d3ea745ccb9e2ec5bb9e45a..18a5dc0c9a540f53cb2ed651d749012f38b9311f 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -322,17 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
        return true;
 }
 
-/*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
- * all the previously issued TLBIs targeting mm have completed. But since we
- * can be executing on a remote CPU, a DSB cannot guarantee this like it can
- * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-       flush_tlb_mm(mm);
-}
-
 /*
  * To support TLB batched flush for multiple pages unmapping, we only send
  * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 1a20dd746a49f3445c7eab42f2cc3af8ef469699..eed0abc4051436f2b10b2f2685d8d7bf8fd9dd1f 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -63,7 +63,6 @@ void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
 bool arch_tlbbatch_should_defer(struct mm_struct *mm);
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                struct mm_struct *mm, unsigned long start, unsigned long end);
-void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 extern unsigned long tlb_flush_all_threshold;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index e737ba7949b12a3647275c53d17275a215abccca..8404530ec00f93458ab6a225e2b6f4fab2526f87 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -234,11 +234,6 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
        mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
-void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-       flush_tlb_mm(mm);
-}
-
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
        __flush_tlb_range(NULL, &batch->cpumask,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e9b81876ebe43b1cd98de91480d2a2d8518f9449..00daedfefc1b02e7e000676bbf6ee5a4c2a74ebb 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
        mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-       flush_tlb_mm(mm);
-}
-
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
diff --git a/mm/rmap.c b/mm/rmap.c
index 4c833b43fef971f677e0f3f837647b0c643ede84..f93ce27132abcc10ae6f3ad58df9793d5a8d92a6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -746,7 +746,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
        int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 
        if (pending != flushed) {
-               arch_flush_tlb_batched_pending(mm);
+               flush_tlb_mm(mm);
                /*
                 * If the new TLB flushing is pending during flushing, leave
                 * mm->tlb_flush_batched as is, to avoid losing flushing.
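
For context, this is roughly how flush_tlb_batched_pending() in mm/rmap.c reads once the arch hook is gone. Only the flush_tlb_mm(mm) call and its immediate context come from the hunk above; the surrounding lines (the atomic_read() of mm->tlb_flush_batched, TLB_FLUSH_BATCH_PENDING_MASK and the trailing atomic_cmpxchg()) are reconstructed from the upstream source and may not match the tree byte-for-byte:

/*
 * Sketch of the resulting function; lines outside the hunk above are
 * reconstructed, not quoted from this commit.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		/* All tlbbatch arches flush the whole mm here, so call it directly. */
		flush_tlb_mm(mm);
		/*
		 * If new TLB flushing became pending during the flush, leave
		 * mm->tlb_flush_batched as is, to avoid losing that flush.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}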