mm: support tlbbatch flush for a range of PTEs
author    Barry Song <v-songbaohua@oppo.com>
          Fri, 14 Feb 2025 09:30:13 +0000 (22:30 +1300)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 17 Mar 2025 05:06:16 +0000 (22:06 -0700)
This patch lays the groundwork for supporting batch PTE unmapping in
try_to_unmap_one().  It introduces range handling for TLB batch flushing,
with the range currently limited to a single page (PAGE_SIZE).
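
A minimal sketch of the new calling convention (the one-page call is taken
from the mm/rmap.c hunk below; the multi-page caller is hypothetical and
only illustrates what a later batch-unmap patch could pass):

	/* today: exactly one page per pending flush */
	set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);

	/* hypothetical future batch unmap of nr contiguous PTEs */
	set_tlb_ubc_flush_pending(mm, pteval, address,
				  address + nr * PAGE_SIZE);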

The function __flush_tlb_range_nosync() is architecture-specific and is
only used within arch/arm64.  It needs only the mm structure, not the full
vma.  To allow its reuse by arch_tlbbatch_add_pending(), which operates on
an mm but has no vma, this patch changes the parameter of
__flush_tlb_range_nosync() from vma to mm.
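
With mm as the parameter, the arm64 batch hook can simply forward to the
range helper; this is the definition added in the arm64 hunk below, with
explanatory comments added here:

	static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			struct mm_struct *mm, unsigned long start, unsigned long end)
	{
		/*
		 * Queue the TLBI for [start, end) without the trailing DSB;
		 * the deferred batch flush issues the DSB later.
		 */
		__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
	}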

Link: https://lkml.kernel.org/r/20250214093015.51024-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shaoqin Huang <shahuang@redhat.com>
Cc: Gavin Shan <gshan@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mauricio Faria de Oliveira <mfo@canonical.com>
Cc: Tangquan Zheng <zhengtangquan@oppo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/include/asm/tlbflush.h
arch/arm64/mm/contpte.c
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/tlbflush.c
arch/x86/include/asm/tlbflush.h
mm/rmap.c

index bc94e036a26b99b14d8acd767f61320362555dd5..b7e1920570bdda0f9db5cae6999f23429c62f35e 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -322,13 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
        return true;
 }
 
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-                                            struct mm_struct *mm,
-                                            unsigned long uaddr)
-{
-       __flush_tlb_page_nosync(mm, uaddr);
-}
-
 /*
  * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
  * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
@@ -448,7 +441,7 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long start,
        return false;
 }
 
-static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
+static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
                                     unsigned long start, unsigned long end,
                                     unsigned long stride, bool last_level,
                                     int tlb_level)
@@ -460,12 +453,12 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
        pages = (end - start) >> PAGE_SHIFT;
 
        if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
-               flush_tlb_mm(vma->vm_mm);
+               flush_tlb_mm(mm);
                return;
        }
 
        dsb(ishst);
-       asid = ASID(vma->vm_mm);
+       asid = ASID(mm);
 
        if (last_level)
                __flush_tlb_range_op(vale1is, start, pages, stride, asid,
@@ -474,7 +467,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
                __flush_tlb_range_op(vae1is, start, pages, stride, asid,
                                     tlb_level, true, lpa2_is_enabled());
 
-       mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+       mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
@@ -482,7 +475,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long stride, bool last_level,
                                     int tlb_level)
 {
-       __flush_tlb_range_nosync(vma, start, end, stride,
+       __flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
                                 last_level, tlb_level);
        dsb(ish);
 }
@@ -533,6 +526,12 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
        dsb(ish);
        isb();
 }
+
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+               struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+       __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
+}
 #endif
 
 #endif
index 55107d27d3f8834f85f4d968e770fc47e1b61521..bcac4f55f9c14d4b86df4678a748a1a16108fa71 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -335,7 +335,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
                 * eliding the trailing DSB applies here.
                 */
                addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-               __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
+               __flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
                                         PAGE_SIZE, true, 3);
        }
 
index 72e559934952939aa410ef7cdda4ef7ad2696a26..ce0dd0fed7646a56dea6010ec6082c5c1476783f 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -60,8 +60,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 bool arch_tlbbatch_should_defer(struct mm_struct *mm);
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-                              struct mm_struct *mm,
-                              unsigned long uaddr);
+               struct mm_struct *mm, unsigned long start, unsigned long end);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
index 9b6e86ce38674455ca03cee29ed2b62d5c3a3841..74dd9307fbf1b4418b615e5f82afb81dd8a4f257 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -186,8 +186,7 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 }
 
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-                              struct mm_struct *mm,
-                              unsigned long uaddr)
+               struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
index 02fc2aa06e9e0ecdba3fe948cafe5892b72e86c0..29373da7b00a609e8575dc2a28afb9ace562ddba 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -279,8 +279,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 }
 
 static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-                                            struct mm_struct *mm,
-                                            unsigned long uaddr)
+               struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
index 5c208e1c8266a8262ca93dce783e4f7174d69427..765e541ac9be864887a1f3154ee6971f7a4fed87 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -672,7 +672,7 @@ void try_to_unmap_flush_dirty(void)
        (TLB_FLUSH_BATCH_PENDING_MASK / 2)
 
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
-                                     unsigned long uaddr)
+               unsigned long start, unsigned long end)
 {
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
        int batch;
@@ -681,7 +681,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
        if (!pte_accessible(mm, pteval))
                return;
 
-       arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
+       arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
        tlb_ubc->flush_required = true;
 
        /*
@@ -757,7 +757,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
 }
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
-                                     unsigned long uaddr)
+               unsigned long start, unsigned long end)
 {
 }
 
@@ -1887,7 +1887,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                 */
                                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-                               set_tlb_ubc_flush_pending(mm, pteval, address);
+                               set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
                        } else {
                                pteval = ptep_clear_flush(vma, address, pvmw.pte);
                        }
@@ -2270,7 +2270,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                 */
                                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-                               set_tlb_ubc_flush_pending(mm, pteval, address);
+                               set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
                        } else {
                                pteval = ptep_clear_flush(vma, address, pvmw.pte);
                        }