git.ipfire.org Git - thirdparty/linux.git/commitdiff
riscv: Call secondary mmu notifier when flushing the tlb
author Alexandre Ghiti <alexghiti@rivosinc.com>
Mon, 13 Jan 2025 14:24:24 +0000 (15:24 +0100)
committer Alexandre Ghiti <alexghiti@rivosinc.com>
Tue, 18 Mar 2025 09:11:43 +0000 (09:11 +0000)
This is required to allow the IOMMU driver to correctly flush its own
TLB.

Reviewed-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20250113142424.30487-1-alexghiti@rivosinc.com
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
arch/riscv/mm/tlbflush.c

index 9b6e86ce38674455ca03cee29ed2b62d5c3a3841..bb77607c87aa2d60c567b28e79dd884ac4c0b567 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info)
        local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+       return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
+}
+
+static void __flush_tlb_range(struct mm_struct *mm,
+                             const struct cpumask *cmask,
                              unsigned long start, unsigned long size,
                              unsigned long stride)
 {
+       unsigned long asid = get_mm_asid(mm);
        unsigned int cpu;
 
        if (cpumask_empty(cmask))
@@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
        }
 
        put_cpu();
-}
 
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
-       return cntx2asid(atomic_long_read(&mm->context.id));
+       if (mm)
+               mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
-                         0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+       __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int page_size)
 {
-       __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
-                         start, end - start, page_size);
+       __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+       __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                          addr, PAGE_SIZE, PAGE_SIZE);
 }
 
@@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                }
        }
 
-       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+       __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                          start, end - start, stride_size);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-       __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+       __flush_tlb_range(NULL, cpu_online_mask,
                          start, end - start, PAGE_SIZE);
 }
 
@@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
 {
-       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+       __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                          start, end - start, PMD_SIZE);
 }
 #endif
@@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                               struct mm_struct *mm,
                               unsigned long uaddr)
 {
+       unsigned long start = uaddr & PAGE_MASK;
+
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+       mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
 }
 
 void arch_flush_tlb_batched_pending(struct mm_struct *mm)
@@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-       __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
-                         FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+       __flush_tlb_range(NULL, &batch->cpumask,
+                         0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
        cpumask_clear(&batch->cpumask);
 }