#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Outside of a few very special situations (e.g. hibernation), we always
- * use broadcast TLB invalidation instructions, therefore a spurious page
- * fault on one CPU which has been handled concurrently by another CPU
- * does not need to perform additional invalidation.
+ * We use a local TLB invalidation instruction when reusing a page in
+ * the write-protection fault handler, to avoid a TLBI broadcast on the
+ * hot path. Stale read-only TLB entries may then remain on other CPUs
+ * and cause spurious page faults, so a CPU taking such a fault flushes
+ * its own stale entry here.
*/
-#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) \
+ local_flush_tlb_page_nonotify(vma, address)
+
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \
+ local_flush_tlb_page_nonotify(vma, address)
/*
* ZERO_PAGE is a global shared page that is always zero: used
* cannot be easily determined, the value TLBI_TTL_UNKNOWN will
* perform a non-hinted invalidation.
*
+ *	local_flush_tlb_page(vma, addr)
+ *		Local variant of flush_tlb_page(). Stale TLB entries may
+ *		remain on remote CPUs.
+ *
+ *	local_flush_tlb_page_nonotify(vma, addr)
+ *		Same as local_flush_tlb_page(), except that MMU notifiers
+ *		are not called.
+ *
+ *	local_flush_tlb_contpte(vma, addr)
+ *		Invalidate the virtual-address range
+ *		'[addr, addr+CONT_PTE_SIZE)' mapped with contpte on the
+ *		local CPU, for the user address space corresponding to
+ *		'vma->vm_mm'. Stale TLB entries may remain on remote CPUs.
*
* Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
* on top of these routines, since that is our interface to the mmu_gather
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
+static inline void __local_flush_tlb_page_nonotify_nosync(struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ unsigned long addr;
+
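+	/* Ensure prior page-table updates are visible before the local TLBI */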
+ dsb(nshst);
+ addr = __TLBI_VADDR(uaddr, ASID(mm));
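+	/* vale1 (no IS suffix): last-level invalidation by VA, this CPU only */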
+ __tlbi(vale1, addr);
+ __tlbi_user(vale1, addr);
+}
+
+static inline void local_flush_tlb_page_nonotify(struct vm_area_struct *vma,
+ unsigned long uaddr)
+{
+ __local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
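+	/* Wait for the local TLB invalidation to complete */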
+ dsb(nsh);
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long uaddr)
+{
+ __local_flush_tlb_page_nonotify_nosync(vma->vm_mm, uaddr);
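+	/* Unlike the _nonotify variant, tell secondary TLBs (MMU notifiers) too */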
+ mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
+ (uaddr & PAGE_MASK) + PAGE_SIZE);
+ dsb(nsh);
+}
+
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
unsigned long uaddr)
{
dsb(ish);
}
+static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ unsigned long asid;
+
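+	/* Operate on the whole contpte block containing 'addr' */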
+ addr = round_down(addr, CONT_PTE_SIZE);
+
+ dsb(nshst);
+ asid = ASID(vma->vm_mm);
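+	/* Last-level (level 3) invalidation of the CONT_PTES entries, this CPU only */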
+ __flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
+ 3, true, lpa2_is_enabled());
+ mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
+ addr + CONT_PTE_SIZE);
+ dsb(nsh);
+}
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__ptep_set_access_flags(vma, addr, ptep, entry, 0);
if (dirty)
- __flush_tlb_range(vma, start_addr, addr,
- PAGE_SIZE, true, 3);
+ local_flush_tlb_contpte(vma, start_addr);
} else {
__contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte);
__ptep_set_access_flags(vma, addr, ptep, entry, dirty);
pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
- /* Invalidate a stale read-only entry */
+	/*
+	 * Invalidate the local stale read-only entry. Stale entries on
+	 * remote CPUs may still trigger spurious page faults and are
+	 * invalidated there via flush_tlb_fix_spurious_fault().
+	 */
if (dirty)
- flush_tlb_page(vma, address);
+ local_flush_tlb_page(vma, address);
return 1;
}
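
For context, the stale entries left on remote CPUs are cleaned up lazily by the
generic fault path. A simplified sketch, assuming the usual handle_pte_fault()
flow in mm/memory.c (not the exact upstream code), of how the
flush_tlb_fix_spurious_fault() hook defined above is reached:

	/* Spurious write fault on a CPU that still holds a stale read-only entry */
	entry = pte_mkyoung(vmf->orig_pte);
	if (!ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
				   vmf->flags & FAULT_FLAG_WRITE)) {
		/* The PTE was already up to date, so only the TLB entry is stale */
		if (vmf->flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
						     vmf->pte);
	}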