unsigned long start,
unsigned long end,
unsigned long stride,
- bool last_level)
+ tlbf_t flags)
{
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
- __flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
+ __flush_tlb_range(vma, start, end, PUD_SIZE, 1, flags);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
- __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
+ __flush_tlb_range(vma, start, end, PMD_SIZE, 2, flags);
break;
case CONT_PTE_SIZE:
- __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, 3, flags);
break;
default:
- __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, flags);
}
}
{
unsigned long stride = huge_page_size(hstate_vma(vma));
- __flush_hugetlb_tlb_range(vma, start, end, stride, false);
+ __flush_hugetlb_tlb_range(vma, start, end, stride, TLBF_NONE);
}
#endif /* __ASM_HUGETLB_H */
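The hunk above shows only the body of the converted wrapper; its signature falls outside the diff context. A minimal sketch of the full function, assuming it is the flush_hugetlb_tlb_range() helper that the body implies (the name is an assumption, not shown in the hunk):

static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	/* Derive the invalidation stride from the VMA's huge page size. */
	unsigned long stride = huge_page_size(hstate_vma(vma));

	/* TLBF_NONE: invalidate TLB entries and walk-cache entries alike. */
	__flush_hugetlb_tlb_range(vma, start, end, stride, TLBF_NONE);
}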
/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end) \
- __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
+ __flush_tlb_range(vma, addr, end, PMD_SIZE, 2, TLBF_NONE)
#define flush_pud_tlb_range(vma, addr, end) \
- __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
+ __flush_tlb_range(vma, addr, end, PUD_SIZE, 1, TLBF_NONE)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
- bool last_level = !tlb->freed_tables;
+ tlbf_t flags = tlb->freed_tables ? TLBF_NONE : TLBF_NOWALKCACHE;
unsigned long stride = tlb_get_unmap_size(tlb);
int tlb_level = tlb_get_level(tlb);
* reallocate our ASID without invalidating the entire TLB.
*/
if (tlb->fullmm) {
- if (!last_level)
+ if (tlb->freed_tables)
flush_tlb_mm(tlb->mm);
return;
}
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
- last_level, tlb_level);
+ tlb_level, flags);
}
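For reference, the translation in tlb_flush() above preserves the old last_level semantics exactly; a comment-form sketch of the mapping (editor's illustration, not part of the patch):

/*
 * last_level == true   (tlb->freed_tables == false)  ->  TLBF_NOWALKCACHE
 *	No page tables were freed, so leaf-only invalidation suffices
 *	and the walk cache stays valid.
 * last_level == false  (tlb->freed_tables == true)   ->  TLBF_NONE
 *	Page tables were freed, so walk-cache entries must be
 *	invalidated as well.
 */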
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
* CPUs, ensuring that any walk-cache entries associated with the
* translation are also invalidated.
*
- * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
+ * __flush_tlb_range(vma, start, end, stride, tlb_level, flags)
* Invalidate the virtual-address range '[start, end)' on all
* CPUs for the user address space corresponding to 'vma->mm'.
* The invalidation operations are issued at a granularity
- * determined by 'stride' and only affect any walk-cache entries
- * if 'last_level' is equal to false. tlb_level is the level at
+ * determined by 'stride'. 'tlb_level' is the level at
* which the invalidation must take place. If the level is wrong,
* no invalidation may take place. In the case where the level
* cannot be easily determined, the value TLBI_TTL_UNKNOWN will
- * perform a non-hinted invalidation.
+ * perform a non-hinted invalidation. 'flags' may be TLBF_NONE (0) or
+ * TLBF_NOWALKCACHE, which elides invalidation of walk-cache entries.
*
* local_flush_tlb_page(vma, addr)
* Local variant of flush_tlb_page(). Stale TLB entries may
return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
}
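As a usage illustration of the signature documented above (a hypothetical call, not taken from the patch), a leaf-only invalidation of a single PMD-mapped 2MiB region would pass the matching stride and level:

/*
 * Hypothetical example: stride = PMD_SIZE, tlb_level = 2 (the PMD
 * level), and TLBF_NOWALKCACHE because no page tables were freed.
 */
__flush_tlb_range(vma, addr, addr + PMD_SIZE, PMD_SIZE, 2,
		  TLBF_NOWALKCACHE);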
+typedef unsigned int __bitwise tlbf_t;
+
+/* No special behaviour. */
+#define TLBF_NONE ((__force tlbf_t)0)
+
+/* Invalidate TLB entries only, leaving walk-cache entries intact. */
+#define TLBF_NOWALKCACHE ((__force tlbf_t)BIT(0))
+
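The __bitwise annotation lets sparse catch accidental mixing of plain integers with tlbf_t; a short illustration (hypothetical snippet, not part of the patch):

/*
 * Under sparse, assigning a plain integer to a __bitwise type warns:
 *
 *	tlbf_t flags = 1;			-> sparse warning
 *	tlbf_t flags = TLBF_NOWALKCACHE;	-> OK
 *
 * The __force casts in the TLBF_* definitions are the only sanctioned
 * conversions from plain integers.
 */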
static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
unsigned long start, unsigned long end,
- unsigned long stride, bool last_level,
- int tlb_level)
+ unsigned long stride, int tlb_level,
+ tlbf_t flags)
{
unsigned long asid, pages;
dsb(ishst);
asid = ASID(mm);
- if (last_level)
+ if (flags & TLBF_NOWALKCACHE)
__flush_s1_tlb_range_op(vale1is, start, pages, stride,
asid, tlb_level);
else
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- unsigned long stride, bool last_level,
- int tlb_level)
+ unsigned long stride, int tlb_level,
+ tlbf_t flags)
{
__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
- last_level, tlb_level);
+ tlb_level, flags);
__tlbi_sync_s1ish();
}
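The split between the _nosync variant and this wrapper lets callers batch several invalidations under one completion barrier; a hypothetical batching sketch (illustrative names, not from the patch):

/* Issue several range invalidations, then synchronise once. */
__flush_tlb_range_nosync(mm, start1, end1, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
__flush_tlb_range_nosync(mm, start2, end2, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
__tlbi_sync_s1ish();	/* one barrier covers all prior TLBIs */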
* Set the tlb_level to TLBI_TTL_UNKNOWN because we cannot get enough
* information here.
*/
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm, unsigned long start, unsigned long end)
{
- __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
+ __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
}
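The constants at this call site encode the batching assumptions; a comment-form reading (editor's note, not part of the patch):

/*
 * Batched unmap (e.g. during reclaim) zaps individual PTEs and never
 * frees page tables, so the stride is PAGE_SIZE, tlb_level is 3 (the
 * PTE level), and TLBF_NOWALKCACHE leaves the walk cache intact
 * across the batch.
 */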
static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
*/
if (!system_supports_bbml2_noabort())
- __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+ __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, 3,
+ TLBF_NOWALKCACHE);
__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
}
* eliding the trailing DSB applies here.
*/
__flush_tlb_range_nosync(vma->vm_mm, addr, end,
- PAGE_SIZE, true, 3);
+ PAGE_SIZE, 3, TLBF_NOWALKCACHE);
}
return young;
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
unsigned long end = addr + (pgsize * ncontig);
- __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
+ __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, TLBF_NOWALKCACHE);
return orig_pte;
}
if (mm == &init_mm)
flush_tlb_kernel_range(saddr, addr);
else
- __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
+ __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, TLBF_NOWALKCACHE);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
*/
if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
__flush_tlb_range(vma, addr, addr + nr * PAGE_SIZE,
- PAGE_SIZE, true, 3);
+ PAGE_SIZE, 3, TLBF_NOWALKCACHE);
}
return pte;