#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
__flush_tlb_range_op(op, r##op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled())
-static inline bool __flush_tlb_range_limit_excess(unsigned long start,
- unsigned long end, unsigned long pages, unsigned long stride)
+static inline bool __flush_tlb_range_limit_excess(unsigned long pages,
+ unsigned long stride)
{
/*
- * When the system does not support TLB range based flush
- * operation, (MAX_DVM_OPS - 1) pages can be handled. But
- * with TLB range based operation, MAX_TLBI_RANGE_PAGES
- * pages can be handled.
+ * Assume that the worst-case number of DVM ops required to flush a
+ * given range on a system that supports TLB range operations is 20
+ * (4 scales, 1 final page, and 15 for alignment on LPA2 systems),
+ * which is much smaller than MAX_DVM_OPS.
*/
- if ((!system_supports_tlb_range() &&
- (end - start) >= (MAX_DVM_OPS * stride)) ||
- pages > MAX_TLBI_RANGE_PAGES)
- return true;
+ if (system_supports_tlb_range())
+ return pages > MAX_TLBI_RANGE_PAGES;
- return false;
+ return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
}
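For reference, the 20-op worst case cited in the new comment decomposes as: up to 15 single-page invalidations to bring the start address to 64K alignment on LPA2 (16 4K pages per 64K block, minus one), at most one range operation per scale (4 scales), and 1 trailing single-page operation. Below is a minimal userspace sketch of the new predicate, checked against the old (end - start) >= MAX_DVM_OPS * stride form; PAGE_SHIFT, MAX_DVM_OPS, and MAX_TLBI_RANGE_PAGES are stand-in values here, not the kernel's definitions (the kernel derives them from the page-table geometry):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for illustration only. */
#define PAGE_SHIFT		12
#define MAX_DVM_OPS		512
#define MAX_TLBI_RANGE_PAGES	(32UL << 16)

static bool limit_excess(unsigned long pages, unsigned long stride,
			 bool has_tlb_range)
{
	if (has_tlb_range)
		return pages > MAX_TLBI_RANGE_PAGES;

	/*
	 * Equivalent to the old (end - start) >= MAX_DVM_OPS * stride
	 * test: pages == (end - start) >> PAGE_SHIFT and the callers
	 * align both start and end to the stride.
	 */
	return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long stride = 1UL << PAGE_SHIFT;

	/* Without range support: 511 pages fit in MAX_DVM_OPS - 1 ops. */
	printf("%d %d\n", limit_excess(511, stride, false),
	       limit_excess(512, stride, false));	/* prints: 0 1 */
	return 0;
}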
static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
end = round_up(end, stride);
pages = (end - start) >> PAGE_SHIFT;
- if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
+ if (__flush_tlb_range_limit_excess(pages, stride)) {
flush_tlb_mm(mm);
return;
}
end = round_up(end, stride);
pages = (end - start) >> PAGE_SHIFT;
- if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
+ if (__flush_tlb_range_limit_excess(pages, stride)) {
flush_tlb_all();
return;
}
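And a companion sketch of the calling pattern the two hunks above share: align the range to the stride, derive the page count, then either flush by range or fall back to a full flush. The flush calls and the rounding helpers are stand-ins here (the kernel callers use flush_tlb_mm()/flush_tlb_all() and its round_up()/round_down() macros):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_DVM_OPS	512	/* stand-in value, as in the sketch above */

static bool limit_excess(unsigned long pages, unsigned long stride)
{
	return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
}

/* Mirrors the updated call sites; stride must be a power of two. */
static void flush_range(unsigned long start, unsigned long end,
			unsigned long stride)
{
	unsigned long pages;

	start &= ~(stride - 1);				/* round_down() */
	end = (end + stride - 1) & ~(stride - 1);	/* round_up() */
	pages = (end - start) >> PAGE_SHIFT;

	if (limit_excess(pages, stride)) {
		puts("fall back to a full flush");	/* flush_tlb_all() */
		return;
	}
	printf("range flush of %lu pages\n", pages);
}

int main(void)
{
	flush_range(0x1000, 0x5000, 1UL << PAGE_SHIFT);	/* 4 pages */
	flush_range(0, 4UL << 20, 1UL << PAGE_SHIFT);	/* 1024 pages: falls back */
	return 0;
}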