git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
arm64: tlbflush: add __flush_tlb_range_limit_excess()
author: Kefeng Wang <wangkefeng.wang@huawei.com>
Mon, 23 Sep 2024 13:13:50 +0000 (21:13 +0800)
committer: Catalin Marinas <catalin.marinas@arm.com>
Wed, 16 Oct 2024 11:01:53 +0000 (12:01 +0100)
The __flush_tlb_range_limit_excess() helper will be used when
flushing a TLB kernel range in a subsequent patch.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20240923131351.713304-2-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/tlbflush.h

index 95fbc8c0560798da03a2c8ad0f9a9fa02bc55be0..5f5e7d1f2e7d036dd1a91b9ffcbd0f13021eda93 100644 (file)
@@ -431,6 +431,23 @@ do {                                                                       \
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
        __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
+static inline bool __flush_tlb_range_limit_excess(unsigned long start,
+               unsigned long end, unsigned long pages, unsigned long stride)
+{
+       /*
+        * Return true when the range is too large for per-entry
+        * invalidation: without TLB range support at most
+        * (MAX_DVM_OPS - 1) stride-sized chunks fit; with range
+        * support at most MAX_TLBI_RANGE_PAGES pages.
+        */
+       if ((!system_supports_tlb_range() &&
+            (end - start) >= (MAX_DVM_OPS * stride)) ||
+           pages > MAX_TLBI_RANGE_PAGES)
+               return true;
+
+       return false;
+}
+
 static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     unsigned long stride, bool last_level,
@@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
        end = round_up(end, stride);
        pages = (end - start) >> PAGE_SHIFT;
 
-       /*
-        * When not uses TLB range ops, we can handle up to
-        * (MAX_DVM_OPS - 1) pages;
-        * When uses TLB range ops, we can handle up to
-        * MAX_TLBI_RANGE_PAGES pages.
-        */
-       if ((!system_supports_tlb_range() &&
-            (end - start) >= (MAX_DVM_OPS * stride)) ||
-           pages > MAX_TLBI_RANGE_PAGES) {
+       if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
                flush_tlb_mm(vma->vm_mm);
                return;
        }