git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/mm: Consolidate full flush threshold decision
author: Rik van Riel <riel@surriel.com>
Wed, 26 Feb 2025 03:00:36 +0000 (22:00 -0500)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 19 Mar 2025 10:08:07 +0000 (11:08 +0100)
Reduce code duplication by consolidating the decision point for whether to do
individual invalidations or a full flush inside get_flush_tlb_info().

Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Link: https://lore.kernel.org/r/20250226030129.530345-2-riel@surriel.com
arch/x86/mm/tlb.c

index ffc25b3480415a33e538d88d602a2355d9b830b3..dbcb5c968ff9ec7f58ed3608c31f5ae38bdfd91f 100644 (file)
@@ -1000,6 +1000,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
        BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
 #endif
 
+       /*
+        * If the number of flushes is so large that a full flush
+        * would be faster, do a full flush.
+        */
+       if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
+               start = 0;
+               end = TLB_FLUSH_ALL;
+       }
+
        info->start             = start;
        info->end               = end;
        info->mm                = mm;
@@ -1026,17 +1035,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                bool freed_tables)
 {
        struct flush_tlb_info *info;
+       int cpu = get_cpu();
        u64 new_tlb_gen;
-       int cpu;
-
-       cpu = get_cpu();
-
-       /* Should we flush just the requested range? */
-       if ((end == TLB_FLUSH_ALL) ||
-           ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
-               start = 0;
-               end = TLB_FLUSH_ALL;
-       }
 
        /* This is also a barrier that synchronizes with switch_mm(). */
        new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1089,22 +1089,19 @@ static void do_kernel_range_flush(void *info)
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-       /* Balance as user space task's flush, a bit conservative */
-       if (end == TLB_FLUSH_ALL ||
-           (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
-               on_each_cpu(do_flush_tlb_all, NULL, 1);
-       } else {
-               struct flush_tlb_info *info;
+       struct flush_tlb_info *info;
+
+       guard(preempt)();
 
-               preempt_disable();
-               info = get_flush_tlb_info(NULL, start, end, 0, false,
-                                         TLB_GENERATION_INVALID);
+       info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+                                 TLB_GENERATION_INVALID);
 
+       if (info->end == TLB_FLUSH_ALL)
+               on_each_cpu(do_flush_tlb_all, NULL, 1);
+       else
                on_each_cpu(do_kernel_range_flush, info, 1);
 
-               put_flush_tlb_info();
-               preempt_enable();
-       }
+       put_flush_tlb_info();
 }
 
 /*