mm/vmalloc: move resched point into alloc_vmap_area()
author	Uladzislau Rezki (Sony) <urezki@gmail.com>
	Wed, 17 Sep 2025 18:59:06 +0000 (20:59 +0200)
committer	Andrew Morton <akpm@linux-foundation.org>
	Tue, 23 Sep 2025 21:14:16 +0000 (14:14 -0700)
Currently vm_area_alloc_pages() contains two cond_resched() points.
However, the page allocator already has its own in the slow path, so an
extra resched is not optimal because it only delays the allocation loops.

The place where CPU time can be consumed is the VA-space search in
alloc_vmap_area(), especially when the space is heavily fragmented (for
example under synthetic stress tests) and the fast path falls back to the
slow one.

Move a single cond_resched() there, after dropping free_vmap_area_lock in
the slow path.  This keeps fairness where it matters while removing
redundant yields from the page-allocation path.
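
For context, a minimal sketch of how the retry block in alloc_vmap_area()
might look after this patch.  It is paraphrased from the hunk below; the
guard condition, the preload_this_cpu_lock() call and the overflow label
come from a reading of the surrounding function and are not part of this
diff, so treat them as assumptions:

	retry:
		if (IS_ERR_VALUE(addr)) {
			/* Fast path failed: fall back to the global VA-space search. */
			preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
			addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
					size, align, vstart, vend);
			spin_unlock(&free_vmap_area_lock);

			/*
			 * The search above may have scanned a heavily fragmented
			 * VA space, so yield once here, outside the lock, instead
			 * of once per iteration in vm_area_alloc_pages().
			 */
			cond_resched();
		}

		trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));

		if (IS_ERR_VALUE(addr))
			goto overflow;	/* purge lazily freed areas and retry */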

[akpm@linux-foundation.org: tweak comment grammar]
Link: https://lkml.kernel.org/r/20250917185906.1595454-1-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmalloc.c

index 4249e1e019479c29f8d348204663d6c5a0ecd39f..798b2ed21e46059f341ed0d46c7fe56bbe357b22 100644
@@ -2057,6 +2057,12 @@ retry:
                addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
                        size, align, vstart, vend);
                spin_unlock(&free_vmap_area_lock);
+
+               /*
+                * This is not a fast path.  Check if yielding is needed. This
+                * is the only reschedule point in the vmalloc() path.
+                */
+               cond_resched();
        }
 
        trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
@@ -3622,7 +3628,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                                                        pages + nr_allocated);
 
                        nr_allocated += nr;
-                       cond_resched();
 
                        /*
                         * If zero or pages were obtained partly,
@@ -3664,7 +3669,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                for (i = 0; i < (1U << order); i++)
                        pages[nr_allocated + i] = page + i;
 
-               cond_resched();
                nr_allocated += 1U << order;
        }