]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/fair: Use cpumask_weight_and() in sched_balance_find_dst_group()
Author: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Sun, 7 Dec 2025 03:42:47 +0000 (22:42 -0500)
Committer: Peter Zijlstra <peterz@infradead.org>
Thu, 8 Jan 2026 11:43:56 +0000 (12:43 +0100)
In the group_has_spare case, the function creates a temporary cpumask
just to calculate the weight of (p->cpus_ptr & sched_group_span(local)).

We've got a dedicated helper for it.

Signed-off-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Fernand Sieber <sieberf@amazon.com>
Link: https://patch.msgid.link/20251207034247.402926-1-yury.norov@gmail.com
kernel/sched/fair.c

index 842a0f20414f1ae88eccee7ac56d28a02c0cf5fc..ebee20f75fa02f901fd508188f85ca0f8fbd1cef 100644 (file)
@@ -10974,10 +10974,9 @@ sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int
                         * take care of it.
                         */
                        if (p->nr_cpus_allowed != NR_CPUS) {
-                               struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
-
-                               cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
-                               imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
+                               unsigned int w = cpumask_weight_and(p->cpus_ptr,
+                                                               sched_group_span(local));
+                               imb_numa_nr = min(w, sd->imb_numa_nr);
                        }
 
                        imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);