sched/mmcid: Use cpumask_weighted_or()
author    Thomas Gleixner <tglx@linutronix.de>
Wed, 19 Nov 2025 17:26:59 +0000 (18:26 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Thu, 20 Nov 2025 11:14:54 +0000 (12:14 +0100)
Use cpumask_weighted_or() instead of cpumask_or() followed by
cpumask_weight() on the result, which walks the same bitmap twice. The
combined operation takes 10-20% fewer cycles, which reduces the runqueue
lock hold time.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Link: https://patch.msgid.link/20251119172549.511736272@linutronix.de
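
For illustration, a minimal userspace sketch of the single-pass idea
behind cpumask_weighted_or(): OR two bitmaps into a destination and
count the set bits in the same walk. The helper name weighted_or(), the
fixed word count and the use of __builtin_popcountl() are assumptions
made for this sketch, not the kernel's cpumask implementation.

    #include <stdio.h>

    #define NR_WORDS 4 /* illustrative fixed-size bitmap */

    /*
     * Sketch of the fused operation: dst = src1 | src2, returning the
     * number of set bits in the result -- one walk over the bitmap
     * instead of an OR pass plus a separate popcount pass.
     */
    static unsigned int weighted_or(unsigned long *dst,
                                    const unsigned long *src1,
                                    const unsigned long *src2)
    {
            unsigned int weight = 0;

            for (int i = 0; i < NR_WORDS; i++) {
                    dst[i] = src1[i] | src2[i];
                    weight += __builtin_popcountl(dst[i]);
            }
            return weight;
    }

    int main(void)
    {
            unsigned long a[NR_WORDS] = { 0x0fUL }; /* bits 0-3 set */
            unsigned long b[NR_WORDS] = { 0xf0UL }; /* bits 4-7 set */
            unsigned long d[NR_WORDS];

            printf("weight = %u\n", weighted_or(d, a, b)); /* prints 8 */
            return 0;
    }

Fusing the popcount into the OR loop is what avoids the second pass over
mm_allowed while mm->mm_cid.lock is held, as the hunk below shows.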
kernel/sched/core.c

index 2ea77e72f7c679a5899a1aac780346d795b3f499..f6bbfa1f5c15aebaa32e57dfb8a788dcd77c1741 100644 (file)
@@ -10377,6 +10377,7 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
 {
        struct cpumask *mm_allowed;
+       unsigned int weight;
 
        if (!mm)
                return;
@@ -10387,8 +10388,8 @@ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpu
         */
        guard(raw_spinlock)(&mm->mm_cid.lock);
        mm_allowed = mm_cpus_allowed(mm);
-       cpumask_or(mm_allowed, mm_allowed, affmsk);
-       WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
+       weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
+       WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, weight);
 }
 
 void sched_mm_cid_exit_signals(struct task_struct *t)