From: Thomas Gleixner
Date: Wed, 19 Nov 2025 17:26:55 +0000 (+0100)
Subject: sched/mmcid: Prevent pointless work in mm_update_cpus_allowed()
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=0d032a43ebeb9bf255cd7e3dad5f7a6371571648;p=thirdparty%2Fkernel%2Flinux.git

sched/mmcid: Prevent pointless work in mm_update_cpus_allowed()

mm_update_cpus_allowed() is not required to be invoked for affinity
changes due to migrate_disable() and migrate_enable():

migrate_disable() restricts the task temporarily to a CPU on which the
task was already allowed to run, so nothing changes.

migrate_enable() restores the actual task affinity mask. If that mask
changed between migrate_disable() and migrate_enable(), then that change
was already accounted for.

Move the invocation to the proper place to avoid that.

Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Mathieu Desnoyers
Link: https://patch.msgid.link/20251119172549.385208276@linutronix.de
---

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f5e37c233b016..2ea77e72f7c67 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2684,6 +2684,7 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ct
 
 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+	mm_update_cpus_allowed(p->mm, ctx->new_mask);
 
 	/*
 	 * Swap in a new user_cpus_ptr if SCA_USER flag set
@@ -2730,7 +2731,6 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 		put_prev_task(rq, p);
 
 	p->sched_class->set_cpus_allowed(p, ctx);
-	mm_update_cpus_allowed(p->mm, ctx->new_mask);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -10376,12 +10376,17 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
  */
 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
 {
-	struct cpumask *mm_allowed = mm_cpus_allowed(mm);
+	struct cpumask *mm_allowed;
 
 	if (!mm)
 		return;
-	/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
+
+	/*
+	 * mm::mm_cid::mm_cpus_allowed is the superset of each threads
+	 * allowed CPUs mask which means it can only grow.
+	 */
 	guard(raw_spinlock)(&mm->mm_cid.lock);
+	mm_allowed = mm_cpus_allowed(mm);
 	cpumask_or(mm_allowed, mm_allowed, affmsk);
 	WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
 }
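
[Editor's note] A minimal userspace sketch of the invariant the new comment
describes: the per-mm mask is the OR of every thread's affinity mask and can
only grow, so re-applying a subset of it (as the migrate_disable() and
migrate_enable() transitions would) is a no-op, which is why the callback can
be skipped for those paths. The struct and function names below are
hypothetical stand-ins, not the kernel API; a 64-bit word models the cpumask
and a pthread spinlock models mm->mm_cid.lock. Build with -lpthread.

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct mm_cid_model {
		pthread_spinlock_t lock;	/* models mm->mm_cid.lock */
		uint64_t cpus_allowed;		/* models mm_cpus_allowed(mm), CPUs 0-63 */
		unsigned int nr_cpus_allowed;	/* models mm->mm_cid.nr_cpus_allowed */
	};

	/* Models mm_update_cpus_allowed(): OR the task's new affinity
	 * mask into the per-mm superset, which therefore only grows. */
	static void model_update_cpus_allowed(struct mm_cid_model *m, uint64_t affmsk)
	{
		pthread_spin_lock(&m->lock);
		m->cpus_allowed |= affmsk;
		m->nr_cpus_allowed = (unsigned int)__builtin_popcountll(m->cpus_allowed);
		pthread_spin_unlock(&m->lock);
	}

	int main(void)
	{
		struct mm_cid_model m = { .cpus_allowed = 0, .nr_cpus_allowed = 0 };

		pthread_spin_init(&m.lock, PTHREAD_PROCESS_PRIVATE);

		model_update_cpus_allowed(&m, 0x3);	/* thread A: CPUs {0,1} */
		model_update_cpus_allowed(&m, 0xc);	/* thread B: CPUs {2,3} */

		/* A migrate_disable()-style restriction to CPU 1 is a subset
		 * of what is already accounted for: ORing it in changes
		 * neither the mask nor the weight. */
		model_update_cpus_allowed(&m, 0x2);

		printf("mask=%#llx nr=%u\n",
		       (unsigned long long)m.cpus_allowed, m.nr_cpus_allowed);
		return 0;
	}

The program prints mask=0xf nr=4; the subset update leaves both values
untouched, mirroring why only real affinity mask changes need the call.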