From: Peter Zijlstra
Date: Tue, 9 Sep 2025 11:16:23 +0000 (+0200)
Subject: sched: Make __do_set_cpus_allowed() use the sched_change pattern
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=650952d3fb3889b04cbda722351b5d6090a1c10b;p=thirdparty%2Flinux.git

sched: Make __do_set_cpus_allowed() use the sched_change pattern

Now that do_set_cpus_allowed() holds all the regular locks, convert it
to use the sched_change pattern helper.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Juri Lelli
Acked-by: Tejun Heo
Acked-by: Vincent Guittot
---

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 638bffd4c1a2b..e932439ae6dab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2664,28 +2664,12 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
 static void
 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
-	struct rq *rq = task_rq(p);
-	bool queued, running;
-
-	lockdep_assert_held(&p->pi_lock);
-	lockdep_assert_rq_held(rq);
-
-	queued = task_on_rq_queued(p);
-	running = task_current_donor(rq, p);
-
-	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
+	u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK;
 
-	if (running)
-		put_prev_task(rq, p);
-
-	p->sched_class->set_cpus_allowed(p, ctx);
-	mm_set_cpus_allowed(p->mm, ctx->new_mask);
-
-	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-	if (running)
-		set_next_task(rq, p);
+	scoped_guard (sched_change, p, flags) {
+		p->sched_class->set_cpus_allowed(p, ctx);
+		mm_set_cpus_allowed(p->mm, ctx->new_mask);
+	}
 }
 
 /*
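
For context, the sched_change scoped_guard used above is expected to reproduce
the open-coded bracketing this patch removes: dequeue the task (and do
put_prev_task() if it is the running task) before the guarded body, then
re-enqueue and set_next_task() when the scope is left. The sketch below is
illustrative only, assuming hypothetical sched_change_begin()/sched_change_end()
helpers and a simplified sched_change_ctx layout; it is not the helper
introduced elsewhere in this series.

/*
 * Illustrative sketch only -- not the kernel's actual sched_change helper.
 * It mirrors the open-coded sequence removed above.
 */
struct sched_change_ctx {
	struct task_struct	*p;
	unsigned int		flags;
	bool			queued;
	bool			running;
};

/* Runs when the scoped_guard scope is entered. */
static struct sched_change_ctx sched_change_begin(struct task_struct *p,
						  unsigned int flags)
{
	struct rq *rq = task_rq(p);
	struct sched_change_ctx ctx = {
		.p	 = p,
		.flags	 = flags,
		.queued	 = task_on_rq_queued(p),
		.running = task_current_donor(rq, p),
	};

	lockdep_assert_held(&p->pi_lock);
	lockdep_assert_rq_held(rq);

	if (ctx.queued)
		dequeue_task(rq, p, flags);
	if (ctx.running)
		put_prev_task(rq, p);

	return ctx;
}

/* Runs when the scoped_guard scope is left, including on early return. */
static void sched_change_end(struct sched_change_ctx ctx)
{
	struct rq *rq = task_rq(ctx.p);

	if (ctx.queued)
		enqueue_task(rq, ctx.p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (ctx.running)
		set_next_task(rq, ctx.p);
}

With such a begin/end pair wired into the cleanup.h class/guard machinery,
do_set_cpus_allowed() only has to express the affinity update itself, and the
save/restore bracketing cannot drift out of sync with other users of the same
pattern.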