cpumask_copy(&p->cpus_mask, ctx->new_mask);
p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+ mm_update_cpus_allowed(p->mm, ctx->new_mask);

/*
 * Swap in a new user_cpus_ptr if SCA_USER flag set
 */

put_prev_task(rq, p);

p->sched_class->set_cpus_allowed(p, ctx);
- mm_update_cpus_allowed(p->mm, ctx->new_mask);

if (queued)
	enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
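Read together, the two hunks move the mm_update_cpus_allowed() call from the point after the p->sched_class->set_cpus_allowed() callback to the point where the task's own cpus_mask and nr_cpus_allowed are written. The following is a rough sketch of where the first hunk presumably lands; the enclosing function name set_cpus_allowed_common() and the SCA_USER handling after the quoted comment are assumptions based on the context lines, not part of the diff:

/*
 * Sketch only: everything outside the hunk's context lines is an
 * assumption, not taken from the patch.
 */
static void set_cpus_allowed_common(struct task_struct *p,
				    struct affinity_context *ctx)
{
	cpumask_copy(&p->cpus_mask, ctx->new_mask);
	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
	/* Update the mm-wide allowed mask together with the task's own mask. */
	mm_update_cpus_allowed(p->mm, ctx->new_mask);

	/*
	 * Swap in a new user_cpus_ptr if SCA_USER flag set
	 */
	if (ctx->flags & SCA_USER)
		swap(p->user_cpus_ptr, ctx->user_mask);
}

The second hunk then simply drops the old call site after p->sched_class->set_cpus_allowed(p, ctx), so the mm-wide mask is maintained in exactly one place.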
static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
{
- struct cpumask *mm_allowed = mm_cpus_allowed(mm);
+ struct cpumask *mm_allowed;
if (!mm)
return;
- /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
+
+ /*
+ * mm::mm_cid::mm_cpus_allowed is the superset of each thread's
+ * allowed CPUs mask, which means it can only grow.
+ */
guard(raw_spinlock)(&mm->mm_cid.lock);
+ mm_allowed = mm_cpus_allowed(mm);
cpumask_or(mm_allowed, mm_allowed, affmsk);
WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
}
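guard(raw_spinlock)(&mm->mm_cid.lock) is the scope-based locking helper from <linux/cleanup.h>: it acquires the raw spinlock and releases it automatically when the function returns, so deriving mm_allowed after the !mm check and after the guard keeps the whole read-modify-write of the mask and of nr_cpus_allowed serialized. A rough open-coded equivalent of the patched function, plus a hypothetical lockless reader (the helper name is made up here) showing what the WRITE_ONCE() is expected to pair with:

/* Open-coded equivalent of the guard()-based version above (sketch only). */
static inline void mm_update_cpus_allowed_open(struct mm_struct *mm,
					       const struct cpumask *affmsk)
{
	struct cpumask *mm_allowed;

	if (!mm)
		return;

	raw_spin_lock(&mm->mm_cid.lock);
	mm_allowed = mm_cpus_allowed(mm);
	cpumask_or(mm_allowed, mm_allowed, affmsk);
	WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
	raw_spin_unlock(&mm->mm_cid.lock);
}

/*
 * Hypothetical reader: READ_ONCE() pairs with the WRITE_ONCE() above so a
 * caller can consult the current bound without taking mm_cid.lock. The
 * helper name is illustrative only, not from the patch.
 */
static inline unsigned int mm_cid_nr_cpus_allowed(struct mm_struct *mm)
{
	return READ_ONCE(mm->mm_cid.nr_cpus_allowed);
}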