         if (kthread->preferred_affinity) {
                 pref = kthread->preferred_affinity;
         } else {
-                if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE))
-                        return;
-                pref = cpumask_of_node(kthread->node);
+                if (kthread->node == NUMA_NO_NODE)
+                        pref = housekeeping_cpumask(HK_TYPE_KTHREAD);
+                else
+                        pref = cpumask_of_node(kthread->node);
         }
         cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
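
With this hunk applied, a kthread without a NUMA node no longer trips a warning; its preferred mask simply becomes the housekeeping set. For reference, the helper then reads roughly as follows (a sketch reconstructed from the lines above; the signature and anything following cpumask_and() are assumed, as the excerpt does not show them):

    static void kthread_fetch_affinity(struct kthread *kthread,
                                       struct cpumask *cpumask)
    {
            const struct cpumask *pref;

            if (kthread->preferred_affinity) {
                    pref = kthread->preferred_affinity;
            } else {
                    /* No explicit preference: use the node's CPUs, or the
                     * whole housekeeping set when no node was requested. */
                    if (kthread->node == NUMA_NO_NODE)
                            pref = housekeeping_cpumask(HK_TYPE_KTHREAD);
                    else
                            pref = cpumask_of_node(kthread->node);
            }

            /* Whatever is preferred, never leave the housekeeping set. */
            cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
    }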
         struct kthread *kthread = to_kthread(current);
         cpumask_var_t affinity;
-        WARN_ON_ONCE(kthread_is_per_cpu(current));
+        if (WARN_ON_ONCE(kthread_is_per_cpu(current)))
+                return;
-        if (kthread->node == NUMA_NO_NODE) {
-                housekeeping_affine(current, HK_TYPE_KTHREAD);
-        } else {
-                if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
-                        WARN_ON_ONCE(1);
-                        return;
-                }
+        if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
+                WARN_ON_ONCE(1);
+                return;
+        }
-                mutex_lock(&kthread_affinity_lock);
-                WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
-                list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
-                /*
-                 * The node cpumask is racy when read from kthread() but:
-                 * - a racing CPU going down will either fail on the subsequent
-                 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
-                 *   afterwards by the scheduler.
-                 * - a racing CPU going up will be handled by kthreads_online_cpu()
-                 */
-                kthread_fetch_affinity(kthread, affinity);
-                set_cpus_allowed_ptr(current, affinity);
-                mutex_unlock(&kthread_affinity_lock);
+        mutex_lock(&kthread_affinity_lock);
+        WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+        list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
+        /*
+         * The node cpumask is racy when read from kthread() but:
+         * - a racing CPU going down will either fail on the subsequent
+         *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
+         *   afterwards by the scheduler.
+         * - a racing CPU going up will be handled by kthreads_online_cpu()
+         */
+        kthread_fetch_affinity(kthread, affinity);
+        set_cpus_allowed_ptr(current, affinity);
+        mutex_unlock(&kthread_affinity_lock);
-                free_cpumask_var(affinity);
-        }
+        free_cpumask_var(affinity);
 }
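
After this hunk, node-affine and plain unbound kthreads share a single path: the kthread is linked on kthread_affinity_list under kthread_affinity_lock so that CPU hotplug can later refresh its mask, and its initial affinity comes from kthread_fetch_affinity(). The resulting function, sketched with comments elided (the name kthread_affine_node() is an assumption, since the excerpt omits the signature):

    static void kthread_affine_node(void)
    {
            struct kthread *kthread = to_kthread(current);
            cpumask_var_t affinity;

            /* Per-CPU kthreads manage their own binding. */
            if (WARN_ON_ONCE(kthread_is_per_cpu(current)))
                    return;

            if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
                    WARN_ON_ONCE(1);
                    return;
            }

            mutex_lock(&kthread_affinity_lock);
            WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
            list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
            kthread_fetch_affinity(kthread, affinity);
            set_cpus_allowed_ptr(current, affinity);
            mutex_unlock(&kthread_affinity_lock);

            free_cpumask_var(affinity);
    }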
@@ ... @@ static int kthread(void *_create)
                         ret = -EINVAL;
                         continue;
                 }
-                kthread_fetch_affinity(k, affinity);
-                set_cpus_allowed_ptr(k->task, affinity);
+
+                /*
+                 * Unbound kthreads without a preferred affinity are already
+                 * affine to the housekeeping CPUs, whether those are online
+                 * or not, so newly online CPUs need no handling for them.
+                 *
+                 * Kthreads with a preferred affinity or node are different:
+                 * as long as none of their preferred CPUs is both online and
+                 * part of housekeeping at the same time, they must be affine
+                 * to the housekeeping CPUs. But as soon as one of their
+                 * preferred CPUs comes online, they must be made affine to it.
+                 */
+                if (k->preferred_affinity || k->node != NUMA_NO_NODE) {
+                        kthread_fetch_affinity(k, affinity);
+                        set_cpus_allowed_ptr(k->task, affinity);
+                }
         }
         free_cpumask_var(affinity);
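
For context, the k->preferred_affinity tested above is recorded by the kthread's creator before the thread first runs. A hypothetical caller could look like this (my_thread_fn is made up, and kthread_affine_preferred() is assumed from the same patch series; neither appears in this excerpt):

    /* Hypothetical: create a kthread that prefers the CPUs of node 0. */
    struct task_struct *task;

    task = kthread_create(my_thread_fn, NULL, "my_kthread");
    if (!IS_ERR(task)) {
            /*
             * Record the preference before first wakeup. While none of the
             * preferred CPUs is online and housekeeping, the kthread runs on
             * the housekeeping CPUs; once one comes up, kthreads_online_cpu()
             * re-affines it as shown above.
             */
            kthread_affine_preferred(task, cpumask_of_node(0));
            wake_up_process(task);
    }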