static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
-static LIST_HEAD(kthreads_hotplug);
-static DEFINE_MUTEX(kthreads_hotplug_lock);
+static LIST_HEAD(kthread_affinity_list);
+static DEFINE_MUTEX(kthread_affinity_lock);
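/*
 * Kthreads that have a preferred affinity (an explicit mask or a preferred
 * NUMA node) link themselves on kthread_affinity_list so that the CPU
 * hotplug path can recompute and re-apply their allowed mask later on.
 * kthread_affinity_lock protects the list and is held while an entry's
 * affinity is applied.
 */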
struct kthread {
/* To store the full name if task comm is truncated. */
char *full_name;
struct task_struct *task;
- struct list_head hotplug_node;
+ struct list_head affinity_node;
struct cpumask *preferred_affinity;
};
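/*
 * Illustrative usage sketch (not part of this file): how a caller ends up
 * populating the members above. kthread_affine_preferred() copies the mask
 * into kthread->preferred_affinity and links the thread on
 * kthread_affinity_list before it first runs; the exit path below frees the
 * mask again. example_spawn_affine_worker(), my_worker_fn() and the node-0
 * mask are made-up example values, not kernel APIs.
 */
static int my_worker_fn(void *data)
{
	/* Minimal loop, purely for the example. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int example_spawn_affine_worker(void)
{
	struct task_struct *tsk;
	int ret;

	tsk = kthread_create(my_worker_fn, NULL, "my_worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* Must run before the thread is first woken up. */
	ret = kthread_affine_preferred(tsk, cpumask_of_node(0));
	if (ret) {
		kthread_stop(tsk);
		return ret;
	}

	wake_up_process(tsk);
	return 0;
}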
init_completion(&kthread->exited);
init_completion(&kthread->parked);
- INIT_LIST_HEAD(&kthread->hotplug_node);
+ INIT_LIST_HEAD(&kthread->affinity_node);
p->vfork_done = &kthread->exited;
kthread->task = p;
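/*
 * Exit path: a kthread that registered for affinity management unlinks
 * itself from kthread_affinity_list and frees its preferred_affinity
 * mask, if any.
 */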
void __noreturn kthread_exit(long result)
{
struct kthread *kthread = to_kthread(current);
kthread->result = result;
- if (!list_empty(&kthread->hotplug_node)) {
- mutex_lock(&kthreads_hotplug_lock);
- list_del(&kthread->hotplug_node);
- mutex_unlock(&kthreads_hotplug_lock);
+ if (!list_empty(&kthread->affinity_node)) {
+ mutex_lock(&kthread_affinity_lock);
+ list_del(&kthread->affinity_node);
+ mutex_unlock(&kthread_affinity_lock);
if (kthread->preferred_affinity) {
	kfree(kthread->preferred_affinity);
	kthread->preferred_affinity = NULL;
}
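/*
 * Startup path for node-affine kthreads: the thread registers itself on
 * kthread_affinity_list and applies its node's cpumask before running the
 * threadfn.
 */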
- mutex_lock(&kthreads_hotplug_lock);
- WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
- list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+ mutex_lock(&kthread_affinity_lock);
+ WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+ list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
/*
 * The node cpumask is racy when read from kthread() but:
 * - a racing CPU going down will either fail on the subsequent
 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
 *   afterwards by the scheduler.
 * - a racing CPU coming up will be handled by kthreads_online_cpu().
 */
kthread_fetch_affinity(kthread, affinity);
set_cpus_allowed_ptr(current, affinity);
- mutex_unlock(&kthreads_hotplug_lock);
+ mutex_unlock(&kthread_affinity_lock);
free_cpumask_var(affinity);
}
goto out;
}
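/*
 * kthread_affine_preferred() path: record the caller's mask, publish the
 * kthread on kthread_affinity_list, then compute and apply the effective
 * cpumask under the task's pi_lock, all while holding kthread_affinity_lock.
 */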
- mutex_lock(&kthreads_hotplug_lock);
+ mutex_lock(&kthread_affinity_lock);
cpumask_copy(kthread->preferred_affinity, mask);
- WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
- list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+ WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+ list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
kthread_fetch_affinity(kthread, affinity);
scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
set_cpus_allowed_force(p, affinity);
- mutex_unlock(&kthreads_hotplug_lock);
+ mutex_unlock(&kthread_affinity_lock);
out:
free_cpumask_var(affinity);
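/*
 * CPU-online hotplug callback: walk kthread_affinity_list and re-apply
 * each registered kthread's affinity against the updated set of online
 * CPUs. Per-CPU kthreads and PF_NO_SETAFFINITY tasks are never expected
 * on this list.
 */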
cpumask_var_t affinity;
struct kthread *k;
int ret;
- guard(mutex)(&kthreads_hotplug_lock);
+ guard(mutex)(&kthread_affinity_lock);
- if (list_empty(&kthreads_hotplug))
+ if (list_empty(&kthread_affinity_list))
return 0;
if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
	return -ENOMEM;
ret = 0;
- list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
+ list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
kthread_is_per_cpu(k->task))) {
ret = -EINVAL;