kthread: Refine naming of affinity related fields
author    Frederic Weisbecker <frederic@kernel.org>
          Mon, 12 May 2025 09:37:11 +0000 (11:37 +0200)
committer Frederic Weisbecker <frederic@kernel.org>
          Tue, 3 Feb 2026 14:23:35 +0000 (15:23 +0100)
The kthreads' preferred affinity related fields use "hotplug" as the base
of their naming because the affinity management was initially only meant
to deal with CPU hotplug.

The scope of this role is now going to broaden to also cover cpuset
isolated partition updates.

Switch the naming accordingly.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Marco Crivellari <marco.crivellari@suse.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
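
For context, the renamed fields back the preferred-affinity machinery behind
kthread_affine_preferred(), which records a kthread on the affinity list so
its mask can be re-applied as CPUs come and go. A minimal, hypothetical caller
could look like the sketch below (the thread function, the names and the
cpumask_of_node(0) choice are illustrative, not part of this commit); the
preference is meant to be set after kthread_create() and before the thread is
first woken up:

    #include <linux/kthread.h>
    #include <linux/topology.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static int my_worker_fn(void *data)
    {
            while (!kthread_should_stop())
                    msleep_interruptible(1000);
            return 0;
    }

    static int my_worker_start(void)
    {
            struct task_struct *t;
            int ret;

            t = kthread_create(my_worker_fn, NULL, "my_worker");
            if (IS_ERR(t))
                    return PTR_ERR(t);

            /* Prefer node 0's CPUs while at least one of them is online. */
            ret = kthread_affine_preferred(t, cpumask_of_node(0));
            if (ret) {
                    kthread_stop(t);
                    return ret;
            }

            wake_up_process(t);
            return 0;
    }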
kernel/kthread.c

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 99a3808d086f081eabb45daf3450184182dfc8a2..f1e4f1f35caee23314f8d6965e363111b01f07eb 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -35,8 +35,8 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
-static LIST_HEAD(kthreads_hotplug);
-static DEFINE_MUTEX(kthreads_hotplug_lock);
+static LIST_HEAD(kthread_affinity_list);
+static DEFINE_MUTEX(kthread_affinity_lock);
 
 struct kthread_create_info
 {
@@ -69,7 +69,7 @@ struct kthread {
        /* To store the full name if task comm is truncated. */
        char *full_name;
        struct task_struct *task;
-       struct list_head hotplug_node;
+       struct list_head affinity_node;
        struct cpumask *preferred_affinity;
 };
 
@@ -128,7 +128,7 @@ bool set_kthread_struct(struct task_struct *p)
 
        init_completion(&kthread->exited);
        init_completion(&kthread->parked);
-       INIT_LIST_HEAD(&kthread->hotplug_node);
+       INIT_LIST_HEAD(&kthread->affinity_node);
        p->vfork_done = &kthread->exited;
 
        kthread->task = p;
@@ -323,10 +323,10 @@ void __noreturn kthread_exit(long result)
 {
        struct kthread *kthread = to_kthread(current);
        kthread->result = result;
-       if (!list_empty(&kthread->hotplug_node)) {
-               mutex_lock(&kthreads_hotplug_lock);
-               list_del(&kthread->hotplug_node);
-               mutex_unlock(&kthreads_hotplug_lock);
+       if (!list_empty(&kthread->affinity_node)) {
+               mutex_lock(&kthread_affinity_lock);
+               list_del(&kthread->affinity_node);
+               mutex_unlock(&kthread_affinity_lock);
 
                if (kthread->preferred_affinity) {
                        kfree(kthread->preferred_affinity);
@@ -390,9 +390,9 @@ static void kthread_affine_node(void)
                        return;
                }
 
-               mutex_lock(&kthreads_hotplug_lock);
-               WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-               list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+               mutex_lock(&kthread_affinity_lock);
+               WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+               list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
                /*
                 * The node cpumask is racy when read from kthread() but:
                 * - a racing CPU going down will either fail on the subsequent
@@ -402,7 +402,7 @@ static void kthread_affine_node(void)
                 */
                kthread_fetch_affinity(kthread, affinity);
                set_cpus_allowed_ptr(current, affinity);
-               mutex_unlock(&kthreads_hotplug_lock);
+               mutex_unlock(&kthread_affinity_lock);
 
                free_cpumask_var(affinity);
        }
@@ -873,16 +873,16 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
                goto out;
        }
 
-       mutex_lock(&kthreads_hotplug_lock);
+       mutex_lock(&kthread_affinity_lock);
        cpumask_copy(kthread->preferred_affinity, mask);
-       WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
-       list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
+       WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
+       list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
        kthread_fetch_affinity(kthread, affinity);
 
        scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
                set_cpus_allowed_force(p, affinity);
 
-       mutex_unlock(&kthreads_hotplug_lock);
+       mutex_unlock(&kthread_affinity_lock);
 out:
        free_cpumask_var(affinity);
 
@@ -903,9 +903,9 @@ static int kthreads_online_cpu(unsigned int cpu)
        struct kthread *k;
        int ret;
 
-       guard(mutex)(&kthreads_hotplug_lock);
+       guard(mutex)(&kthread_affinity_lock);
 
-       if (list_empty(&kthreads_hotplug))
+       if (list_empty(&kthread_affinity_list))
                return 0;
 
        if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
@@ -913,7 +913,7 @@ static int kthreads_online_cpu(unsigned int cpu)
 
        ret = 0;
 
-       list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
+       list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
                if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
                                 kthread_is_per_cpu(k->task))) {
                        ret = -EINVAL;
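
The kthreads_online_cpu() callback updated above runs from the CPU hotplug
state machine and walks the (now renamed) kthread_affinity_list to re-apply
each registered kthread's preferred mask when a CPU comes online. As a rough,
hypothetical sketch of how such an online-only callback can be wired up
(using the dynamic CPUHP_AP_ONLINE_DYN state and the function/state names
purely for illustration; the actual hotplug state and init hook used by
kernel/kthread.c may differ):

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int example_kthreads_init(void)
    {
            /*
             * Startup-only registration: the NULL teardown means nothing is
             * called when a CPU goes down; the preferred affinity is only
             * recomputed on the online path.
             */
            return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "kthreads:online",
                                     kthreads_online_cpu, NULL);
    }
    early_initcall(example_kthreads_init);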