cpu: Revert "cpu/hotplug: Prevent self deadlock on CPU hot-unplug"
author Frederic Weisbecker <frederic@kernel.org>
Mon, 25 Aug 2025 16:01:58 +0000 (18:01 +0200)
committer Frederic Weisbecker <frederic@kernel.org>
Thu, 22 Jan 2026 17:32:41 +0000 (18:32 +0100)
1) The commit:

2b8272ff4a70 ("cpu/hotplug: Prevent self deadlock on CPU hot-unplug")

was added to fix an issue where the hotplug control task (BP) got
throttled between the CPUHP_AP_IDLE_DEAD and CPUHP_HRTIMERS_PREPARE
states, waiting in the hrtimer blind spot for the bandwidth callback
still queued on the dead CPU.

2) Later on, the commit:

38685e2a0476 ("cpu/hotplug: Don't offline the last non-isolated CPU")

hooked into the target CPU selection of the workqueue-offloaded CPU down
process to avoid destroying the last scheduler domain.

3) Finally:

5c0930ccaad5 ("hrtimers: Push pending hrtimers away from outgoing CPU earlier")

entirely removed the conditions for the race exposed, and partially
fixed, in 1). Offloading the CPU down process to a workqueue on another
CPU therefore becomes unnecessary. But the last CPU belonging to the
scheduler domains must still remain online.

Therefore revert the now obsolete commit
2b8272ff4a70b866106ae13c36be7ecbef5d5da2 and move the housekeeping check
under the cpu_hotplug_lock held for write. Since HK_TYPE_DOMAIN will
include both isolcpus and cpuset isolated partitions, the hotplug lock
will synchronize against concurrent cpuset partition updates.
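
As a side note on the guard added below: cpumask_any_and() returns a CPU
number below nr_cpu_ids when the two masks intersect, and a value greater
than or equal to nr_cpu_ids when the intersection is empty, which is what
the new -EBUSY bail-out keys on. A minimal, hypothetical helper (not part
of this patch) illustrating the idiom:

  #include <linux/cpumask.h>
  #include <linux/sched/isolation.h>

  /* True if at least one online CPU still carries scheduler domains. */
  static bool have_online_domain_cpu(void)
  {
          return cpumask_any_and(cpu_online_mask,
                                 housekeeping_cpumask(HK_TYPE_DOMAIN)) < nr_cpu_ids;
  }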

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Marco Crivellari <marco.crivellari@suse.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
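
To see how the simplification surfaces to callers, here is a hypothetical
in-kernel caller sketch (not part of this patch): with the workqueue
offload gone, cpu_down_maps_locked() calls _cpu_down() directly, and an
attempt to unplug the last online housekeeping CPU now fails early with
-EBUSY (see the hunks below).

  #include <linux/cpu.h>
  #include <linux/errno.h>
  #include <linux/printk.h>

  /* Hypothetical helper: offline a CPU and log the housekeeping refusal. */
  static int try_unplug_cpu(unsigned int cpu)
  {
          int ret = remove_cpu(cpu);  /* regular CPU device offline path */

          if (ret == -EBUSY)
                  pr_info("cpu%u: offline refused (e.g. last housekeeping CPU)\n",
                          cpu);
          return ret;
  }
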
kernel/cpu.c

index 8df2d773fe3b5258caf5ab01539650294a474843..40b8496f47c5eeb18b513474fcd501a1116bdd8c 100644
@@ -1410,6 +1410,16 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpus_write_lock();
 
+       /*
+        * Keep at least one housekeeping cpu onlined to avoid generating
+        * an empty sched_domain span.
+        */
+       if (cpumask_any_and(cpu_online_mask,
+                           housekeeping_cpumask(HK_TYPE_DOMAIN)) >= nr_cpu_ids) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        cpuhp_tasks_frozen = tasks_frozen;
 
        prev_state = cpuhp_set_state(cpu, st, target);
@@ -1456,22 +1466,8 @@ out:
        return ret;
 }
 
-struct cpu_down_work {
-       unsigned int            cpu;
-       enum cpuhp_state        target;
-};
-
-static long __cpu_down_maps_locked(void *arg)
-{
-       struct cpu_down_work *work = arg;
-
-       return _cpu_down(work->cpu, 0, work->target);
-}
-
 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
 {
-       struct cpu_down_work work = { .cpu = cpu, .target = target, };
-
        /*
         * If the platform does not support hotplug, report it explicitly to
         * differentiate it from a transient offlining failure.
@@ -1480,18 +1476,7 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
                return -EOPNOTSUPP;
        if (cpu_hotplug_disabled)
                return -EBUSY;
-
-       /*
-        * Ensure that the control task does not run on the to be offlined
-        * CPU to prevent a deadlock against cfs_b->period_timer.
-        * Also keep at least one housekeeping cpu onlined to avoid generating
-        * an empty sched_domain span.
-        */
-       for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
-               if (cpu != work.cpu)
-                       return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
-       }
-       return -EBUSY;
+       return _cpu_down(cpu, 0, target);
 }
 
 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
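
For readability, this is roughly how cpu_down_maps_locked() ends up
looking in kernel/cpu.c with the hunks above applied (a sketch
reconstructed from the diff; the platform-support check visible only in
the unchanged context is elided):

  static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
  {
          /*
           * If the platform does not support hotplug, report it explicitly to
           * differentiate it from a transient offlining failure.
           * (The actual platform check lives in the unchanged context lines
           * and is elided from this sketch.)
           */
          if (cpu_hotplug_disabled)
                  return -EBUSY;
          return _cpu_down(cpu, 0, target);
  }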