cpus_write_lock();
+ /*
+ * Keep at least one housekeeping CPU online to avoid generating
+ * an empty sched_domain span.
+ */
+ if (cpumask_any_and(cpu_online_mask,
+ housekeeping_cpumask(HK_TYPE_DOMAIN)) >= nr_cpu_ids) {
+ ret = -EBUSY;
+ goto out;
+ }
+
cpuhp_tasks_frozen = tasks_frozen;
prev_state = cpuhp_set_state(cpu, st, target);
return ret;
}
-struct cpu_down_work {
- unsigned int cpu;
- enum cpuhp_state target;
-};
-
-static long __cpu_down_maps_locked(void *arg)
-{
- struct cpu_down_work *work = arg;
-
- return _cpu_down(work->cpu, 0, work->target);
-}
-
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
- struct cpu_down_work work = { .cpu = cpu, .target = target, };
-
/*
* If the platform does not support hotplug, report it explicitly to
* differentiate it from a transient offlining failure.
return -EOPNOTSUPP;
if (cpu_hotplug_disabled)
return -EBUSY;
-
- /*
- * Ensure that the control task does not run on the to be offlined
- * CPU to prevent a deadlock against cfs_b->period_timer.
- * Also keep at least one housekeeping cpu onlined to avoid generating
- * an empty sched_domain span.
- */
- for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
- if (cpu != work.cpu)
- return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
- }
- return -EBUSY;
+ return _cpu_down(cpu, 0, target);
}
static int cpu_down(unsigned int cpu, enum cpuhp_state target)