/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}
/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in cpu_notify_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr) {
			st->should_run = false;
			goto next;
		}
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:dead",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */
#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif
	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}
int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	this_cpu_write(cpuhp_state.booted_once, true);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);