/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <linux/cc_platform.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	Current CPU hotplug callback state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Remote CPU node; for multi-instance, do a
 *		single entry callback for install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @ap_sync_state:	State for AP synchronization
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	atomic_t		ap_sync_state;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance:	State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	/* private: */
	struct hlist_head	list;
	/* public: */
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
	return bringup ? !step->startup.single : !step->teardown.single;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 *
 * Return: %0 on success or a negative errno code
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;
		return -EAGAIN;
	}

	if (cpuhp_step_empty(bringup, step)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;

		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}

/*
 * The former STARTING/DYING states, run with IRQs disabled, must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
/* Synchronization state management */
enum cpuhp_sync_state {
	SYNC_STATE_DEAD,
	SYNC_STATE_KICKED,
	SYNC_STATE_SHOULD_DIE,
	SYNC_STATE_ALIVE,
	SYNC_STATE_SHOULD_ONLINE,
	SYNC_STATE_ONLINE,
};
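
/*
 * Bringup transitions: cpuhp_can_boot_ap() moves DEAD/KICKED/ALIVE to
 * KICKED, the AP reports ALIVE in cpuhp_ap_sync_alive() and spins until
 * the control CPU sets SHOULD_ONLINE, and cpuhp_online_idle() finally
 * reports ONLINE. Teardown transitions: cpuhp_bp_sync_dead() sets
 * SHOULD_DIE and the dying AP reports DEAD via cpuhp_ap_report_dead().
 */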
#ifdef CONFIG_HOTPLUG_CORE_SYNC
/**
 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
 * @state:	The synchronization state to set
 *
 * No synchronization point. Just update of the synchronization state, but implies
 * a full barrier so that the AP changes are visible before the control CPU proceeds.
 */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

	(void)atomic_xchg(st, state);
}

void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }

static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
				      enum cpuhp_sync_state next_state)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	ktime_t now, end, start = ktime_get();
	int sync;

	end = start + 10ULL * NSEC_PER_SEC;

	sync = atomic_read(st);
	while (1) {
		if (sync == state) {
			if (!atomic_try_cmpxchg(st, &sync, next_state))
				continue;
			return true;
		}

		now = ktime_get();
		if (now > end) {
			/* Timeout. Leave the state unchanged */
			return false;
		} else if (now - start < NSEC_PER_MSEC) {
			/* Poll for one millisecond */
			arch_cpuhp_sync_state_poll();
		} else {
			usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
		}
		sync = atomic_read(st);
	}
	return true;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC */
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
/**
 * cpuhp_ap_report_dead - Update synchronization state to DEAD
 *
 * No synchronization point. Just update of the synchronization state.
 */
void cpuhp_ap_report_dead(void)
{
	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}

void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }

/*
 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
 * because the AP cannot issue complete() at this stage.
 */
static void cpuhp_bp_sync_dead(unsigned int cpu)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	int sync = atomic_read(st);

	do {
		/* CPU can have reported dead already. Don't overwrite that! */
		if (sync == SYNC_STATE_DEAD)
			break;
	} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));

	if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
		/* CPU reached dead state. Invoke the cleanup function */
		arch_cpuhp_cleanup_dead_cpu(cpu);
		return;
	}

	/* No further action possible. Emit message and give up. */
	pr_err("CPU%u failed to report dead state\n", cpu);
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
/**
 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
 *
 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
 * for the BP to release it.
 */
void cpuhp_ap_sync_alive(void)
{
	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);

	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);

	/* Wait for the control CPU to release it. */
	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
		cpu_relax();
}

static bool cpuhp_can_boot_ap(unsigned int cpu)
{
	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
	int sync = atomic_read(st);

again:
	switch (sync) {
	case SYNC_STATE_DEAD:
		/* CPU is properly dead */
		break;
	case SYNC_STATE_KICKED:
		/* CPU did not come up in previous attempt */
		break;
	case SYNC_STATE_ALIVE:
		/* CPU is stuck in cpuhp_ap_sync_alive(). */
		break;
	default:
		/* CPU failed to report online or dead and is in limbo state. */
		return false;
	}

	/* Prepare for booting */
	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
		goto again;

	return true;
}

void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }

/*
 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
 * because the AP cannot issue complete() so early in the bringup.
 */
static int cpuhp_bp_sync_alive(unsigned int cpu)
{
	int ret = 0;

	if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
		return 0;

	if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
		pr_err("CPU%u failed to report alive state\n", cpu);
		ret = -EIO;
	}

	/* Let the architecture cleanup the kick alive mechanics. */
	arch_cpuhp_cleanup_kick_cpu(cpu);
	return ret;
}
#else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
#endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
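
/*
 * Minimal usage sketch (illustrative only, not an actual caller in this
 * file):
 *
 *	cpu_maps_update_begin();
 *	... update cpu_present_mask or cpu_hotplug_disabled ...
 *	cpu_maps_update_done();
 */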
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
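
/*
 * Typical reader-side pattern (illustrative sketch only; do_something()
 * is a hypothetical callee):
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	cpus_read_unlock();
 *
 * No CPU can be hotplugged or hotunplugged between the lock/unlock pair.
 */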
void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
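
/*
 * The calls must be balanced. Illustrative sketch only:
 *
 *	cpu_hotplug_disable();
 *	... section which must not race with sysfs-driven cpu_up()/cpu_down() ...
 *	cpu_hotplug_enable();
 */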
#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT

enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
	cpu_smt_num_threads = 1;
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_set_num_threads(unsigned int num_threads,
				    unsigned int max_threads)
{
	WARN_ON(!num_threads || (num_threads > max_threads));

	if (max_threads == 1)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;

	cpu_smt_max_threads = max_threads;

	/*
	 * If SMT has been disabled via the kernel command line or SMT is
	 * not supported, set cpu_smt_num_threads to 1 for consistency.
	 * If enabled, take the architecture requested number of threads
	 * to bring up into account.
	 */
	if (cpu_smt_control != CPU_SMT_ENABLED)
		cpu_smt_num_threads = 1;
	else if (num_threads < cpu_smt_num_threads)
		cpu_smt_num_threads = num_threads;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
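
/*
 * Command line usage: "nosmt" sets CPU_SMT_DISABLED, while "nosmt=force"
 * sets CPU_SMT_FORCE_DISABLED, which cpu_smt_possible() below treats as
 * irreversible.
 */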
/*
 * For architectures supporting partial SMT states, check if the thread is
 * allowed. Otherwise this has already been checked through
 * cpu_smt_max_threads when setting the SMT level.
 */
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{
#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
	return topology_smt_thread_allowed(cpu);
#else
	return true;
#endif
}

static inline bool cpu_bootable(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
		return true;

	/* All CPUs are bootable if controls are not configured */
	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
		return true;

	/* All CPUs are bootable if CPU is not SMT capable */
	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);

#else
static inline bool cpu_bootable(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	bool bringup = st->state < target;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = bringup;
	if (cpu_dying(cpu) != !bringup)
		set_cpu_dying(cpu, !bringup);

	return prev_state;
}

static inline void
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
		  enum cpuhp_state prev_state)
{
	bool bringup = !st->bringup;

	st->target = prev_state;

	/*
	 * Already rolling back. No need to invert the bringup value or to
	 * change the current state.
	 */
	if (st->rollback)
		return;

	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->bringup = bringup;
	if (cpu_dying(cpu) != !bringup)
		set_cpu_dying(cpu, !bringup);
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
			 enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(cpu, st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(cpu, st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap_online(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_bootable() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_bootable(cpu))
		return -ECANCELED;
	return 0;
}
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
static int cpuhp_kick_ap_alive(unsigned int cpu)
{
	if (!cpuhp_can_boot_ap(cpu))
		return -EAGAIN;

	return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
}

static int cpuhp_bringup_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	ret = cpuhp_bp_sync_alive(cpu);
	if (ret)
		goto out_unlock;

	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}
#else
static int bringup_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	if (!cpuhp_can_boot_ap(cpu))
		return -EAGAIN;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 *
	 * Prevent irq alloc/free across the bringup by acquiring the
	 * sparse irq lock. Hold it until the upcoming CPU completes the
	 * startup in cpuhp_online_idle() which allows to avoid
	 * intermediate synchronization points in the architecture code.
	 */
	irq_lock_sparse();

	ret = __cpu_up(cpu, idle);
	if (ret)
		goto out_unlock;

	ret = cpuhp_bp_sync_alive(cpu);
	if (ret)
		goto out_unlock;

	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}
#endif
static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop_lazy_tlb(mm);
	return 0;
}
/*
 * Hotplug state machine related functions
 */

/*
 * Get the next state to run. Empty ones will be skipped. Returns true if a
 * state must be run.
 *
 * st->state will be modified ahead of time, to match state_to_run, as if it
 * had already run.
 */
static bool cpuhp_next_state(bool bringup,
			     enum cpuhp_state *state_to_run,
			     struct cpuhp_cpu_state *st,
			     enum cpuhp_state target)
{
	do {
		if (bringup) {
			if (st->state >= target)
				return false;

			*state_to_run = ++st->state;
		} else {
			if (st->state <= target)
				return false;

			*state_to_run = st->state--;
		}

		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
			break;
	} while (true);

	return true;
}

static int __cpuhp_invoke_callback_range(bool bringup,
					 unsigned int cpu,
					 struct cpuhp_cpu_state *st,
					 enum cpuhp_state target,
					 bool nofail)
{
	enum cpuhp_state state;
	int ret = 0;

	while (cpuhp_next_state(bringup, &state, st, target)) {
		int err;

		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
		if (!err)
			continue;

		if (nofail) {
			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
				cpu, bringup ? "UP" : "DOWN",
				cpuhp_get_step(st->state)->name,
				st->state, err);
			ret = -1;
		} else {
			ret = err;
			break;
		}
	}

	return ret;
}

static inline int cpuhp_invoke_callback_range(bool bringup,
					      unsigned int cpu,
					      struct cpuhp_cpu_state *st,
					      enum cpuhp_state target)
{
	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
}

static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
						      unsigned int cpu,
						      struct cpuhp_cpu_state *st,
						      enum cpuhp_state target)
{
	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
	if (ret) {
		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);

		cpuhp_reset_state(cpu, st, prev_state);
		if (can_rollback_cpu(st))
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
							    prev_state));
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
		if (!st->should_run)
			goto end;
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

end:
	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(cpu, st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

static __init void cpuhp_init_state(void)
{
	struct cpuhp_cpu_state *st;
	int cpu;

	for_each_possible_cpu(cpu) {
		st = per_cpu_ptr(&cpuhp_state, cpu);
		init_completion(&st->done_up);
		init_completion(&st->done_down);
	}
}

void __init cpuhp_threads_init(void)
{
	cpuhp_init_state();
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
/*
 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
 * protected region.
 *
 * The operation is still serialized against concurrent CPU hotplug via
 * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
 * serialized against other hotplug related activity like adding or
 * removing of state callbacks and state instances, which invoke either the
 * startup or the teardown callback of the affected state.
 *
 * This is required for subsystems which are unfixable vs. CPU hotplug and
 * evade lock inversion problems by scheduling work which has to be
 * completed _before_ cpu_up()/_cpu_down() returns.
 *
 * Don't even think about adding anything to this for any new code or even
 * drivers. Its only purpose is to keep existing lock order trainwrecks
 * working.
 *
 * For cpu_down() there might be valid reasons to finish cleanups which are
 * not required to be done under cpu_hotplug_lock, but that's a different
 * story and would not be invoked via this.
 */
static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
{
	/*
	 * cpusets delegate hotplug operations to a worker to "solve" the
	 * lock order problems. Wait for the worker, but only if tasks are
	 * _not_ frozen (suspend, hibernate) as that would wait forever.
	 *
	 * The wait is required because otherwise the hotplug operation
	 * returns with inconsistent state, which could even be observed in
	 * user space when a new CPU is brought up. The CPU plug uevent
	 * would be delivered and user space reacting on it would fail to
	 * move tasks to the newly plugged CPU up to the point where the
	 * work has finished because up to that point the newly plugged CPU
	 * is not assignable in cpusets/cgroups. On unplug that's not
	 * necessarily a visible issue, but it is still inconsistent state,
	 * which is the real problem which needs to be "fixed". This can't
	 * prevent the transient state between scheduling the work and
	 * returning from waiting for it.
	 */
	if (!tasks_frozen)
		cpuset_wait_for_hotplug();
}
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
	 */
	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

	/*
	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
	 */
	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(st->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(st->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	cpuhp_bp_sync_dead(cpu);

	tick_cleanup_dead_cpu(cpu);

	/*
	 * Callbacks must be re-integrated right away to the RCU state machine.
	 * Otherwise an RCU callback could block a further teardown function
	 * waiting for its completion.
	 */
	rcutree_migrate_callbacks(cpu);

	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcutree_report_cpu_dead();
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
	if (ret) {
		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
			 ret, cpu, cpuhp_get_step(st->state)->name,
			 st->state);

		cpuhp_reset_state(cpu, st, prev_state);

		if (st->state < prev_state)
			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
							    prev_state));
	}

	return ret;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(cpu, st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state < prev_state) {
		if (st->state == CPUHP_TEARDOWN_CPU) {
			cpuhp_reset_state(cpu, st, prev_state);
			__cpuhp_kick_ap(st);
		} else {
			WARN(1, "DEAD callback error for CPU%d", cpu);
		}
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	cpu_up_down_serialize_trainwrecks(tasks_frozen);
	return ret;
}
struct cpu_down_work {
	unsigned int		cpu;
	enum cpuhp_state	target;
};

static long __cpu_down_maps_locked(void *arg)
{
	struct cpu_down_work *work = arg;

	return _cpu_down(work->cpu, 0, work->target);
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	struct cpu_down_work work = { .cpu = cpu, .target = target, };

	/*
	 * If the platform does not support hotplug, report it explicitly to
	 * differentiate it from a transient offlining failure.
	 */
	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
		return -EOPNOTSUPP;
	if (cpu_hotplug_disabled)
		return -EBUSY;

	/*
	 * Ensure that the control task does not run on the to be offlined
	 * CPU to prevent a deadlock against cfs_b->period_timer.
	 * Also keep at least one housekeeping cpu onlined to avoid generating
	 * an empty sched_domain span.
	 */
	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
		if (cpu != work.cpu)
			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
	}
	return -EBUSY;
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}
/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);
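
/*
 * Illustrative sketch only: offlining a CPU from kernel code outside the
 * device core goes through remove_cpu(), which uses device_offline() and
 * thus keeps the device core's view of the CPU consistent:
 *
 *	if (remove_cpu(3))
 *		pr_err("Failed to offline CPU3\n");
 */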
void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This is in line with what migrate_to_reboot_cpu() already does.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif /* CONFIG_HOTPLUG_CPU */
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcutree_report_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);

	/*
	 * STARTING must not fail!
	 */
	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of cpu_up() might have raced with another
	 * caller. Nothing to do.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}

		/*
		 * Reset stale stack state from the last time this CPU was online.
		 */
		scs_task_reset(idle);
		kasan_unpoison_task_stack(idle);
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(cpu, st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	cpu_up_down_serialize_trainwrecks(tasks_frozen);
	return ret;
}

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_bootable(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}
/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}

int add_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
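
/*
 * Illustrative sketch only, mirroring remove_cpu() above:
 *
 *	if (add_cpu(3))
 *		pr_err("Failed to online CPU3\n");
 */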
/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 *
 * Return: %0 on success or a negative errno code
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}

static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
				      enum cpuhp_state target)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
			/*
			 * If this failed then cpu_up() might have only
			 * rolled back to CPUHP_BP_KICK_AP for the final
			 * online. Clean it up. NOOP if already rolled back.
			 */
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
		}

		if (!--ncpus)
			break;
	}
}
1845 static bool __cpuhp_parallel_bringup __ro_after_init
= true;
1847 static int __init
parallel_bringup_parse_param(char *arg
)
1849 return kstrtobool(arg
, &__cpuhp_parallel_bringup
);
1851 early_param("cpuhp.parallel", parallel_bringup_parse_param
);
1853 static inline bool cpuhp_smt_aware(void)
1855 return cpu_smt_max_threads
> 1;
1858 static inline const struct cpumask
*cpuhp_get_primary_thread_mask(void)
1860 return cpu_primary_thread_mask
;
1864 * On architectures which have enabled parallel bringup this invokes all BP
1865 * prepare states for each of the to be onlined APs first. The last state
1866 * sends the startup IPI to the APs. The APs proceed through the low level
1867 * bringup code in parallel and then wait for the control CPU to release
1868 * them one by one for the final onlining procedure.
1870 * This avoids waiting for each AP to respond to the startup IPI in
1871 * CPUHP_BRINGUP_CPU.
1873 static bool __init
cpuhp_bringup_cpus_parallel(unsigned int ncpus
)
1875 const struct cpumask
*mask
= cpu_present_mask
;
1877 if (__cpuhp_parallel_bringup
)
1878 __cpuhp_parallel_bringup
= arch_cpuhp_init_parallel_bringup();
1879 if (!__cpuhp_parallel_bringup
)
1882 if (cpuhp_smt_aware()) {
1883 const struct cpumask
*pmask
= cpuhp_get_primary_thread_mask();
1884 static struct cpumask tmp_mask __initdata
;
1887 * X86 requires to prevent that SMT siblings stopped while
1888 * the primary thread does a microcode update for various
1889 * reasons. Bring the primary threads up first.
1891 cpumask_and(&tmp_mask
, mask
, pmask
);
1892 cpuhp_bringup_mask(&tmp_mask
, ncpus
, CPUHP_BP_KICK_AP
);
1893 cpuhp_bringup_mask(&tmp_mask
, ncpus
, CPUHP_ONLINE
);
1894 /* Account for the online CPUs */
1895 ncpus
-= num_online_cpus();
1898 /* Create the mask for secondary CPUs */
1899 cpumask_andnot(&tmp_mask
, mask
, pmask
);
1903 /* Bring the not-yet started CPUs up */
1904 cpuhp_bringup_mask(mask
, ncpus
, CPUHP_BP_KICK_AP
);
1905 cpuhp_bringup_mask(mask
, ncpus
, CPUHP_ONLINE
);
1909 static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus
) { return false; }
1910 #endif /* CONFIG_HOTPLUG_PARALLEL */
1912 void __init
bringup_nonboot_cpus(unsigned int setup_max_cpus
)
1914 /* Try parallel bringup optimization if enabled */
1915 if (cpuhp_bringup_cpus_parallel(setup_max_cpus
))
1918 /* Full per CPU serialized bringup */
1919 cpuhp_bringup_mask(cpu_present_mask
, setup_max_cpus
, CPUHP_ONLINE
);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
			primary = housekeeping_any_cpu(HK_TYPE_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}

		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all freeze_secondary_cpus() users are
	 * supposed to do thaw_secondary_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_thaw_secondary_cpus_begin(void)
{
}

void __weak arch_thaw_secondary_cpus_end(void)
{
}

void thaw_secondary_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_thaw_secondary_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_thaw_secondary_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
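
/*
 * freeze_secondary_cpus() and thaw_secondary_cpus() are used pairwise by
 * the suspend/hibernate core, and thaw is expected even on the failure
 * path. Illustrative sketch only:
 *
 *	error = freeze_secondary_cpus(-1);
 *	if (!error)
 *		... enter the sleep state ...
 *	thaw_secondary_cpus();
 */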
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_RANDOM_PREPARE] = {
		.name			= "random:prepare",
		.startup.single		= random_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},

#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
	/*
	 * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
	 * the next step will release it.
	 */
	[CPUHP_BP_KICK_AP] = {
		.name			= "cpu:kick_ap",
		.startup.single		= cpuhp_kick_ap_alive,
	},

	/*
	 * Waits for the AP to reach cpuhp_ap_sync_alive() and then
	 * releases it for the complete bringup.
	 */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= cpuhp_bringup_ap,
		.teardown.single	= finish_cpu,
		.cant_stop		= true,
	},
#else
	/*
	 * All-in-one CPU bringup state which includes the kick alive.
	 */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= finish_cpu,
		.cant_stop		= true,
	},
#endif
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	[CPUHP_AP_HRTIMERS_DYING] = {
		.name			= "hrtimers:dying",
		.startup.single		= NULL,
		.teardown.single	= hrtimers_cpu_dying,
	},

	/*
	 * Entry state on starting. Interrupts enabled from here on.
	 * Transient state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},

	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
		.name			= "sched:waitempty",
		.startup.single		= NULL,
		.teardown.single	= sched_cpu_wait_empty,
	},

	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name			= "lockup_detector:online",
		.startup.single		= lockup_detector_online_cpu,
		.teardown.single	= lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RANDOM_ONLINE] = {
		.name			= "random:online",
		.startup.single		= random_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment in the Online range. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if (cpuhp_step_empty(bringup, sp))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu, ret = 0;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Return:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		/*
		 * Disable can be called with CPU_SMT_ENABLED when changing
		 * from a higher to lower number of SMT threads per core.
		 */
		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}
int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		if (!cpu_smt_thread_allowed(cpu))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif /* CONFIG_HOTPLUG_SMT */
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR_RO(state);
static ssize_t target_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = cpu_up(dev->id, target);
	else if (st->state > target)
		ret = cpu_down(dev->id, target);
	else if (WARN_ON(st->target != target))
		st->target = target;
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t target_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR_RW(target);
static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail == CPUHP_INVALID) {
		st->fail = fail;
		return count;
	}

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * DEAD callbacks cannot fail...
	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
	 * triggers the STARTING callbacks, so a failure in this state would
	 * hinder rollback.
	 */
	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t fail_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR_RW(fail);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
states_show(struct device
*dev
,
2884 struct device_attribute
*attr
, char *buf
)
2886 ssize_t cur
, res
= 0;
2889 mutex_lock(&cpuhp_state_mutex
);
2890 for (i
= CPUHP_OFFLINE
; i
<= CPUHP_ONLINE
; i
++) {
2891 struct cpuhp_step
*sp
= cpuhp_get_step(i
);
2894 cur
= sprintf(buf
, "%3d: %s\n", i
, sp
->name
);
2899 mutex_unlock(&cpuhp_state_mutex
);
2902 static DEVICE_ATTR_RO(states
);
2904 static struct attribute
*cpuhp_cpu_root_attrs
[] = {
2905 &dev_attr_states
.attr
,
2909 static const struct attribute_group cpuhp_cpu_root_attr_group
= {
2910 .attrs
= cpuhp_cpu_root_attrs
,
#ifdef CONFIG_HOTPLUG_SMT

static bool cpu_smt_num_threads_valid(unsigned int threads)
{
	if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
		return threads >= 1 && threads <= cpu_smt_max_threads;
	return threads == 1 || threads == cpu_smt_max_threads;
}

static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	int ctrlval, ret, num_threads, orig_threads;
	bool force_off;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	if (sysfs_streq(buf, "on")) {
		ctrlval = CPU_SMT_ENABLED;
		num_threads = cpu_smt_max_threads;
	} else if (sysfs_streq(buf, "off")) {
		ctrlval = CPU_SMT_DISABLED;
		num_threads = 1;
	} else if (sysfs_streq(buf, "forceoff")) {
		ctrlval = CPU_SMT_FORCE_DISABLED;
		num_threads = 1;
	} else if (kstrtoint(buf, 10, &num_threads) == 0) {
		if (num_threads == 1)
			ctrlval = CPU_SMT_DISABLED;
		else if (cpu_smt_num_threads_valid(num_threads))
			ctrlval = CPU_SMT_ENABLED;
		else
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	orig_threads = cpu_smt_num_threads;
	cpu_smt_num_threads = num_threads;

	force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;

	if (num_threads > orig_threads)
		ret = cpuhp_smt_enable();
	else if (num_threads < orig_threads || force_off)
		ret = cpuhp_smt_disable(ctrlval);

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */
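
/*
 * Example: user space drives __store_smt_control() through
 * /sys/devices/system/cpu/smt/control. Accepted values are "on", "off",
 * "forceoff" or a thread count (validated by cpu_smt_num_threads_valid()):
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# echo 2 > /sys/devices/system/cpu/smt/control
 *
 * "forceoff" is a one-way latch: once set, further writes fail with -EPERM.
 */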
static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
};

static ssize_t control_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

#ifdef CONFIG_HOTPLUG_SMT
	/*
	 * If SMT is enabled but not all threads are enabled then show the
	 * number of threads. If all threads are enabled show "on". Otherwise
	 * show the state name.
	 */
	if (cpu_smt_control == CPU_SMT_ENABLED &&
	    cpu_smt_num_threads != cpu_smt_max_threads)
		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
#endif

	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR_RW(control);

static ssize_t active_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR_RO(active);
static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};
static int __init cpu_smt_sysfs_init(void)
{
	struct device *dev_root;
	int ret = -ENODEV;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
		put_device(dev_root);
	}
	return ret;
}

static int __init cpuhp_sysfs_init(void)
{
	struct device *dev_root;
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
		put_device(dev_root);
		if (ret)
			return ret;
	}

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

struct cpumask __cpu_dying_mask __read_mostly;
EXPORT_SYMBOL(__cpu_dying_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note, that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
	atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
	this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
}
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);