// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)  ((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
        call_single_data_t __percpu *csd;
        cpumask_var_t cpumask;
        cpumask_var_t cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

extern void send_call_function_single_ipi(int cpu);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
        /*
         * The list addition should be visible to the IPI handler, which
         * locks the list to pull the entry off it, because of the normal
         * cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol in an architecture, sufficient synchronisation should
         * be added to arch code to make it appear to obey cache coherency
         * WRT locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(node, &per_cpu(call_single_queue, cpu)))
                send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
        if (cpu == smp_processor_id()) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        __smp_call_single_queue(cpu, &csd->llist);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

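/*
 * Illustrative sketch (not part of the original file): roughly how an
 * architecture's IPI entry path is expected to hand off to the generic
 * layer. The handler name is hypothetical; real implementations live
 * under arch/<arch>/ and are wired to the CPU's interrupt vector.
 */
static void example_arch_call_function_single_ipi(void)
{
        /* Entered with interrupts disabled, as the generic code requires. */
        generic_smp_call_function_single_interrupt();
}
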
extern void irq_work_single(void *);

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        call_single_data_t *csd, *csd_next;
        struct llist_node *entry, *prev;
        struct llist_head *head;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist) {
                        switch (CSD_TYPE(csd)) {
                        case CSD_TYPE_ASYNC:
                        case CSD_TYPE_SYNC:
                        case CSD_TYPE_IRQ_WORK:
                                pr_warn("IPI callback %pS sent to offline CPU\n",
                                        csd->func);
                                break;

                        default:
                                pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
                                        CSD_TYPE(csd));
                                break;
                        }
                }
        }

        /*
         * First; run all SYNC callbacks, people are waiting for us.
         */
        prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                /* Do we wait until *after* callback? */
                if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        if (prev) {
                                prev->next = &csd_next->llist;
                        } else {
                                entry = &csd_next->llist;
                        }

                        func(info);
                        csd_unlock(csd);
                } else {
                        prev = &csd->llist;
                }
        }

        /*
         * Second; run all !SYNC callbacks.
         */
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                int type = CSD_TYPE(csd);

                if (type == CSD_TYPE_ASYNC) {
                        smp_call_func_t func = csd->func;
                        void *info = csd->info;

                        csd_unlock(csd);
                        func(info);
                } else if (type == CSD_TYPE_IRQ_WORK) {
                        irq_work_single(csd);
                }
        }
}

void flush_smp_call_function_from_idle(void)
{
        unsigned long flags;

        if (llist_empty(this_cpu_ptr(&call_single_queue)))
                return;

        local_irq_save(flags);
        flush_smp_call_function_queue(true);
        local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd storage.
         */
        WARN_ON_ONCE(!in_task());

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        csd->func = func;
        csd->info = info;

        err = generic_exec_single(cpu, csd);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
        int err = 0;

        preempt_disable();

        if (csd->flags & CSD_FLAG_LOCK) {
                err = -EBUSY;
                goto out;
        }

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd);

out:
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

static void smp_call_function_many_cond(const struct cpumask *mask,
                                        smp_call_func_t func, void *info,
                                        bool wait, smp_cond_func_t cond_func)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /*
         * When @wait we can deadlock when we interrupt between llist_add() and
         * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
         * csd_lock() because the interrupt context uses the same csd storage.
         */
        WARN_ON_ONCE(!in_task());

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                if (!cond_func || cond_func(cpu, info))
                        smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                if (cond_func && !cond_func(cpu, info))
                        continue;

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_TYPE_SYNC;
                csd->func = func;
                csd->info = info;
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;

        /*
         * Ensure struct irq_work layout matches so that
         * flush_smp_call_function_queue() can do horrible things.
         */
        BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
                     offsetof(struct __call_single_data, llist));
        BUILD_BUG_ON(offsetof(struct irq_work, func) !=
                     offsetof(struct __call_single_data, func));
        BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
                     offsetof(struct __call_single_data, flags));

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        bringup_nonboot_cpus(setup_max_cpus);

        num_nodes = num_online_nodes();
        num_cpus  = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus,  (num_cpus  > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        int cpu = get_cpu();

        smp_call_function_many_cond(mask, func, info, wait, cond_func);
        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
                      void *info, bool wait)
{
        on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break all cpus out of idle, including cpus that are
 * idle-polling; nothing is done for cpus that are not idle.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct work;
        struct completion done;
        int (*func)(void *);
        void *data;
        int ret;
        int cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);