kernel/smp.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Generic helpers for smp ipi calls
4 *
5 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/irq_work.h>
11 #include <linux/rcupdate.h>
12 #include <linux/rculist.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/percpu.h>
16 #include <linux/init.h>
17 #include <linux/gfp.h>
18 #include <linux/smp.h>
19 #include <linux/cpu.h>
20 #include <linux/sched.h>
21 #include <linux/sched/idle.h>
22 #include <linux/hypervisor.h>
23
24 #include "smpboot.h"
25
26 enum {
27 CSD_FLAG_LOCK = 0x01,
28 CSD_FLAG_SYNCHRONOUS = 0x02,
29 };
30
31 struct call_function_data {
32 call_single_data_t __percpu *csd;
33 cpumask_var_t cpumask;
34 cpumask_var_t cpumask_ipi;
35 };
36
37 static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
38
39 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
40
41 static void flush_smp_call_function_queue(bool warn_cpu_offline);
42
43 int smpcfd_prepare_cpu(unsigned int cpu)
44 {
45 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
46
47 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
48 cpu_to_node(cpu)))
49 return -ENOMEM;
50 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
51 cpu_to_node(cpu))) {
52 free_cpumask_var(cfd->cpumask);
53 return -ENOMEM;
54 }
55 cfd->csd = alloc_percpu(call_single_data_t);
56 if (!cfd->csd) {
57 free_cpumask_var(cfd->cpumask);
58 free_cpumask_var(cfd->cpumask_ipi);
59 return -ENOMEM;
60 }
61
62 return 0;
63 }
64
65 int smpcfd_dead_cpu(unsigned int cpu)
66 {
67 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
68
69 free_cpumask_var(cfd->cpumask);
70 free_cpumask_var(cfd->cpumask_ipi);
71 free_percpu(cfd->csd);
72 return 0;
73 }
74
75 int smpcfd_dying_cpu(unsigned int cpu)
76 {
77 /*
78 * The IPIs for the smp-call-function callbacks queued by other
79 * CPUs might arrive late, either due to hardware latencies or
80 * because this CPU disabled interrupts (inside stop-machine)
81 * before the IPIs were sent. So flush out any pending callbacks
82 * explicitly (without waiting for the IPIs to arrive), to
83 * ensure that the outgoing CPU doesn't go offline with work
84 * still pending.
85 */
86 flush_smp_call_function_queue(false);
87 return 0;
88 }
89
90 void __init call_function_init(void)
91 {
92 int i;
93
94 for_each_possible_cpu(i)
95 init_llist_head(&per_cpu(call_single_queue, i));
96
97 smpcfd_prepare_cpu(smp_processor_id());
98 }
99
100 /*
101 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
102 *
103 * For non-synchronous ipi calls the csd can still be in use by the
104 * previous function call. For multi-cpu calls it's even more interesting
105 * as we'll have to ensure no other cpu is observing our csd.
106 */
107 static __always_inline void csd_lock_wait(call_single_data_t *csd)
108 {
109 smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
110 }
111
112 static __always_inline void csd_lock(call_single_data_t *csd)
113 {
114 csd_lock_wait(csd);
115 csd->flags |= CSD_FLAG_LOCK;
116
117 /*
118 * prevent CPU from reordering the above assignment
119 * to ->flags with any subsequent assignments to other
120 * fields of the specified call_single_data_t structure:
121 */
122 smp_wmb();
123 }
124
125 static __always_inline void csd_unlock(call_single_data_t *csd)
126 {
127 WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
128
129 /*
130 * ensure we're all done before releasing data:
131 */
132 smp_store_release(&csd->flags, 0);
133 }
134
135 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
136
137 /*
138 * Insert a previously allocated call_single_data_t element
139 * for execution on the given CPU. The csd must already be locked
140 * by the caller, i.e. have CSD_FLAG_LOCK set in ->flags.
141 */
142 static int generic_exec_single(int cpu, call_single_data_t *csd,
143 smp_call_func_t func, void *info)
144 {
145 if (cpu == smp_processor_id()) {
146 unsigned long flags;
147
148 /*
149 * We can unlock early even for the synchronous on-stack case,
150 * since we're doing this from the same CPU..
151 */
152 csd_unlock(csd);
153 local_irq_save(flags);
154 func(info);
155 local_irq_restore(flags);
156 return 0;
157 }
158
159
160 if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
161 csd_unlock(csd);
162 return -ENXIO;
163 }
164
165 csd->func = func;
166 csd->info = info;
167
168 /*
169 * The list addition should be visible before the IPI is sent, since
170 * the handler locks the list to pull the entry off it; this follows
171 * from the normal cache coherency rules implied by spinlocks.
172 *
173 * If IPIs can go out of order with respect to the cache coherency
174 * protocol in an architecture, sufficient synchronisation should be
175 * added to arch code to make it appear to obey cache coherency WRT
176 * locking and barrier primitives. Generic code isn't really
177 * equipped to do the right thing...
178 */
179 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
180 arch_send_call_function_single_ipi(cpu);
181
182 return 0;
183 }
184
185 /**
186 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
187 *
188 * Invoked by arch to handle an IPI for call function single.
189 * Must be called with interrupts disabled.
190 */
191 void generic_smp_call_function_single_interrupt(void)
192 {
193 flush_smp_call_function_queue(true);
194 }
195
196 /**
197 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
198 *
199 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
200 * offline CPU. Skip this check if set to 'false'.
201 *
202 * Flush any pending smp-call-function callbacks queued on this CPU. This is
203 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
204 * to ensure that all pending IPI callbacks are run before it goes completely
205 * offline.
206 *
207 * Loop through the call_single_queue and run all the queued callbacks.
208 * Must be called with interrupts disabled.
209 */
210 static void flush_smp_call_function_queue(bool warn_cpu_offline)
211 {
212 struct llist_head *head;
213 struct llist_node *entry;
214 call_single_data_t *csd, *csd_next;
215 static bool warned;
216
217 lockdep_assert_irqs_disabled();
218
219 head = this_cpu_ptr(&call_single_queue);
220 entry = llist_del_all(head);
221 entry = llist_reverse_order(entry);
222
223 /* There shouldn't be any pending callbacks on an offline CPU. */
224 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
225 !warned && !llist_empty(head))) {
226 warned = true;
227 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
228
229 /*
230 * We don't have to use the _safe() variant here
231 * because we are not invoking the IPI handlers yet.
232 */
233 llist_for_each_entry(csd, entry, llist)
234 pr_warn("IPI callback %pS sent to offline CPU\n",
235 csd->func);
236 }
237
238 llist_for_each_entry_safe(csd, csd_next, entry, llist) {
239 smp_call_func_t func = csd->func;
240 void *info = csd->info;
241
242 /* Do we wait until *after* callback? */
243 if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
244 func(info);
245 csd_unlock(csd);
246 } else {
247 csd_unlock(csd);
248 func(info);
249 }
250 }
251
252 /*
253 * Handle irq works queued remotely by irq_work_queue_on().
254 * The SMP functions above are typically synchronous, so they
255 * had better run first since some other CPUs may be busy waiting
256 * for them.
257 */
258 irq_work_run();
259 }
260
261 /*
262 * smp_call_function_single - Run a function on a specific CPU
263 * @cpu: The CPU to run @func on.
264 * @func: The function to run. This must be fast and non-blocking.
265 * @info: An arbitrary pointer to pass to the function.
266 * @wait: If true, wait until the function has completed on the other CPU.
267 * Returns 0 on success, else a negative status code.
268 */
269 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
270 int wait)
271 {
272 call_single_data_t *csd;
273 call_single_data_t csd_stack = {
274 .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
275 };
276 int this_cpu;
277 int err;
278
279 /*
280 * prevent preemption and reschedule on another processor,
281 * as well as CPU removal
282 */
283 this_cpu = get_cpu();
284
285 /*
286 * Can deadlock when called with interrupts disabled.
287 * We allow CPUs that are not yet online though, as no one else can
288 * send an smp-call-function interrupt to this CPU, so deadlocks
289 * can't happen.
290 */
291 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
292 && !oops_in_progress);
293
294 /*
295 * When @wait we can deadlock when we interrupt between llist_add() and
296 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
297 * csd_lock(), because the interrupt context uses the same csd
298 * storage.
299 */
300 WARN_ON_ONCE(!in_task());
301
302 csd = &csd_stack;
303 if (!wait) {
304 csd = this_cpu_ptr(&csd_data);
305 csd_lock(csd);
306 }
307
308 err = generic_exec_single(cpu, csd, func, info);
309
310 if (wait)
311 csd_lock_wait(csd);
312
313 put_cpu();
314
315 return err;
316 }
317 EXPORT_SYMBOL(smp_call_function_single);
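
/*
 * Illustrative usage sketch (not part of this file's code): run a fast
 * callback on one particular CPU and wait for it to finish. The names
 * example_drain() and example_drain_cpu() are hypothetical.
 */
#if 0
static void example_drain(void *info)
{
	/* Fast, non-blocking work; runs with interrupts disabled on @cpu. */
}

static int example_drain_cpu(int cpu)
{
	/* wait=1: return only once example_drain() has completed on @cpu. */
	return smp_call_function_single(cpu, example_drain, NULL, 1);
}
#endif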
318
319 /**
320 * smp_call_function_single_async(): Run an asynchronous function on a
321 * specific CPU.
322 * @cpu: The CPU to run on.
323 * @csd: Pre-allocated and setup data structure
324 *
325 * Like smp_call_function_single(), but the call is asynchronous and
326 * can thus be done from contexts with disabled interrupts.
327 *
328 * The caller passes its own pre-allocated data structure
329 * (ie: embedded in an object) and is responsible for synchronizing it
330 * such that the IPIs performed on the @csd are strictly serialized.
331 *
332 * If the function is called with a csd which has not yet been
333 * processed by a previous call to smp_call_function_single_async(), the
334 * function will return immediately with -EBUSY, indicating that the csd
335 * object is still in use.
336 *
337 * NOTE: Be careful, there is unfortunately no current debugging facility to
338 * validate the correctness of this serialization.
339 */
340 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
341 {
342 int err = 0;
343
344 preempt_disable();
345
346 if (csd->flags & CSD_FLAG_LOCK) {
347 err = -EBUSY;
348 goto out;
349 }
350
351 csd->flags = CSD_FLAG_LOCK;
352 smp_wmb();
353
354 err = generic_exec_single(cpu, csd, csd->func, csd->info);
355
356 out:
357 preempt_enable();
358
359 return err;
360 }
361 EXPORT_SYMBOL_GPL(smp_call_function_single_async);
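
/*
 * Illustrative usage sketch: a call_single_data_t embedded in a (hypothetical)
 * driver object and fired asynchronously, e.g. from a context with interrupts
 * disabled. The object is assumed to be zero-initialized so csd.flags starts
 * out unlocked; struct example_dev, example_poke() and example_kick() are
 * made-up names.
 */
#if 0
struct example_dev {
	call_single_data_t csd;
	int pending;
};

static void example_poke(void *info)
{
	struct example_dev *dev = info;

	WRITE_ONCE(dev->pending, 0);
}

static int example_kick(struct example_dev *dev, int cpu)
{
	dev->csd.func = example_poke;
	dev->csd.info = dev;
	WRITE_ONCE(dev->pending, 1);

	/* Returns -EBUSY if the previous use of the csd hasn't completed. */
	return smp_call_function_single_async(cpu, &dev->csd);
}
#endif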
362
363 /*
364 * smp_call_function_any - Run a function on any of the given cpus
365 * @mask: The mask of cpus it can run on.
366 * @func: The function to run. This must be fast and non-blocking.
367 * @info: An arbitrary pointer to pass to the function.
368 * @wait: If true, wait until function has completed.
369 *
370 * Returns 0 on success, else a negative status code (if no cpus were online).
371 *
372 * Selection preference:
373 * 1) current cpu if in @mask
374 * 2) any cpu of current node if in @mask
375 * 3) any other online cpu in @mask
376 */
377 int smp_call_function_any(const struct cpumask *mask,
378 smp_call_func_t func, void *info, int wait)
379 {
380 unsigned int cpu;
381 const struct cpumask *nodemask;
382 int ret;
383
384 /* Try for same CPU (cheapest) */
385 cpu = get_cpu();
386 if (cpumask_test_cpu(cpu, mask))
387 goto call;
388
389 /* Try for same node. */
390 nodemask = cpumask_of_node(cpu_to_node(cpu));
391 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
392 cpu = cpumask_next_and(cpu, nodemask, mask)) {
393 if (cpu_online(cpu))
394 goto call;
395 }
396
397 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
398 cpu = cpumask_any_and(mask, cpu_online_mask);
399 call:
400 ret = smp_call_function_single(cpu, func, info, wait);
401 put_cpu();
402 return ret;
403 }
404 EXPORT_SYMBOL_GPL(smp_call_function_any);
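
/*
 * Illustrative usage sketch: find out which CPU of a (hypothetical) device
 * mask actually serviced a quick probe, preferring the current CPU, then the
 * local node. example_probe() and example_probe_near() are made-up names.
 */
#if 0
static void example_probe(void *info)
{
	*(int *)info = smp_processor_id();
}

static int example_probe_near(const struct cpumask *dev_mask)
{
	int serviced_by = -1;

	/* wait=1, so serviced_by is stable once this returns. */
	if (smp_call_function_any(dev_mask, example_probe, &serviced_by, 1))
		return -ENXIO;
	return serviced_by;
}
#endif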
405
406 static void smp_call_function_many_cond(const struct cpumask *mask,
407 smp_call_func_t func, void *info,
408 bool wait, smp_cond_func_t cond_func)
409 {
410 struct call_function_data *cfd;
411 int cpu, next_cpu, this_cpu = smp_processor_id();
412
413 /*
414 * Can deadlock when called with interrupts disabled.
415 * We allow CPUs that are not yet online though, as no one else can
416 * send an smp-call-function interrupt to this CPU, so deadlocks
417 * can't happen.
418 */
419 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
420 && !oops_in_progress && !early_boot_irqs_disabled);
421
422 /*
423 * When @wait we can deadlock when we interrupt between llist_add() and
424 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
425 * csd_lock(), because the interrupt context uses the same csd
426 * storage.
427 */
428 WARN_ON_ONCE(!in_task());
429
430 /* Try the fastpath. Which CPU do they want? Ignore this one. */
431 cpu = cpumask_first_and(mask, cpu_online_mask);
432 if (cpu == this_cpu)
433 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
434
435 /* No online cpus? We're done. */
436 if (cpu >= nr_cpu_ids)
437 return;
438
439 /* Do we have another CPU which isn't us? */
440 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
441 if (next_cpu == this_cpu)
442 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
443
444 /* Fastpath: do that cpu by itself. */
445 if (next_cpu >= nr_cpu_ids) {
446 if (!cond_func || cond_func(cpu, info))
447 smp_call_function_single(cpu, func, info, wait);
448 return;
449 }
450
451 cfd = this_cpu_ptr(&cfd_data);
452
453 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
454 __cpumask_clear_cpu(this_cpu, cfd->cpumask);
455
456 /* Some callers race with other cpus changing the passed mask */
457 if (unlikely(!cpumask_weight(cfd->cpumask)))
458 return;
459
460 cpumask_clear(cfd->cpumask_ipi);
461 for_each_cpu(cpu, cfd->cpumask) {
462 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
463
464 if (cond_func && !cond_func(cpu, info))
465 continue;
466
467 csd_lock(csd);
468 if (wait)
469 csd->flags |= CSD_FLAG_SYNCHRONOUS;
470 csd->func = func;
471 csd->info = info;
472 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
473 __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
474 }
475
476 /* Send a message to all CPUs in the map */
477 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
478
479 if (wait) {
480 for_each_cpu(cpu, cfd->cpumask) {
481 call_single_data_t *csd;
482
483 csd = per_cpu_ptr(cfd->csd, cpu);
484 csd_lock_wait(csd);
485 }
486 }
487 }
488
489 /**
490 * smp_call_function_many(): Run a function on a set of other CPUs.
491 * @mask: The set of cpus to run on (only runs on online subset).
492 * @func: The function to run. This must be fast and non-blocking.
493 * @info: An arbitrary pointer to pass to the function.
494 * @wait: If true, wait (atomically) until function has completed
495 * on other CPUs.
496 *
497 * If @wait is true, then returns once @func has returned.
498 *
499 * You must not call this function with disabled interrupts or from a
500 * hardware interrupt handler or from a bottom half handler. Preemption
501 * must be disabled when calling this function.
502 */
503 void smp_call_function_many(const struct cpumask *mask,
504 smp_call_func_t func, void *info, bool wait)
505 {
506 smp_call_function_many_cond(mask, func, info, wait, NULL);
507 }
508 EXPORT_SYMBOL(smp_call_function_many);
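
/*
 * Illustrative usage sketch: run a fast per-CPU flush on every online CPU in
 * a caller-supplied mask. smp_call_function_many() skips the calling CPU, so
 * the local case is handled explicitly, with preemption disabled as required.
 * example_flush() and example_flush_mask() are made-up names.
 */
#if 0
static void example_flush(void *info)
{
	/* Fast, non-blocking work only: runs from IPI context. */
}

static void example_flush_mask(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_flush, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		example_flush(NULL);
	preempt_enable();
}
#endif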
509
510 /**
511 * smp_call_function(): Run a function on all other CPUs.
512 * @func: The function to run. This must be fast and non-blocking.
513 * @info: An arbitrary pointer to pass to the function.
514 * @wait: If true, wait (atomically) until function has completed
515 * on other CPUs.
516 *
517 * If @wait is true, then this returns once @func has returned on all
518 * other CPUs; otherwise it returns just before the target CPUs call
519 * @func. This function itself does not return a value (it is
520 * declared void).
521 *
522 * You must not call this function with disabled interrupts or from a
523 * hardware interrupt handler or from a bottom half handler.
524 */
525 void smp_call_function(smp_call_func_t func, void *info, int wait)
526 {
527 preempt_disable();
528 smp_call_function_many(cpu_online_mask, func, info, wait);
529 preempt_enable();
530 }
531 EXPORT_SYMBOL(smp_call_function);
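
/*
 * Illustrative usage sketch: ask every *other* online CPU to run a fast
 * callback and wait for all of them; use on_each_cpu() below if the calling
 * CPU should run it as well. example_nudge() is a made-up name.
 */
#if 0
static void example_nudge(void *info)
{
	/* Runs on every other online CPU from IPI context. */
}

static void example_nudge_others(void)
{
	smp_call_function(example_nudge, NULL, 1);
}
#endif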
532
533 /* Setup configured maximum number of CPUs to activate */
534 unsigned int setup_max_cpus = NR_CPUS;
535 EXPORT_SYMBOL(setup_max_cpus);
536
537
538 /*
539 * Setup routine for controlling SMP activation
540 *
541 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
542 * activation entirely (the MPS table probe still happens, though).
543 *
544 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
545 * greater than 0, limits the maximum number of CPUs activated in
546 * SMP mode to <NUM>.
547 */
548
549 void __weak arch_disable_smp_support(void) { }
550
551 static int __init nosmp(char *str)
552 {
553 setup_max_cpus = 0;
554 arch_disable_smp_support();
555
556 return 0;
557 }
558
559 early_param("nosmp", nosmp);
560
561 /* this is the hard limit */
562 static int __init nrcpus(char *str)
563 {
564 int nr_cpus;
565
566 get_option(&str, &nr_cpus);
567 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
568 nr_cpu_ids = nr_cpus;
569
570 return 0;
571 }
572
573 early_param("nr_cpus", nrcpus);
574
575 static int __init maxcpus(char *str)
576 {
577 get_option(&str, &setup_max_cpus);
578 if (setup_max_cpus == 0)
579 arch_disable_smp_support();
580
581 return 0;
582 }
583
584 early_param("maxcpus", maxcpus);
585
586 /* Setup number of possible processor ids */
587 unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
588 EXPORT_SYMBOL(nr_cpu_ids);
589
590 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
591 void __init setup_nr_cpu_ids(void)
592 {
593 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
594 }
595
596 /* Called by boot processor to activate the rest. */
597 void __init smp_init(void)
598 {
599 int num_nodes, num_cpus;
600
601 idle_threads_init();
602 cpuhp_threads_init();
603
604 pr_info("Bringing up secondary CPUs ...\n");
605
606 bringup_nonboot_cpus(setup_max_cpus);
607
608 num_nodes = num_online_nodes();
609 num_cpus = num_online_cpus();
610 pr_info("Brought up %d node%s, %d CPU%s\n",
611 num_nodes, (num_nodes > 1 ? "s" : ""),
612 num_cpus, (num_cpus > 1 ? "s" : ""));
613
614 /* Any cleanup work */
615 smp_cpus_done(setup_max_cpus);
616 }
617
618 /*
619 * Call a function on all processors. May be used during early boot while
620 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
621 * of local_irq_disable/enable().
622 */
623 void on_each_cpu(void (*func) (void *info), void *info, int wait)
624 {
625 unsigned long flags;
626
627 preempt_disable();
628 smp_call_function(func, info, wait);
629 local_irq_save(flags);
630 func(info);
631 local_irq_restore(flags);
632 preempt_enable();
633 }
634 EXPORT_SYMBOL(on_each_cpu);
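
/*
 * Illustrative usage sketch: sum a (hypothetical) per-CPU counter across all
 * online CPUs, including the local one. Assumes the atomic_long_* helpers are
 * visible here; example_hits, example_read() and example_total_hits() are
 * made-up names.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_read(void *info)
{
	atomic_long_add(this_cpu_read(example_hits), (atomic_long_t *)info);
}

static unsigned long example_total_hits(void)
{
	atomic_long_t total = ATOMIC_LONG_INIT(0);

	on_each_cpu(example_read, &total, 1);
	return atomic_long_read(&total);
}
#endif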
635
636 /**
637 * on_each_cpu_mask(): Run a function on processors specified by
638 * cpumask, which may include the local processor.
639 * @mask: The set of cpus to run on (only runs on online subset).
640 * @func: The function to run. This must be fast and non-blocking.
641 * @info: An arbitrary pointer to pass to the function.
642 * @wait: If true, wait (atomically) until function has completed
643 * on other CPUs.
644 *
645 * If @wait is true, then returns once @func has returned.
646 *
647 * You must not call this function with disabled interrupts or from a
648 * hardware interrupt handler or from a bottom half handler. The
649 * exception is that it may be used during early boot while
650 * early_boot_irqs_disabled is set.
651 */
652 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
653 void *info, bool wait)
654 {
655 int cpu = get_cpu();
656
657 smp_call_function_many(mask, func, info, wait);
658 if (cpumask_test_cpu(cpu, mask)) {
659 unsigned long flags;
660 local_irq_save(flags);
661 func(info);
662 local_irq_restore(flags);
663 }
664 put_cpu();
665 }
666 EXPORT_SYMBOL(on_each_cpu_mask);
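
/*
 * Illustrative usage sketch: apply a setting on the CPUs a (hypothetical)
 * device is affine to, running locally as well when the current CPU is in the
 * mask. example_apply() and example_apply_on() are made-up names.
 */
#if 0
static void example_apply(void *info)
{
	unsigned long val = *(unsigned long *)info;

	/* e.g. program a per-CPU control register; must stay fast/atomic. */
	(void)val;
}

static void example_apply_on(const struct cpumask *affinity, unsigned long val)
{
	on_each_cpu_mask(affinity, example_apply, &val, true);
}
#endif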
667
668 /*
669 * on_each_cpu_cond_mask(): Call a function on each processor in @mask
670 * for which the supplied function cond_func returns true, optionally
671 * waiting for all the required CPUs to finish. This may include the
672 * local processor.
673 * @cond_func: A callback function that is passed a cpu id and
674 * the info parameter. The function is called
675 * with preemption disabled. The function should
676 * return a boolean value indicating whether to IPI
677 * the specified CPU.
678 * @func: The function to run on all applicable CPUs.
679 * This must be fast and non-blocking.
680 * @info: An arbitrary pointer to pass to both functions.
681 * @wait: If true, wait (atomically) until function has
682 * completed on other CPUs.
683 *
684 * Preemption is disabled to protect against CPUs going offline but not online.
685 * CPUs going online during the call will not be seen or sent an IPI.
686 *
687 * You must not call this function with disabled interrupts or
688 * from a hardware interrupt handler or from a bottom half handler.
689 */
690 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
691 void *info, bool wait, const struct cpumask *mask)
692 {
693 int cpu = get_cpu();
694
695 smp_call_function_many_cond(mask, func, info, wait, cond_func);
696 if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
697 unsigned long flags;
698
699 local_irq_save(flags);
700 func(info);
701 local_irq_restore(flags);
702 }
703 put_cpu();
704 }
705 EXPORT_SYMBOL(on_each_cpu_cond_mask);
706
707 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
708 void *info, bool wait)
709 {
710 on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
711 }
712 EXPORT_SYMBOL(on_each_cpu_cond);
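
/*
 * Illustrative usage sketch: only IPI the CPUs whose (hypothetical) per-CPU
 * dirty flag is set, and clear the flag there. example_dirty,
 * example_is_dirty(), example_clean() and example_clean_all() are made-up
 * names.
 */
#if 0
static DEFINE_PER_CPU(bool, example_dirty);

static bool example_is_dirty(int cpu, void *info)
{
	/* Called with preemption disabled; decides whether to IPI @cpu. */
	return per_cpu(example_dirty, cpu);
}

static void example_clean(void *info)
{
	this_cpu_write(example_dirty, false);
}

static void example_clean_all(void)
{
	on_each_cpu_cond(example_is_dirty, example_clean, NULL, true);
}
#endif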
713
714 static void do_nothing(void *unused)
715 {
716 }
717
718 /**
719 * kick_all_cpus_sync - Force all cpus out of idle
720 *
721 * Used to synchronize the update of pm_idle function pointer. It's
722 * called after the pointer is updated and returns after the dummy
723 * callback function has been executed on all cpus. The execution of
724 * the function can only happen on the remote cpus after they have
725 * left the idle function which had been called via pm_idle function
726 * pointer. So it's guaranteed that nothing uses the previous pointer
727 * anymore.
728 */
729 void kick_all_cpus_sync(void)
730 {
731 /* Make sure the change is visible before we kick the cpus */
732 smp_mb();
733 smp_call_function(do_nothing, NULL, 1);
734 }
735 EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
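
/*
 * Illustrative usage sketch: publish a new value for a (hypothetical) function
 * pointer that idle CPUs may be calling, then make sure no CPU can still be
 * inside the old callback. example_idle_fn and example_set_idle_fn() are
 * made-up names.
 */
#if 0
static void (*example_idle_fn)(void);

static void example_set_idle_fn(void (*new_fn)(void))
{
	WRITE_ONCE(example_idle_fn, new_fn);
	/* Every CPU has run the dummy callback once this returns. */
	kick_all_cpus_sync();
}
#endif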
736
737 /**
738 * wake_up_all_idle_cpus - break all cpus out of idle
739 * wake_up_all_idle_cpus tries to break all CPUs out of idle, including
740 * CPUs that are idle polling; CPUs that are not idle are left
741 * alone.
742 */
743 void wake_up_all_idle_cpus(void)
744 {
745 int cpu;
746
747 preempt_disable();
748 for_each_online_cpu(cpu) {
749 if (cpu == smp_processor_id())
750 continue;
751
752 wake_up_if_idle(cpu);
753 }
754 preempt_enable();
755 }
756 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
757
758 /**
759 * smp_call_on_cpu - Call a function on a specific cpu
760 *
761 * Used to call a function on a specific cpu and wait for it to return.
762 * Optionally make sure the call is done on a specified physical cpu via vcpu
763 * pinning in order to support virtualized environments.
764 */
765 struct smp_call_on_cpu_struct {
766 struct work_struct work;
767 struct completion done;
768 int (*func)(void *);
769 void *data;
770 int ret;
771 int cpu;
772 };
773
774 static void smp_call_on_cpu_callback(struct work_struct *work)
775 {
776 struct smp_call_on_cpu_struct *sscs;
777
778 sscs = container_of(work, struct smp_call_on_cpu_struct, work);
779 if (sscs->cpu >= 0)
780 hypervisor_pin_vcpu(sscs->cpu);
781 sscs->ret = sscs->func(sscs->data);
782 if (sscs->cpu >= 0)
783 hypervisor_pin_vcpu(-1);
784
785 complete(&sscs->done);
786 }
787
788 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
789 {
790 struct smp_call_on_cpu_struct sscs = {
791 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
792 .func = func,
793 .data = par,
794 .cpu = phys ? cpu : -1,
795 };
796
797 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
798
799 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
800 return -ENXIO;
801
802 queue_work_on(cpu, system_wq, &sscs.work);
803 wait_for_completion(&sscs.done);
804
805 return sscs.ret;
806 }
807 EXPORT_SYMBOL_GPL(smp_call_on_cpu);
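
/*
 * Illustrative usage sketch: unlike the IPI-based helpers above, this runs the
 * callback from a workqueue on the target CPU, so it may sleep; phys=true
 * additionally asks the hypervisor to pin the vCPU. example_read_hw() and
 * example_query_cpu() are made-up names.
 */
#if 0
static int example_read_hw(void *arg)
{
	/* Runs in process context on the target CPU and may sleep. */
	return 0;
}

static int example_query_cpu(unsigned int cpu)
{
	return smp_call_on_cpu(cpu, example_read_hw, NULL, false);
}
#endif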