/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;
1142d810 49
e6253970
ON
50/* static data for stop_cpus */
51static DEFINE_MUTEX(stop_cpus_mutex);
52static bool stop_cpus_in_progress;
7053ea1a 53
1142d810
TH
54static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
55{
56 memset(done, 0, sizeof(*done));
57 atomic_set(&done->nr_todo, nr_todo);
58 init_completion(&done->completion);
59}
60
61/* signal completion unless @done is NULL */
6fa3b826 62static void cpu_stop_signal_done(struct cpu_stop_done *done)
1142d810 63{
dd2e3121
ON
64 if (atomic_dec_and_test(&done->nr_todo))
65 complete(&done->completion);
1142d810
TH
66}
67
5caa1c08 68static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
0b26351b
PZ
69 struct cpu_stop_work *work,
70 struct wake_q_head *wakeq)
5caa1c08
ON
71{
72 list_add_tail(&work->list, &stopper->works);
0b26351b 73 wake_q_add(wakeq, stopper->thread);
5caa1c08
ON
74}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
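
/*
 * Illustrative usage (a sketch, not part of this file): run a short,
 * non-sleeping callback on CPU 2 and wait for the result.  The callback
 * runs in stopper context with the target CPU monopolized, so it must
 * not sleep.  The names below are hypothetical.
 *
 *	static int read_local_counter(void *arg)
 *	{
 *		*(u64 *)arg = this_cpu_read(my_counter);  // runs on CPU 2
 *		return 0;
 *	}
 *
 *	u64 val;
 *	int err = stop_one_cpu(2, read_local_counter, &val);
 *	// err is -ENOENT if CPU 2 was offline, else the callback's return.
 */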

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax_yield();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
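
/*
 * Illustrative trace (a sketch, not part of this file): with two online
 * CPUs and CPU0 active, the set_state()/ack_state() handshake above
 * advances roughly like this, each state moving on only when the last
 * CPU acks it:
 *
 *	CPU0 (is_active)		CPU1
 *	----------------		----------------
 *	sees PREPARE, acks		sees PREPARE, acks -> DISABLE_IRQ
 *	irqs off, acks			irqs off, acks     -> RUN
 *	err = msdata->fn(data), acks	acks               -> EXIT
 *	restores irqs, returns err	restores irqs, returns 0
 */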

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;
retry:
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	err = -ENOENT;
	if (!stopper1->enabled || !stopper2->enabled)
		goto unlock;
	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	err = -EDEADLK;
	if (unlikely(stop_cpus_in_progress))
		goto unlock;

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by
	 * another CPU, and preempting us.  This will cause us to not
	 * wake up the other stopper forever.
	 */
	preempt_disable();
unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		while (stop_cpus_in_progress)
			cpu_relax();
		goto retry;
	}

	if (!err) {
		wake_up_q(&wakeq);
		preempt_enable();
	}

	return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them.
 *
 * Returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
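
/*
 * Illustrative usage (a sketch, not part of this file): atomically
 * exchange state between two CPUs.  Both CPUs spin in multi_cpu_stop()
 * with interrupts disabled while @fn runs; since .active_cpus is
 * cpumask_of(cpu1) above, @fn executes on @cpu1.  The names below are
 * hypothetical.
 *
 *	static int swap_counters(void *arg)
 *	{
 *		struct swap_args *a = arg;	// both CPUs are quiesced
 *
 *		swap(*a->cnt1, *a->cnt2);
 *		return 0;
 *	}
 *
 *	int err = stop_two_cpus(src_cpu, dst_cpu, swap_counters, &args);
 */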

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
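
/*
 * Illustrative usage (a sketch, not part of this file): fire-and-forget
 * a stopper callback.  @work_buf must remain untouched until the stopper
 * dequeues it, so a static or per-CPU buffer is the usual choice.  The
 * names below are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	static int kick_cpu(void *arg)
 *	{
 *		return 0;	// merely forces a trip through the stopper
 *	}
 *
 *	stop_one_cpu_nowait(cpu, kick_cpu, NULL, &per_cpu(kick_work, cpu));
 *	// returns true immediately; kick_cpu() runs later on @cpu if the
 *	// stopper there is enabled.
 */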

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}
396
397/**
398 * stop_cpus - stop multiple cpus
399 * @cpumask: cpus to stop
400 * @fn: function to execute
401 * @arg: argument to @fn
402 *
403 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
404 * @fn is run in a process context with the highest priority
405 * preempting any task on the cpu and monopolizing it. This function
406 * returns after all executions are complete.
407 *
408 * This function doesn't guarantee the cpus in @cpumask stay online
409 * till @fn completes. If some cpus go down in the middle, execution
410 * on the cpu may happen partially or fully on different cpus. @fn
411 * should either be ready for that or the caller should ensure that
412 * the cpus stay online until this function completes.
413 *
414 * All stop_cpus() calls are serialized making it safe for @fn to wait
415 * for all cpus to start executing it.
416 *
417 * CONTEXT:
418 * Might sleep.
419 *
420 * RETURNS:
421 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
422 * @cpumask were offline; otherwise, 0 if all executions of @fn
423 * returned 0, any non zero return value if any returned non zero.
424 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
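
/*
 * Illustrative usage (a sketch, not part of this file): run a callback
 * simultaneously on every online CPU.  Because stop_cpus() calls are
 * serialized, @fn may safely wait for all CPUs to have entered it.  The
 * names below are hypothetical.
 *
 *	static int resync_state(void *arg)
 *	{
 *		// runs concurrently on each CPU of the mask
 *		return 0;
 *	}
 *
 *	int err = stop_cpus(cpu_online_mask, resync_state, NULL);
 */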

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or any
 * non-zero return value if any returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
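
/*
 * Illustrative usage (a sketch, not part of this file): callers that
 * must not block on stop_cpus_mutex can poll and back off on -EAGAIN:
 *
 *	int err;
 *
 *	do {
 *		err = try_stop_cpus(cpu_online_mask, resync_state, NULL);
 *	} while (err == -EAGAIN);
 */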

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless.  cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
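
/*
 * Illustrative usage (a sketch, not part of this file): mutate global
 * state with every online CPU quiesced and interrupts off, the classic
 * use being code patching.  The names below are hypothetical.
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct my_patch *p = arg;	// one CPU runs this; all
 *						// others spin, irqs off
 *		p->insn[0] = p->new_insn;
 *		return 0;
 *	}
 *
 *	int err = stop_machine(apply_patch, &patch, NULL);
 *	// with @cpus == NULL, the first online CPU executes apply_patch().
 */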

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or any non-zero return value if
 * any returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}