/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	secondary_data.task = NULL;
	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status & CPU_BOOT_STATUS_MASK) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
			/* Fall through */
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			if (status & CPU_STUCK_REASON_52_BIT_VA)
				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
			if (status & CPU_STUCK_REASON_NO_GRAN)
				pr_crit("CPU%u: does not support %luK granule\n", cpu, PAGE_SIZE / SZ_1K);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%pOF: missing reg property\n", dn);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%pOF: invalid reg property\n", dn);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
			       dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
				       dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_daif_mask();
	sdei_mask_local_cpu();

	while (1)
		cpu_relax();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			irq_enter();
			ipi_cpu_crash_stop(cpu, regs);

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	if (num_online_cpus() == 1) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}