/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
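/*
 * Decode the main ID register.  Bits [15:12] distinguish pre-ARM7 parts
 * (0x0) and ARM7 (0x7, where bit 23 flags Thumb support and hence ARMv4T);
 * later cores encode the architecture directly in bits [19:16], with 0xf
 * meaning "consult the CPUID extension registers", in which case MMFR0's
 * VMSA/PMSA fields tell ARMv6 apart from ARMv7.
 */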
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/*
		 * Revised CPUID format.  Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7.
		 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

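/*
 * An I-cache can alias only when one way is larger than PAGE_SIZE: the
 * virtual index bits then extend above the page offset, so the same
 * physical line may be cached under two different virtual indexes.  On
 * ARMv7 the way size is computed from CCSIDR (line size in bytes is
 * 4 << (CCSIDR[2:0] + 2), number of sets is CCSIDR[27:13] + 1); ARMv6
 * instead reports potential aliasing directly via bit 11 (the P bit) of
 * the cache type register.
 */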
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

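		/*
		 * CTR[31:29] == 0b100 marks the ARMv7 register layout, in
		 * which the L1Ip field (CTR[15:14]) gives the L1 I-cache
		 * policy: 01 = ASID-tagged VIVT, 10 = VIPT, 11 = PIPT.
		 */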
		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

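	/*
	 * Clearing bit 0 drops the Thumb bit from the function symbol so the
	 * stores land on the actual instruction words; the empty asm makes
	 * fn_addr opaque to the compiler, which otherwise could assume the
	 * function text is immutable and misoptimise the patching below.
	 */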
	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

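	/*
	 * ID_ISAR0[27:24] (Divide): 1 means SDIV/UDIV in Thumb only,
	 * 2 means SDIV/UDIV in both ARM and Thumb.
	 */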
	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
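	/* ID_ISAR3[15:12] is SynchPrim, ID_ISAR4[23:20] is SynchPrim_frac. */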
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
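	/*
	 * Each "msr cpsr_c" below switches into one exception mode (IRQ,
	 * ABT, UND, FIQ) with IRQs and FIQs masked, points that mode's
	 * banked sp at the matching three-word slot in struct stack, and
	 * the final write drops back to SVC mode.
	 */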
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

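	/*
	 * Make the boot CPU logical CPU 0 by swapping the logical ids
	 * 0 and 'cpu'; all other CPUs keep the identity mapping.
	 */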
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region still has a non-zero size
	 * after the alignment adjustments above.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

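	/*
	 * crash_base <= 0 means no base was requested on the command line,
	 * so pick one: a CRASH_ALIGN-aligned hole below the smaller of the
	 * idmap-able limit and the top of lowmem, keeping the crash kernel
	 * in directly addressable memory.
	 */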
	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
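	/*
	 * Pick the SMP operations: a machine-specific smp_init() hook that
	 * returns true has installed its own ops; otherwise prefer PSCI ops
	 * when the firmware provides them, falling back to the static ops
	 * in the machine descriptor.
	 */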
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

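		/*
		 * BogoMIPS is loops_per_jiffy * HZ / 500000; the second
		 * term prints two decimal places without floating point.
		 */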
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};