/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
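/*
 * Baseline hwcaps reported to 32-bit (compat) tasks: AArch32 state on an
 * ARMv8 CPU is expected to provide at least this feature set, so it is
 * used as the default value below.
 */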
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

static const char *cpu_name;
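/* Physical address of the device tree blob, saved from x0 by head.S. */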
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid hangs caused by
	 * using percpu variables early; for example, lockdep accesses
	 * percpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. bits that never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels
	 * to check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express this affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting them in
	 * order to compress the 32-bit value space to a contiguous set of
	 * indices. This is equivalent to hashing the MPIDR_EL1 through
	 * shifting and ORing. It is a collision-free hash, though not
	 * minimal, since some levels might contain a number of CPUs that
	 * is not an exact power of 2 and their bit representation might
	 * contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
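	/*
	 * Worked example (hypothetical topology): four CPUs with MPIDRs
	 * 0x000, 0x001, 0x100 and 0x101. Only bit 0 of Aff0 and bit 0 of
	 * Aff1 ever toggle, so mask = 0x101, fs = {0, 0, 0, 0} and
	 * bits = {1, 1, 0, 0}. The assignments below then yield
	 * shift_aff[0] = 0 and shift_aff[1] = 7, packing each MPIDR into
	 * a distinct 2-bit index (0, 1, 2, 3), i.e. a 4-entry table.
	 */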
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif

static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
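	/*
	 * CWG (Cache Writeback Granule) is the maximum write-back granule
	 * of any cache in the system; if L1_CACHE_BYTES is smaller,
	 * buffers aligned only to L1_CACHE_BYTES may still share a cache
	 * line with unrelated data.
	 */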
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
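	/*
	 * The case labels below fall through deliberately: a field value
	 * of 2 (PMULL) implies AES as well, so each level accumulates the
	 * capabilities of the levels beneath it.
	 */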
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries information similar to the above, but
	 * pertaining to the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
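	/* As above, the cases fall through to accumulate capabilities. */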
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

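	/*
	 * Register each memblock region as "System RAM" and nest the
	 * kernel code/data windows beneath the region that contains them,
	 * so they appear indented under it in /proc/iomem.
	 */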
	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible
	 * earlycon, so that System Errors can be reported as soon as we
	 * have somewhere to report them.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

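	/*
	 * If we chose to boot from ACPI, leave the device tree
	 * flattened so that only one firmware description is live.
	 */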
	if (acpi_disabled)
		unflatten_device_tree();

	psci_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
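/*
 * The strings above and below are indexed by HWCAP bit number, so their
 * order must match the corresponding HWCAP_* definitions; the NULL
 * sentinel terminates the printing loops in c_show().
 */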

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL	/* sentinel: c_show() loops until a NULL entry */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

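/*
 * Single-record seq_file: c_start() yields one opaque token at position 0
 * and c_next() always terminates the walk, so c_show() runs exactly once
 * per read and iterates over the online CPUs itself.
 */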
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};