// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>

#include "setup.h"
int spinning_secondaries;

/* Conservative L1 cache-line defaults; real values come from the device tree. */
struct ppc64_caches ppc64_caches = {
	.l1d = { .block_size = 0x40, .log_block_size = 6, },
	.l1i = { .block_size = 0x40, .log_block_size = 6, },
};
EXPORT_SYMBOL_GPL(ppc64_caches);
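
/*
 * On Book3E SMP, each core shares one tlb_core_data structure between its
 * hardware threads; the routine below points every thread's paca at the
 * copy owned by the first thread of its core.
 */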
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
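
/*
 * SMT bring-up policy: the "smt-enabled=" command line option (parsed very
 * early below) overrides the "ibm,smt-enabled" device-tree property; the
 * result is recorded in smt_enabled_at_boot as a thread count per core.
 */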
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */
/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}
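
/*
 * Decide how exceptions are delivered for this kernel: kdump trampolines
 * when not relocatable, relocation-on (AIL) and endian mode via hypercalls
 * under a PAPR hypervisor, or via OPAL on bare-metal powernv.
 */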
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init __nostackprotector early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	early_ioremap_setup();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even),
	 * so that it can do whatever printfs it needs.
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
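
/*
 * Secondary CPU counterpart of early_setup(): the boot CPU has already done
 * the global work, so each secondary only sets up its own MMU/KUP state and
 * marks itself ready to take interrupts in virtual mode.
 */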
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);
	mb();

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
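
/*
 * Read one cache level's geometry from a device-tree node. Returns false if
 * any of the size/block/line properties are missing, in which case the
 * best-effort defaults (cur_cpu_spec->dcache_bsize) are used for the missing
 * fields and the caller warns.
 */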
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == -1u)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}
/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers and must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
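
/*
 * Allocate one THREAD_SIZE, THREAD_ALIGN-aligned stack below 'limit',
 * preferring memory on the given CPU's node.
 */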
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#endif
	}
}
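
/*
 * Per-cpu area setup: the generic "embed" first-chunk allocator is used,
 * with node-local allocation and a CPU distance callback so that units for
 * CPUs on the same node end up grouped together.
 */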
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);
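
/*
 * RFI flush: flush the L1 data cache when returning from the kernel to a
 * less privileged context, so kernel data cannot be inferred through cache
 * side channels. Platform code reports which flush types are available and
 * the chosen sequence is patched into the exit paths by
 * do_rfi_flush_fixups().
 */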
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;
static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];

		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}
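
/*
 * Runtime control via debugfs (typically /sys/kernel/debug/powerpc/rfi_flush):
 * writing 0 or 1 disables or enables the flush, reading reports the current
 * state.
 */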
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL,
			    &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PPC_BOOK3S_64 */