// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
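
/*
 * For reference, the registering side lives in kvm_intel. A minimal
 * sketch, assuming that module's internal handler name, is:
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_loaded_vmcss);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 *
 * on module load and unload respectively; this pairing is what makes
 * the rcu_read_lock()/rcu_dereference() above safe.
 */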

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}
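
/*
 * A minimal sketch of the weak default being overridden here
 * (kernel/panic.c), for comparison; the exact body may differ by
 * kernel version:
 *
 *	void __weak crash_smp_send_stop(void)
 *	{
 *		static int cpus_stopped;
 *
 *		if (cpus_stopped)
 *			return;
 *
 *		smp_send_stop();
 *		cpus_stopped = 1;
 *	}
 *
 * The x86 version differs mainly in preferring the NMI-based
 * smp_ops.crash_stop_other_cpus() when the platform provides one.
 */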

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
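
/*
 * For orientation: on panic, __crash_kexec() in kernel/kexec_core.c
 * calls machine_crash_shutdown(regs), which lands here on x86, and
 * then jumps into the kdump kernel via machine_kexec().
 */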

#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}
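
/*
 * Worked example for the "extra two slots" above: with a single RAM
 * range [0, 4G-1] (nr_ranges = 1), excluding a crashk_res of
 * [1G, 1G+256M-1] splits it into [0, 1G-1] and [1G+256M, 4G-1],
 * consuming one extra slot; excluding crashk_low_res can split a
 * range once more, hence nr_ranges + 2.
 */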

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to split and split ranges are put in cmem->ranges[] array
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
					  IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}

out:
	vfree(cmem);
	return ret;
}
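
/*
 * Why the p_offset fixup above matters: the low 640K is clobbered
 * while the kdump kernel boots, so purgatory copies it to the backup
 * region first. Pointing the matching PT_LOAD's file offset at
 * backup_load_addr makes a /proc/vmcore read return the preserved
 * copy rather than the clobbered original.
 */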

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
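
/*
 * The entries assembled here become the kdump kernel's view of
 * memory: these boot_params are handed to the second kernel, which
 * is expected to parse e820_table as its firmware memory map.
 */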

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}
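
/*
 * Note the non-zero return: walk_system_ram_res() stops iterating as
 * soon as a callback returns non-zero, which is how "only one range"
 * is enforced.
 */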

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
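
/*
 * crash_load_segments() is reached from the kexec_file_load path;
 * on x86 the bzImage loader (arch/x86/kernel/kexec-bzimage64.c) calls
 * it when loading an image of type KEXEC_TYPE_CRASH.
 */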
#endif /* CONFIG_KEXEC_FILE */