// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)     "kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
        struct boot_params *params;
        /* Type of memory */
        unsigned int type;
};

/*
 * Used to VMCLEAR all VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

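/*
 * Invoke the registered VMCLEAR callback, if any. The RCU read lock
 * keeps the callback from being unregistered while it runs.
 */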
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
        crash_vmclear_fn *do_vmclear_operation = NULL;

        rcu_read_lock();
        do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
        if (do_vmclear_operation)
                do_vmclear_operation();
        rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

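/*
 * Runs on each non-crashing CPU, in NMI context, during the shootdown
 * triggered by kdump_nmi_shootdown_cpus() below.
 */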
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;

        if (!user_mode(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
#endif
        crash_save_cpu(regs, cpu);

        /*
         * VMCLEAR VMCSs loaded on all cpus if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Disable VMX or SVM if needed.
         *
         * We need to disable virtualization on all CPUs.
         * Having VMX or SVM enabled on any CPU may break rebooting
         * after the kdump kernel has finished its task.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

        disable_local_APIC();
}

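/*
 * Stop all other CPUs via NMI so that each saves its register state
 * and shuts down virtualization, PT and its local APIC before the
 * crash kernel is entered.
 */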
void kdump_nmi_shootdown_cpus(void)
{
        nmi_shootdown_cpus(kdump_nmi_callback);

        disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
        static int cpus_stopped;

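        /*
         * This may be called both from panic() and again from
         * native_machine_crash_shutdown(); only stop the other
         * CPUs on the first call.
         */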
        if (cpus_stopped)
                return;

        if (smp_ops.crash_stop_other_cpus)
                smp_ops.crash_stop_other_cpus();
        else
                smp_send_stop();

        cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
        /* There are no CPUs to shoot down */
}
#endif

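/*
 * Installed as machine_ops.crash_shutdown and run on the crashing CPU
 * from the crash_kexec() path.
 */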
void native_machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        crash_smp_send_stop();

        /*
         * VMCLEAR VMCSs loaded on this cpu if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Booting the kdump kernel with VMX or SVM enabled won't work,
         * because (among other limitations) we can't disable paging
         * with the virt flags.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
        /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
        ioapic_zap_locks();
        clear_IO_APIC();
#endif
        lapic_shutdown();
        restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
        hpet_disable();
#endif
        crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
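/* walk_system_ram_res() callback: count the System RAM ranges. */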
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
        unsigned int *nr_ranges = arg;

        (*nr_ranges)++;
        return 0;
}

/* Gather all the information required to prepare ELF headers for RAM regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
        unsigned int nr_ranges = 0;
        struct crash_mem *cmem;

        walk_system_ram_res(0, -1, &nr_ranges,
                            get_nr_ram_ranges_callback);
        if (!nr_ranges)
                return NULL;

        /*
         * Exclusion of the crash region and/or crashk_low_res may cause
         * another range split, so add two extra slots here.
         */
        nr_ranges += 2;
        cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
        if (!cmem)
                return NULL;

        cmem->max_nr_ranges = nr_ranges;
        cmem->nr_ranges = 0;

        return cmem;
}

/*
 * Remove any unwanted ranges (the crashkernel regions) from the list in
 * cmem. This may split a range; the resulting sub-ranges are stored back
 * in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
        int ret = 0;

        /* Exclude the crashkernel region */
        ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
        if (ret)
                return ret;

        if (crashk_low_res.end) {
                ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
                                              crashk_low_res.end);
                if (ret)
                        return ret;
        }

        return ret;
}

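/* walk_system_ram_res() callback: record each System RAM range in cmem. */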
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
        struct crash_mem *cmem = arg;

        cmem->ranges[cmem->nr_ranges].start = res->start;
        cmem->ranges[cmem->nr_ranges].end = res->end;
        cmem->nr_ranges++;

        return 0;
}

/* Prepare the ELF headers; return the address and size via @addr and @sz. */
static int prepare_elf_headers(struct kimage *image, void **addr,
                               unsigned long *sz)
{
        struct crash_mem *cmem;
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        int ret, i;

        cmem = fill_up_crash_elf_data();
        if (!cmem)
                return -ENOMEM;

        ret = walk_system_ram_res(0, -1, cmem,
                                  prepare_elf64_ram_headers_callback);
        if (ret)
                goto out;

        /* Exclude unwanted mem ranges */
        ret = elf_header_exclude_ranges(cmem);
        if (ret)
                goto out;

        /* By default prepare 64-bit headers */
        ret = crash_prepare_elf64_headers(cmem,
                                          IS_ENABLED(CONFIG_X86_64), addr, sz);
        if (ret)
                goto out;

        /*
         * If a range matches the backup region, adjust its offset to
         * point at the backup segment.
         */
        ehdr = (Elf64_Ehdr *)*addr;
        phdr = (Elf64_Phdr *)(ehdr + 1);
        for (i = 0; i < ehdr->e_phnum; phdr++, i++)
                if (phdr->p_type == PT_LOAD &&
                    phdr->p_paddr == image->arch.backup_src_start &&
                    phdr->p_memsz == image->arch.backup_src_sz) {
                        phdr->p_offset = image->arch.backup_load_addr;
                        break;
                }
out:
        vfree(cmem);
        return ret;
}

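/*
 * Append one entry to the e820 table handed to the second kernel;
 * fails once the zero-page table is full.
 */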
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
        unsigned int nr_e820_entries;

        nr_e820_entries = params->e820_entries;
        if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
                return 1;

        memcpy(&params->e820_table[nr_e820_entries], entry,
               sizeof(struct e820_entry));
        params->e820_entries++;
        return 0;
}

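/*
 * walk_iomem_res_desc() callback: turn a resource into an e820 entry
 * of the type requested in crash_memmap_data.
 */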
static int memmap_entry_callback(struct resource *res, void *arg)
{
        struct crash_memmap_data *cmd = arg;
        struct boot_params *params = cmd->params;
        struct e820_entry ei;

        ei.addr = res->start;
        ei.size = resource_size(res);
        ei.type = cmd->type;
        add_e820_entry(params, &ei);

        return 0;
}

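/*
 * Seed cmem with [mstart, mend] and carve out the backup and ELF header
 * regions; the remainder (possibly split) is what the second kernel may
 * treat as RAM.
 */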
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
                                 unsigned long long mstart,
                                 unsigned long long mend)
{
        unsigned long start, end;
        int ret = 0;

        cmem->ranges[0].start = mstart;
        cmem->ranges[0].end = mend;
        cmem->nr_ranges = 1;

        /* Exclude the backup region */
        start = image->arch.backup_load_addr;
        end = start + image->arch.backup_src_sz - 1;
        ret = crash_exclude_mem_range(cmem, start, end);
        if (ret)
                return ret;

        /* Exclude the elf header region */
        start = image->arch.elf_load_addr;
        end = start + image->arch.elf_headers_sz - 1;
        return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
        int i, ret = 0;
        unsigned long flags;
        struct e820_entry ei;
        struct crash_memmap_data cmd;
        struct crash_mem *cmem;

        /*
         * ranges[] is a flexible array member, so allocate room for one
         * range; memmap_exclude_ranges() seeds ranges[0] below.
         */
        cmem = vzalloc(struct_size(cmem, ranges, 1));
        if (!cmem)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(struct crash_memmap_data));
        cmd.params = params;

        /* Add the first 640K segment */
        ei.addr = image->arch.backup_src_start;
        ei.size = image->arch.backup_src_sz;
        ei.type = E820_TYPE_RAM;
        add_e820_entry(params, &ei);

        /* Add ACPI tables */
        cmd.type = E820_TYPE_ACPI;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add ACPI Non-volatile Storage */
        cmd.type = E820_TYPE_NVS;
        walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add the crashk_low_res region */
        if (crashk_low_res.end) {
                ei.addr = crashk_low_res.start;
                ei.size = crashk_low_res.end - crashk_low_res.start + 1;
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

        /* Exclude some ranges from crashk_res and add the rest to the memmap */
        ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
                                    crashk_res.end);
        if (ret)
                goto out;

        for (i = 0; i < cmem->nr_ranges; i++) {
                ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

                /* If the entry is smaller than a page, skip it */
                if (ei.size < PAGE_SIZE)
                        continue;
                ei.addr = cmem->ranges[i].start;
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

out:
        vfree(cmem);
        return ret;
}

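/*
 * walk_system_ram_res() callback: record the backup source range.
 * Returning 1 stops the walk after the first match.
 */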
static int determine_backup_region(struct resource *res, void *arg)
{
        struct kimage *image = arg;

        image->arch.backup_src_start = res->start;
        image->arch.backup_src_sz = resource_size(res);

        /* Expecting only one range for backup region */
        return 1;
}

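/*
 * Load the segments a crash capture kernel needs in addition to the
 * kernel itself: the zero-filled backup segment and the ELF core
 * headers. Used by the kexec_file_load() path.
 */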
int crash_load_segments(struct kimage *image)
{
        int ret;
        struct kexec_buf kbuf = { .image = image, .buf_min = 0,
                                  .buf_max = ULONG_MAX, .top_down = false };

        /*
         * Determine and load a segment for the backup area. The first
         * 640K of RAM is the backup source.
         */
        ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
                                  image, determine_backup_region);

        /* Zero or positive return values are ok */
        if (ret < 0)
                return ret;

        /* Add backup segment. */
        if (image->arch.backup_src_sz) {
                kbuf.buffer = &crash_zero_bytes;
                kbuf.bufsz = sizeof(crash_zero_bytes);
                kbuf.memsz = image->arch.backup_src_sz;
                kbuf.buf_align = PAGE_SIZE;
                /*
                 * Ideally there is no source for the backup segment; it is
                 * copied by purgatory after a crash. Just add a zero-filled
                 * segment for now so that the checksum logic works.
                 */
                ret = kexec_add_buffer(&kbuf);
                if (ret)
                        return ret;
                image->arch.backup_load_addr = kbuf.mem;
                pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
                         image->arch.backup_load_addr,
                         image->arch.backup_src_start, kbuf.memsz);
        }

        /* Prepare elf headers and add a segment */
        ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
        if (ret)
                return ret;

        image->arch.elf_headers = kbuf.buffer;
        image->arch.elf_headers_sz = kbuf.bufsz;

        kbuf.memsz = kbuf.bufsz;
        kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
        if (ret) {
                vfree((void *)image->arch.elf_headers);
                return ret;
        }
        image->arch.elf_load_addr = kbuf.mem;
        pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
                 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

        return ret;
}
#endif /* CONFIG_KEXEC_FILE */