// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name = "Crash kernel",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name = "Crash kernel",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the make_task_dead() path, each of
	 * which corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used when
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
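/*
 * Worked example of PAGE_COUNT (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * PAGE_COUNT(4096) == 1, PAGE_COUNT(4097) == 2, PAGE_COUNT(8192) == 2 -- it
 * simply rounds a byte count up to whole pages.
 */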

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of ram. We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_index = -1;
	image->elfcorehdr_updated = false;
#endif

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}
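/*
 * A note on the pairing above/below: the allocation order is stashed in
 * page_private() so that kimage_free_pages() can recover it, and __GFP_ZERO
 * is masked off so the pages are cleared only after
 * arch_kexec_post_alloc_pages() has run (a sketch of the rationale: on x86
 * with SME that hook maps the pages decrypted, and the clearing must go
 * through the final mapping).
 */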

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn = page_to_boot_pfn(pages);
		epfn = pfn + count;
		addr = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
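/*
 * The retry loop above works because every rejected page is parked on
 * extra_pages rather than freed immediately: the page allocator therefore
 * cannot hand the same conflicting page back on the next iteration. Only
 * once a usable page has been found (or allocation fails outright) is the
 * whole extra_pages list released in one go.
 */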

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
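/*
 * Worked example for the hole search above (assuming 4 KiB pages): with
 * order == 1, size == 8192. If image->control_page is 0x1000, then
 * hole_start = (0x1000 + 0x1fff) & ~0x1fff = 0x2000 and hole_end = 0x3fff,
 * i.e. the candidate hole is the naturally aligned 8 KiB block at 0x2000,
 * which is then advanced past any segment it intersects.
 */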


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory. As we have arch_kexec_protect_crashkres()
	 * after the kexec syscall, we naturally protect it from write
	 * (even read) access under the kernel direct mapping. But on
	 * the other hand, we still need to operate on it when a crash
	 * happens to generate the vmcoreinfo note, hence we rely on
	 * vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
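/*
 * Sketch of the resulting entry list (each entry is a page-aligned boot
 * physical address with a flag in its low bits):
 *
 *   IND_DESTINATION | dst    where the following source pages are copied to
 *   IND_SOURCE | src0        copied to dst; the implicit destination advances
 *   IND_SOURCE | src1        copied to dst + PAGE_SIZE, and so on
 *   IND_INDIRECTION | next   continue reading entries from the page "next"
 *   IND_DONE                 end of the list
 *
 * When the current indirection page fills up, the code above chains in a
 * fresh one; last_entry reserves the final slot so the IND_INDIRECTION
 * link always fits.
 */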

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This path is hit if
	 * an error occurred long after the buffers were allocated.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
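/*
 * Note how kimage_dst_used() recovers destinations that are never stored
 * explicitly: each IND_SOURCE entry implicitly lands PAGE_SIZE beyond the
 * previous one, so the walk only resets "destination" when it sees an
 * IND_DESTINATION entry. It returns a pointer to the matching IND_SOURCE
 * slot so the caller can rewrite that entry in place.
 */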

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
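/*
 * Per iteration above, mchunk is how much of the destination page this
 * segment covers and uchunk is how much source data is still available
 * (uchunk <= mchunk). Once ubytes runs out, uchunk becomes 0 and the
 * remaining memsz tail stays zero-filled thanks to the clear_page() call.
 */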

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct kexec_load_limit *limit = table->data;
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};
	int ret;

	if (write) {
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		if (val < 0)
			return -EINVAL;

		mutex_lock(&limit->mutex);
		if (limit->limit != -1 && val >= limit->limit)
			ret = -EINVAL;
		else
			limit->limit = val;
		mutex_unlock(&limit->mutex);

		return ret;
	}

	mutex_lock(&limit->mutex);
	val = limit->limit;
	mutex_unlock(&limit->mutex);

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
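/*
 * Usage sketch (root shell; paths follow the sysctl names below): these
 * limits only ever tighten, because a write must be strictly lower than
 * the current value unless that value is still -1 (unlimited).
 *
 *   # cat /proc/sys/kernel/kexec_load_limit_reboot       # -1, unlimited
 *   # echo 2 > /proc/sys/kernel/kexec_load_limit_reboot  # allow 2 loads
 *   # echo 5 > /proc/sys/kernel/kexec_load_limit_reboot  # EINVAL, 5 >= 2
 */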

static struct ctl_table kexec_core_sysctls[] = {
	{
		.procname = "kexec_load_disabled",
		.data = &kexec_load_disabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "kexec_load_limit_panic",
		.data = &load_limit_panic,
		.mode = 0644,
		.proc_handler = kexec_limit_handler,
	},
	{
		.procname = "kexec_load_limit_reboot",
		.data = &load_limit_reboot,
		.mode = 0644,
		.proc_handler = kexec_limit_handler,
	},
	{ }
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

bool kexec_load_permitted(int kexec_image_type)
{
	struct kexec_load_limit *limit;

	/*
	 * Only the superuser can use the kexec syscall, and only if it
	 * has not been disabled.
	 */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return false;

	/* Check the limit counter and decrease it. */
	limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
		&load_limit_panic : &load_limit_reboot;
	mutex_lock(&limit->mutex);
	if (!limit->limit) {
		mutex_unlock(&limit->mutex);
		return false;
	}
	if (limit->limit != -1)
		limit->limit--;
	mutex_unlock(&limit->mutex);

	return true;
}

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (kexec_trylock()) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		kexec_unlock();
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic(). Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other. To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

static inline resource_size_t crash_resource_size(const struct resource *res)
{
	return !res->end ? 0 : resource_size(res);
}

ssize_t crash_get_memory_size(void)
{
	ssize_t size = 0;

	if (!kexec_trylock())
		return -EBUSY;

	size += crash_resource_size(&crashk_res);
	size += crash_resource_size(&crashk_low_res);

	kexec_unlock();
	return size;
}

static int __crash_shrink_memory(struct resource *old_res,
				 unsigned long new_size)
{
	struct resource *ram_res;

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res)
		return -ENOMEM;

	ram_res->start = old_res->start + new_size;
	ram_res->end = old_res->end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	if (!new_size) {
		release_resource(old_res);
		old_res->start = 0;
		old_res->end = 0;
	} else {
		crashk_res.end = ram_res->start - 1;
	}

	crash_free_reserved_phys_range(ram_res->start, ram_res->end);
	insert_resource(&iomem_resource, ram_res);

	return 0;
}
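/*
 * Illustrative layout (hypothetical addresses): shrinking a 128 MiB
 * reservation [0x10000000, 0x17ffffff] to new_size = 64 MiB splits it into
 *
 *   old_res: [0x10000000, 0x13ffffff]   still "Crash kernel"
 *   ram_res: [0x14000000, 0x17ffffff]   returned as "System RAM"
 *
 * with the freed half handed back to the page allocator via
 * crash_free_reserved_phys_range().
 */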

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long old_size, low_size;

	if (!kexec_trylock())
		return -EBUSY;

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}

	low_size = crash_resource_size(&crashk_low_res);
	old_size = crash_resource_size(&crashk_res) + low_size;
	new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	/*
	 * (low_size > new_size) implies that low_size is greater than zero.
	 * This also means that if low_size is zero, the else branch is taken.
	 *
	 * If low_size is greater than 0, (low_size > new_size) indicates that
	 * crashk_low_res also needs to be shrunk. Otherwise, only crashk_res
	 * needs to be shrunk.
	 */
	if (low_size > new_size) {
		ret = __crash_shrink_memory(&crashk_res, 0);
		if (ret)
			goto unlock;

		ret = __crash_shrink_memory(&crashk_low_res, new_size);
	} else {
		ret = __crash_shrink_memory(&crashk_res, new_size - low_size);
	}

	/* Swap crashk_res and crashk_low_res if needed */
	if (!crashk_res.end && crashk_low_res.end) {
		crashk_res.start = crashk_low_res.start;
		crashk_res.end = crashk_low_res.end;
		release_resource(&crashk_low_res);
		crashk_low_res.start = 0;
		crashk_low_res.end = 0;
		insert_resource(&iomem_resource, &crashk_res);
	}

unlock:
	kexec_unlock();
	return ret;
}
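/*
 * These two helpers back the /sys/kernel/kexec_crash_size interface (wired
 * up in kernel/ksysfs.c): reading reports the current reservation, and
 * writing a smaller value shrinks it, e.g.
 *
 *   # cat /sys/kernel/kexec_crash_size
 *   # echo 0 > /sys/kernel/kexec_crash_size    # give the memory back entirely
 *
 * Shrinking is refused with -ENOENT while a crash image is loaded, and
 * growing the reservation is not supported (-EINVAL).
 */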

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
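/*
 * For reference, append_elf_note() lays each note out in the standard ELF
 * note format that the kdump kernel and userspace crash tools parse back
 * out of the per-cpu crash_notes buffer:
 *
 *   Elf_Nhdr { n_namesz, n_descsz, n_type }
 *   name     (KEXEC_CORE_NOTE_NAME, "CORE"), padded to 4-byte words
 *   desc     (here a struct elf_prstatus), padded to 4-byte words
 *
 * final_note() then terminates the buffer with an all-zero note header.
 */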

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!kexec_trylock())
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now. Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	kexec_unlock();
	return error;
}