/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_args() to filter
 * out "space/lf/tab". However, boot/ctype.h conflicts with
 * linux/ctype.h, since isdigit() is implemented in both of them.
 * Hence disable boot/ctype.h here by pre-defining its include guard.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * However, both lib/ctype.c and lib/cmdline.c pull in EXPORT_SYMBOL,
 * which is meaningless here and causes compile errors in some cases.
 * So do not include linux/export.h, and define EXPORT_SYMBOL(sym)
 * as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)

#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsrelease.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
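		/*
		 * A right-rotate by 7 is a left-rotate by an odd count
		 * (57 on 64-bit, 25 on 32-bit); an odd count is coprime
		 * with the word width, so repeated rotations cycle
		 * through every bit position.
		 */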
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"
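
/*
 * kaslr_get_random_long(), pulled in from lib/kaslr.c above, mixes the
 * get_boot_seed() entropy with hardware sources such as RDRAND and
 * RDTSC when they are available.
 */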

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only support at most 4 unusable memmap regions with KASLR. */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;


/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
unsigned long long mem_limit = ULLONG_MAX;


enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

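/*
 * Regions are half-open intervals [start, start + size): e.g.
 * {0x1000, 0x1000} and {0x2000, 0x1000} touch but do not overlap.
 */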
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}

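/* Provide skip_spaces() for the lib/cmdline.c parser included below. */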
char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
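		/*
		 * memmap=nn#ss (ACPI data), nn$ss (reserved) and
		 * nn!ss (persistent memory) all remove [ss, ss+nn)
		 * from usable RAM, so record the region.
		 */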
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies a usable region; skip it. */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * Without an offset (only a size given), memmap=nn[KMG]
		 * behaves like mem=nn[KMG]: it limits the maximum
		 * address the system can use. Regions above the limit
		 * must be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}

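/*
 * Record the regions named by a comma-separated memmap= list, e.g.
 * "memmap=64M$0x10000000,512M$0x40000000", in mem_avoid[].
 */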
static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than MAX_MEMMAP_REGIONS entries remain: disable KASLR. */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}

static int handle_mem_memmap(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen(args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
		return 0;

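	/* next_arg() modifies the string in place, so parse a copy. */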
	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			free(tmp_cmdline);
			return -1;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0) {
				free(tmp_cmdline);
				return -EINVAL;
			}
			mem_limit = mem_size;
		}
	}

	free(tmp_cmdline);
	return 0;
}

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is the range of memory used during decompression
 * (MEM_AVOID_ZO_RANGE below). This range must cover the compressed
 * kernel (ZO) and its run space, which is used to extract the
 * uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 * - input + input_size >= output + output_size
 * - kernel_total_size <= init_size
 * - kernel_total_size <= output_size (see Note below)
 * - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram shows a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code (i.e. the range of size
 * ZO_INIT_SIZE counted backwards from output+init_size).
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since the
 * code running from it performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in [input, output+init_size), which
 * becomes MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
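	/* The ext_* fields hold bits 32..63 of the address and size. */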
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
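	/* This counts strlen()+1, so the trailing NUL is avoided too. */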
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

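	/*
	 * Each CONFIG_PHYSICAL_ALIGN step that still leaves room for
	 * the image is one slot: e.g. a 6M region, a 4M image and 2M
	 * alignment give (6M - 4M) / 2M + 1 = 2 slots.
	 */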
	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}

static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

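	/* Walk the areas, spending the flat index until it lands in one. */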
	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}

static void process_mem_region(struct mem_vector *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	unsigned long start_orig, end;
	struct mem_vector cur_entry;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->start + entry->size < minimum)
		return;

	/* Ignore entries above the memory limit. */
	end = min(entry->size + entry->start, mem_limit);
	if (entry->start >= end)
		return;
	cur_entry.start = entry->start;
	cur_entry.size = end - entry->start;

	region.start = cur_entry.start;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above the passed-in memory entry? */
		if (region.start > cur_entry.start + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			store_slot_info(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			store_slot_info(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}

static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		process_mem_region(&region, minimum, image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	process_e820_entries(minimum, image_size);
	return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * Count how many CONFIG_PHYSICAL_ALIGN-sized slots can hold
	 * image_size within the range [minimum, KERNEL_IMAGE_SIZE).
	 */
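	/*
	 * For example, assuming KERNEL_IMAGE_SIZE = 1G, minimum = 16M,
	 * image_size = 16M and CONFIG_PHYSICAL_ALIGN = 2M, this gives
	 * (1024M - 16M - 16M) / 2M + 1 = 497 slots.
	 */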
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}

/*
 * Since this function operates on addresses numerically, it takes the
 * input and output pointers as 'unsigned long' values rather than as
 * pointer types.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table.
		 * This should only be done if a new physical address
		 * is found for the kernel; otherwise we should keep
		 * the old page table so that booting behaves like the
		 * "nokaslr" case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
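	/*
	 * On 32-bit, virtual and physical offsets are randomized
	 * together: the physical address chosen above is reused as
	 * *virt_addr below.
	 */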
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}