/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */
/*
 * isspace() in linux/ctype.h is expected by next_args() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h,
 * since isdigit() is implemented in both of them, so disable the
 * boot version here.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * Both lib/ctype.c and lib/cmdline.c would bring in EXPORT_SYMBOL,
 * which is meaningless here and can cause compile errors in some
 * cases. So do not include linux/export.h, and define EXPORT_SYMBOL(sym)
 * as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)
#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsrelease.h>
/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>
extern unsigned long get_cmd_line_ptr(void);
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
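
/*
 * Mix the bytes of 'area' into 'hash' one machine word at a time: each
 * step rotates the running hash left by an odd bit count, then XORs in
 * the next word, so input bits disperse across the whole hash value.
 */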
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}
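
/*
 * Mixing in build_str ties the seed to the specific kernel build, while
 * boot_params varies with firmware, bootloader, and command line, so the
 * starting entropy differs across builds and boot environments even
 * before hardware sources are mixed in by kaslr_get_random_long().
 */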
#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"
struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};
/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
unsigned long long mem_limit = ULLONG_MAX;
enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];
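
/*
 * Keeping MEM_AVOID_MEMMAP_BEGIN..MEM_AVOID_MEMMAP_END as the last
 * entries above reserves MAX_MEMMAP_REGIONS contiguous slots in
 * mem_avoid[], which lets mem_avoid_memmap() index user-supplied
 * regions simply as MEM_AVOID_MEMMAP_BEGIN + i.
 */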
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;

	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;

	return true;
}
char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
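
/*
 * The lib sources above are included directly because this is a
 * freestanding boot environment: lib/ctype.c supplies the _ctype[]
 * table behind isspace(), and lib/cmdline.c supplies memparse() and
 * next_arg() for command line parsing, without linking the full
 * kernel library.
 */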
static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies usable region, should be skipped */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * If only a size is given (no offset), memmap=nn[KMG]
		 * behaves like mem=nn[KMG]: it limits the maximum usable
		 * address, and the region above the limit must be avoided.
		 */
		*start = 0;
		return 0;
	}
	return -EINVAL;
}
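
/*
 * For example, "memmap=16M$0x30000000" parses to *size = 16M and
 * *start = 0x30000000, reserving [0x30000000, 0x31000000) so that KASLR
 * must avoid it, while a bare "memmap=1G" acts as a 1G memory limit.
 */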
static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than 4 memmaps, fail kaslr */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}
static int handle_mem_memmap(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen((char *)args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
		return 0;

	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			free(tmp_cmdline);
			return -1;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0) {
				free(tmp_cmdline);
				return -EINVAL;
			}

			mem_limit = mem_size;
		}
	}

	free(tmp_cmdline);
	return 0;
}
/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible, since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is not obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range covered
 * backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}
/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
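
/*
 * Example of the slot arithmetic above, assuming the common x86_64
 * CONFIG_PHYSICAL_ALIGN of 0x200000 (2M): a 100M region holding a 30M
 * image yields (100M - 30M) / 2M + 1 = 36 candidate slots.
 */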
static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
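
/*
 * Because the random draw above is taken modulo the total slot count and
 * then walked through the areas in order, every individual slot across
 * all stored areas is chosen with equal probability.
 */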
static void process_mem_region(struct mem_vector *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	struct slot_area slot_area;
	unsigned long start_orig, end;
	struct mem_vector cur_entry;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->start + entry->size < minimum)
		return;

	/* Ignore entries above memory limit */
	end = min(entry->size + entry->start, mem_limit);
	if (entry->start >= end)
		return;
	cur_entry.start = entry->start;
	cur_entry.size = end - entry->start;

	region.start = cur_entry.start;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above the passed in memory entry? */
		if (region.start > cur_entry.start + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			store_slot_info(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			store_slot_info(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}
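
/*
 * Each pass of the loop above either stores a usable slot area or clips
 * the region to just past the lowest-addressed overlap, so the scan
 * terminates once the remaining region shrinks below image_size, runs
 * past the end of the entry, or the slot area array fills up.
 */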
static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		process_mem_region(&region, minimum, image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}
}
static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	process_e820_entries(minimum, image_size);
	return slots_fetch_random();
}
static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * Count how many CONFIG_PHYSICAL_ALIGN-sized slots can hold
	 * image_size within the range [minimum, KERNEL_IMAGE_SIZE).
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
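
/*
 * Worked example for the slot count above, assuming a 1G
 * KERNEL_IMAGE_SIZE and 2M CONFIG_PHYSICAL_ALIGN, with minimum = 16M
 * and a 64M image: slots = (1024M - 16M - 64M) / 2M + 1 = 473, so the
 * virtual base lands on one of 473 2M-aligned offsets starting at 16M.
 */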
/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table.
		 * This should only be done if a new physical address
		 * is found for the kernel; otherwise we should keep
		 * the old page table so the behavior matches the
		 * "nokaslr" case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}
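
/*
 * choose_random_location() is called from the decompressor's
 * extract_kernel() in misc.c, which relocates the kernel to the chosen
 * physical address and applies the virtual offset via the relocation
 * handling code.
 */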