// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};
/* Address space markers hints */

#ifdef CONFIG_X86_64
enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};
static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)
#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	(swapper_pg_dir)
#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
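
/*
 * Output helpers: route a line either to the kernel log (for the dmesg
 * dump mode) or to the debugfs seq_file, depending on the walk's mode.
 */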
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
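
/*
 * Called when a W+X range has been found: count its pages and warn once,
 * except for the PCI BIOS special case handled below.
 */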
static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(__supported_pte_mask & _PAGE_NX,
		  "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;

				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}
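
/*
 * Effective permissions of a range are the combination of the permissions
 * at each level: USR and RW must be set at both levels to take effect,
 * while NX set at either level makes the range non-executable.
 */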
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}
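
/*
 * Lowest level of the walk: visit each of the PTRS_PER_PTE entries under
 * one PMD and report every PTE to note_page() (level 5 == "pte").
 */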
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(m, st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);

		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif
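
/*
 * The mid-level walkers below all follow the same pattern: visit each
 * entry, report holes and large/non-present mappings directly, and
 * recurse into present lower-level tables (unless short-circuited by
 * kasan_page_table()).
 */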
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		start++;
	}
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif
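
/*
 * With 4-level paging PTRS_PER_P4D is 1 and the p4d level is folded:
 * hand the pgd entry straight down to the pud walker in that case.
 */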
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}
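
/*
 * When 5-level paging is disabled at runtime, a "large" or "none" check
 * on a pgd entry really applies to the folded p4d level underneath it.
 */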
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * A hole in the beginning of kernel address space reserved
	 * for a hypervisor.
	 */
	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
#else
	return false;
#endif
}
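
/*
 * Core of the dumper: walk every top-level entry of @pgd (or of the
 * initial kernel page table when @pgd is NULL). @m may be NULL when only
 * the W+X check is wanted; @dmesg routes the output to the kernel log.
 */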
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = INIT_PGD;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
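
/*
 * Entry points: dump to dmesg, dump via debugfs (optionally the PTI user
 * copy of the tables), or run the W+X sanity checks.
 */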
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && boot_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
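
/*
 * With PTI enabled, also check the user-space copy of the page tables;
 * without NX support or without PTI there is nothing to check here.
 */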
void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);