// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/debugfs.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>
#include "internal.h"

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page tables
 * eventually point to the kasan_early_shadow_page, we can call note_page()
 * right away without walking through the lower-level page tables. This saves
 * us dozens of seconds (minutes for a 5-level config) while checking for
 * W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	st->note_page_pte(st, addr, kasan_early_shadow_pte[0]);

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif

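/*
 * Per-level entry callbacks. Each one reads the entry once, lets the
 * caller accumulate effective protections via the optional
 * effective_prot_*() hook, and reports leaf (block) mappings through
 * note_page_*(). Setting ACTION_CONTINUE stops the walk from descending
 * below a leaf entry. The same pattern repeats for p4d, pud and pmd.
 */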
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pgd)
		st->effective_prot_pgd(st, val);

	if (pgd_leaf(val)) {
		st->note_page_pgd(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

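/* As ptdump_pgd_entry(), one level down: P4D entries. */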
static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_p4d)
		st->effective_prot_p4d(st, val);

	if (p4d_leaf(val)) {
		st->note_page_p4d(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

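/* As ptdump_pgd_entry(), for PUD entries. */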
static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pud)
		st->effective_prot_pud(st, val);

	if (pud_leaf(val)) {
		st->note_page_pud(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

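/* As ptdump_pgd_entry(), for PMD entries. */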
static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pmd)
		st->effective_prot_pmd(st, val);

	if (pmd_leaf(val)) {
		st->note_page_pmd(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

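/*
 * Leaf level: read the PTE locklessly and pass every entry to the
 * caller's note_page_pte() callback.
 */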
static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get_lockless(pte);

	if (st->effective_prot_pte)
		st->effective_prot_pte(st, val);

	st->note_page_pte(st, addr, val);

	return 0;
}

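/*
 * Called for unmapped ranges: report a zero entry at the level where the
 * hole was found (depth 0 is the PGD, depth 4 is the PTE level).
 */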
static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t pte_zero = {0};
	pmd_t pmd_zero = {0};
	pud_t pud_zero = {0};
	p4d_t p4d_zero = {0};
	pgd_t pgd_zero = {0};

	switch (depth) {
	case 4:
		st->note_page_pte(st, addr, pte_zero);
		break;
	case 3:
		st->note_page_pmd(st, addr, pmd_zero);
		break;
	case 2:
		st->note_page_pud(st, addr, pud_zero);
		break;
	case 1:
		st->note_page_p4d(st, addr, p4d_zero);
		break;
	case 0:
		st->note_page_pgd(st, addr, pgd_zero);
		break;
	default:
		break;
	}
	return 0;
}

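/* Callback table handed to walk_page_range_debug() below. */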
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};

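/*
 * Walk the page tables rooted at @pgd over each range in st->range (the
 * list is terminated by an entry whose start equals its end), with memory
 * hotplug excluded and the mm write-locked. The final note_page_flush()
 * lets the caller emit the last accumulated region.
 */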
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	get_online_mems();
	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_debug(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);
	put_online_mems();

	/* Flush out the last page */
	st->note_page_flush(st);
}

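/*
 * debugfs interface: reading check_wx_pages triggers the arch W+X scan and
 * prints SUCCESS when no simultaneously writable and executable mappings
 * were found, FAILED otherwise.
 */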
static int check_wx_show(struct seq_file *m, void *v)
{
	if (ptdump_check_wx())
		seq_puts(m, "SUCCESS\n");
	else
		seq_puts(m, "FAILED\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(check_wx);

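/* Create check_wx_pages in debugfs, readable by root only (0400). */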
static int ptdump_debugfs_init(void)
{
	debugfs_create_file("check_wx_pages", 0400, NULL, NULL, &check_wx_fops);

	return 0;
}

device_initcall(ptdump_debugfs_init);