// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/debugfs.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page tables
 * eventually point to the kasan_early_shadow_page, we can call note_page_pte()
 * right away without walking through the lower-level page tables. This saves
 * us dozens of seconds (minutes for a 5-level config) when checking for
 * W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	st->note_page_pte(st, addr, kasan_early_shadow_pte[0]);

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif

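/*
 * Level entry callbacks: read the entry once, report its effective
 * protection to the dumper if requested, and for leaf entries note the
 * mapping and skip descending into lower levels.
 */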
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pgd)
		st->effective_prot_pgd(st, val);

	if (pgd_leaf(val)) {
		st->note_page_pgd(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_p4d)
		st->effective_prot_p4d(st, val);

	if (p4d_leaf(val)) {
		st->note_page_p4d(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pud)
		st->effective_prot_pud(st, val);

	if (pud_leaf(val)) {
		st->note_page_pud(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot_pmd)
		st->effective_prot_pmd(st, val);
	if (pmd_leaf(val)) {
		st->note_page_pmd(st, addr, val);
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

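/*
 * PTEs are the lowest level, so there is no leaf check: every entry is
 * passed straight to the dumper.
 */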
static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get_lockless(pte);

	if (st->effective_prot_pte)
		st->effective_prot_pte(st, val);

	st->note_page_pte(st, addr, val);

	return 0;
}

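/*
 * Holes are reported to the dumper as a zeroed entry of the level they
 * were found at (depth 0 = pgd ... depth 4 = pte).
 */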
static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t pte_zero = {0};
	pmd_t pmd_zero = {0};
	pud_t pud_zero = {0};
	p4d_t p4d_zero = {0};
	pgd_t pgd_zero = {0};

	switch (depth) {
	case 4:
		st->note_page_pte(st, addr, pte_zero);
		break;
	case 3:
		st->note_page_pmd(st, addr, pmd_zero);
		break;
	case 2:
		st->note_page_pud(st, addr, pud_zero);
		break;
	case 1:
		st->note_page_p4d(st, addr, p4d_zero);
		break;
	case 0:
		st->note_page_pgd(st, addr, pgd_zero);
		break;
	default:
		break;
	}
	return 0;
}

static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};

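/*
 * Walk every range in st->range under the mmap write lock, so the page
 * tables cannot change underneath the walker, then flush the final note
 * so the last region is emitted.
 */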
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);

	/* Flush out the last page */
	st->note_page_flush(st);
}

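/* debugfs show callback: report the outcome of the W+X mapping check. */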
static int check_wx_show(struct seq_file *m, void *v)
{
	if (ptdump_check_wx())
		seq_puts(m, "SUCCESS\n");
	else
		seq_puts(m, "FAILED\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(check_wx);

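/*
 * Expose the W+X check as a read-only debugfs file, check_wx_pages
 * (typically /sys/kernel/debug/check_wx_pages).
 */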
static int ptdump_debugfs_init(void)
{
	debugfs_create_file("check_wx_pages", 0400, NULL, NULL, &check_wx_fops);

	return 0;
}

device_initcall(ptdump_debugfs_init);