// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/debugfs.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>
#include "internal.h"

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * Optimization for the KASAN=y case: every KASAN shadow page table
 * ultimately resolves to kasan_early_shadow_page, so a single
 * note_page() call can stand in for walking all the lower-level
 * tables. This saves dozens of seconds (minutes on a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *state = walk->private;

	/* Report the shared shadow PTE once and skip the whole subtree. */
	state->note_page_pte(state, addr, kasan_early_shadow_pte[0]);
	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif
29 | ||
30 | static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr, | |
31 | unsigned long next, struct mm_walk *walk) | |
32 | { | |
33 | struct ptdump_state *st = walk->private; | |
34 | pgd_t val = READ_ONCE(*pgd); | |
35 | ||
0fea6e9a AK |
36 | #if CONFIG_PGTABLE_LEVELS > 4 && \ |
37 | (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) | |
30d621f6 SP |
38 | if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d))) |
39 | return note_kasan_page_table(walk, addr); | |
40 | #endif | |
41 | ||
08978fc3 AK |
42 | if (st->effective_prot_pgd) |
43 | st->effective_prot_pgd(st, val); | |
1494e0c3 | 44 | |
d8d55f56 | 45 | if (pgd_leaf(val)) { |
e064e738 | 46 | st->note_page_pgd(st, addr, val); |
d8d55f56 MS |
47 | walk->action = ACTION_CONTINUE; |
48 | } | |
30d621f6 SP |
49 | |
50 | return 0; | |
51 | } | |
52 | ||
53 | static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr, | |
54 | unsigned long next, struct mm_walk *walk) | |
55 | { | |
56 | struct ptdump_state *st = walk->private; | |
57 | p4d_t val = READ_ONCE(*p4d); | |
58 | ||
0fea6e9a AK |
59 | #if CONFIG_PGTABLE_LEVELS > 3 && \ |
60 | (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) | |
30d621f6 SP |
61 | if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud))) |
62 | return note_kasan_page_table(walk, addr); | |
63 | #endif | |
64 | ||
08978fc3 AK |
65 | if (st->effective_prot_p4d) |
66 | st->effective_prot_p4d(st, val); | |
1494e0c3 | 67 | |
d8d55f56 | 68 | if (p4d_leaf(val)) { |
e064e738 | 69 | st->note_page_p4d(st, addr, val); |
d8d55f56 MS |
70 | walk->action = ACTION_CONTINUE; |
71 | } | |
30d621f6 SP |
72 | |
73 | return 0; | |
74 | } | |
75 | ||
76 | static int ptdump_pud_entry(pud_t *pud, unsigned long addr, | |
77 | unsigned long next, struct mm_walk *walk) | |
78 | { | |
79 | struct ptdump_state *st = walk->private; | |
80 | pud_t val = READ_ONCE(*pud); | |
81 | ||
0fea6e9a AK |
82 | #if CONFIG_PGTABLE_LEVELS > 2 && \ |
83 | (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) | |
30d621f6 SP |
84 | if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd))) |
85 | return note_kasan_page_table(walk, addr); | |
86 | #endif | |
87 | ||
08978fc3 AK |
88 | if (st->effective_prot_pud) |
89 | st->effective_prot_pud(st, val); | |
1494e0c3 | 90 | |
d8d55f56 | 91 | if (pud_leaf(val)) { |
e064e738 | 92 | st->note_page_pud(st, addr, val); |
d8d55f56 MS |
93 | walk->action = ACTION_CONTINUE; |
94 | } | |
30d621f6 SP |
95 | |
96 | return 0; | |
97 | } | |
98 | ||
99 | static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr, | |
100 | unsigned long next, struct mm_walk *walk) | |
101 | { | |
102 | struct ptdump_state *st = walk->private; | |
103 | pmd_t val = READ_ONCE(*pmd); | |
104 | ||
0fea6e9a | 105 | #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) |
30d621f6 SP |
106 | if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte))) |
107 | return note_kasan_page_table(walk, addr); | |
108 | #endif | |
109 | ||
08978fc3 AK |
110 | if (st->effective_prot_pmd) |
111 | st->effective_prot_pmd(st, val); | |
d8d55f56 | 112 | if (pmd_leaf(val)) { |
e064e738 | 113 | st->note_page_pmd(st, addr, val); |
d8d55f56 MS |
114 | walk->action = ACTION_CONTINUE; |
115 | } | |
30d621f6 SP |
116 | |
117 | return 0; | |
118 | } | |
119 | ||
120 | static int ptdump_pte_entry(pte_t *pte, unsigned long addr, | |
121 | unsigned long next, struct mm_walk *walk) | |
122 | { | |
123 | struct ptdump_state *st = walk->private; | |
426931e7 | 124 | pte_t val = ptep_get_lockless(pte); |
1494e0c3 | 125 | |
08978fc3 AK |
126 | if (st->effective_prot_pte) |
127 | st->effective_prot_pte(st, val); | |
30d621f6 | 128 | |
e064e738 | 129 | st->note_page_pte(st, addr, val); |
30d621f6 SP |
130 | |
131 | return 0; | |
132 | } | |
133 | ||
134 | static int ptdump_hole(unsigned long addr, unsigned long next, | |
135 | int depth, struct mm_walk *walk) | |
136 | { | |
137 | struct ptdump_state *st = walk->private; | |
e064e738 AK |
138 | pte_t pte_zero = {0}; |
139 | pmd_t pmd_zero = {0}; | |
140 | pud_t pud_zero = {0}; | |
141 | p4d_t p4d_zero = {0}; | |
142 | pgd_t pgd_zero = {0}; | |
143 | ||
144 | switch (depth) { | |
145 | case 4: | |
146 | st->note_page_pte(st, addr, pte_zero); | |
147 | break; | |
148 | case 3: | |
149 | st->note_page_pmd(st, addr, pmd_zero); | |
150 | break; | |
151 | case 2: | |
152 | st->note_page_pud(st, addr, pud_zero); | |
153 | break; | |
154 | case 1: | |
155 | st->note_page_p4d(st, addr, p4d_zero); | |
156 | break; | |
157 | case 0: | |
158 | st->note_page_pgd(st, addr, pgd_zero); | |
159 | break; | |
160 | default: | |
161 | break; | |
162 | } | |
30d621f6 SP |
163 | return 0; |
164 | } | |
165 | ||
166 | static const struct mm_walk_ops ptdump_ops = { | |
167 | .pgd_entry = ptdump_pgd_entry, | |
168 | .p4d_entry = ptdump_p4d_entry, | |
169 | .pud_entry = ptdump_pud_entry, | |
170 | .pmd_entry = ptdump_pmd_entry, | |
171 | .pte_entry = ptdump_pte_entry, | |
172 | .pte_hole = ptdump_hole, | |
173 | }; | |
174 | ||
e47690d7 | 175 | void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) |
30d621f6 SP |
176 | { |
177 | const struct ptdump_range *range = st->range; | |
178 | ||
59305202 | 179 | get_online_mems(); |
8782fb61 | 180 | mmap_write_lock(mm); |
30d621f6 | 181 | while (range->start != range->end) { |
96d81e47 | 182 | walk_page_range_debug(mm, range->start, range->end, |
e47690d7 | 183 | &ptdump_ops, pgd, st); |
30d621f6 SP |
184 | range++; |
185 | } | |
8782fb61 | 186 | mmap_write_unlock(mm); |
59305202 | 187 | put_online_mems(); |
30d621f6 SP |
188 | |
189 | /* Flush out the last page */ | |
e064e738 | 190 | st->note_page_flush(st); |
30d621f6 | 191 | } |
565474af CL |
192 | |
/* seq_file show handler for the check_wx_pages debugfs file. */
static int check_wx_show(struct seq_file *m, void *v)
{
	/* ptdump_check_wx() returns true when no W+X mappings were found. */
	seq_puts(m, ptdump_check_wx() ? "SUCCESS\n" : "FAILED\n");

	return 0;
}
202 | ||
203 | DEFINE_SHOW_ATTRIBUTE(check_wx); | |
204 | ||
205 | static int ptdump_debugfs_init(void) | |
206 | { | |
207 | debugfs_create_file("check_wx_pages", 0400, NULL, NULL, &check_wx_fops); | |
208 | ||
209 | return 0; | |
210 | } | |
211 | ||
212 | device_initcall(ptdump_debugfs_init); |