// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page tables
 * eventually point to the kasan_early_shadow_page, we can call note_page()
 * right away without walking through the lower-level page tables. This saves
 * us dozens of seconds (minutes for a 5-level config) when checking for
 * W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif
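/*
 * The level argument passed to note_page() and effective_prot() encodes
 * the page-table depth: 0 is the PGD, 4 is the PTE, and -1 (used for the
 * final flush in ptdump_walk_pgd()) means the level is unknown. Each of
 * the entry callbacks below handles one level: it short-circuits KASAN
 * shadow tables where possible, reports the entry's protection bits, and
 * notes leaf mappings while telling the walker not to descend into them.
 */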
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val)) {
		st->note_page(st, addr, 0, pgd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val)) {
		st->note_page(st, addr, 1, p4d_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
	(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val)) {
		st->note_page(st, addr, 2, pud_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val)) {
		st->note_page(st, addr, 3, pmd_val(val));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get(pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}
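/*
 * Non-present ranges are reported with a zero value at whatever depth the
 * walker discovered the hole, so consumers can still account for the gap
 * in their output.
 */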
static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, depth, 0);

	return 0;
}

static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};

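/*
 * Walk the page tables rooted at @pgd (or at mm's own pgd when @pgd is
 * NULL) over every range in st->range; the array must be terminated by
 * an entry whose start equals its end.
 */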
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

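	/*
	 * walk_page_range_novma() requires the mmap lock to be held for
	 * writing when walking page tables that are not backed by VMAs.
	 */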
	mmap_write_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_write_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}
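
/*
 * A rough sketch of how an architecture-specific dumper might drive this
 * walker; the pg_state wrapper and the sketch_* names below are invented
 * for illustration (and <linux/seq_file.h> is assumed), see e.g.
 * arch/arm64/mm/ptdump.c for a real consumer. The wrapper embeds
 * struct ptdump_state so the note_page() callback can recover it, and
 * the range array ends with an entry whose start equals its end:
 *
 *	struct pg_state {
 *		struct ptdump_state ptdump;
 *		struct seq_file *seq;
 *	};
 *
 *	static void sketch_note_page(struct ptdump_state *pt_st,
 *				     unsigned long addr, int level, u64 val)
 *	{
 *		struct pg_state *st = container_of(pt_st, struct pg_state,
 *						   ptdump);
 *
 *		seq_printf(st->seq, "0x%016lx level %d val 0x%llx\n",
 *			   addr, level, val);
 *	}
 *
 *	static const struct ptdump_range sketch_ranges[] = {
 *		{PAGE_OFFSET, ~0UL},
 *		{0, 0}
 *	};
 *
 *	void sketch_dump(struct seq_file *seq)
 *	{
 *		struct pg_state st = {
 *			.ptdump = {
 *				.note_page	= sketch_note_page,
 *				.range		= sketch_ranges,
 *			},
 *			.seq = seq,
 *		};
 *
 *		ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
 *	}
 */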