// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 * (see the illustrative sketch after this function).
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

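/*
 * Illustrative sketch (not part of the original file): one way a
 * ->pmd_entry() handler can cope with pmd_trans_huge() pmds, by taking
 * the huge pmd lock and handling the whole mapping there, while letting
 * normal pmds fall through so walk_pte_range() visits the ptes. The name
 * example_pmd_entry is hypothetical; accounting and error handling are
 * omitted.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

        if (ptl) {
                /* a huge pmd maps all of [addr, next); handle it as a unit */
                spin_unlock(ptl);
                return 0;
        }
        /* not a huge pmd: returning 0 lets the walk descend to the ptes */
        return 0;
}
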
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(p4d, addr);
        do {
 again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                if (walk->pud_entry) {
                        spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

                        if (ptl) {
                                err = walk->pud_entry(pud, addr, next, walk);
                                spin_unlock(ptl);
                                if (err)
                                        break;
                                continue;
                        }
                }

                split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        int err = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        unsigned long sz = huge_page_size(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask, sz);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean an
 * error, in which case we abort the current walk. An illustrative
 * ->test_walk() sketch follows this function.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
         * range, so we don't walk over it as we do for normal vmas. However,
         * some callers are interested in handling hole ranges and they don't
         * want to just ignore any single address range. Such users certainly
         * define their ->pte_hole() callbacks, so let's delegate them to handle
         * vma(VM_PFNMAP).
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}

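/*
 * Illustrative sketch (not part of the original file): a ->test_walk()
 * callback following the convention above, skipping (return 1) any vma
 * that is not anonymous and walking (return 0) the rest. The name
 * example_test_walk is hypothetical.
 */
static int example_test_walk(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        return vma_is_anonymous(walk->vma) ? 0 : 1;
}
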
static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as below:
 *  - 0  : the current entry was handled successfully; if the end address
 *         has not been reached yet, continue the walk.
 *  - >0 : the current entry was handled successfully; return to the caller
 *         with a caller-specific value.
 *  - <0 : handling the current entry failed; return to the caller with the
 *         error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access the vma's data.
 *
 * An illustrative usage sketch follows the function body below.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}

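/*
 * Illustrative usage sketch (not part of the original file): counting
 * present ptes in a range with a ->pte_entry() callback, passing the
 * result back through ->private and taking mmap_sem as required above.
 * The names example_count_pte and example_count_present are hypothetical.
 */
static int example_count_pte(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *nr_present = walk->private;

        if (pte_present(*pte))
                (*nr_present)++;
        return 0;
}

static unsigned long example_count_present(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        unsigned long nr_present = 0;
        struct mm_walk walk = {
                .pte_entry      = example_count_pte,
                .mm             = mm,
                .private        = &nr_present,
        };

        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return nr_present;
}
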
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);
        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}