// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located ignoring any
 * folding of levels which may be happening. For example if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		/*
		 * pte_offset_map() might apply user-specific validation.
		 * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
		 * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
		 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
		 */
		if (walk->mm == &init_mm || addr >= TASK_SIZE)
			pte = pte_offset_kernel(pmd, addr);
		else
			pte = pte_offset_map(pmd, addr);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			if (walk->mm != &init_mm && addr < TASK_SIZE)
				pte_unmap(pte);
		}
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (pte) {
			err = walk_pte_range_inner(pte, addr, end, walk);
			pte_unmap_unlock(pte, ptl);
		}
	}
	if (!pte)
		walk->action = ACTION_AGAIN;
	return err;
}

#ifdef CONFIG_ARCH_HAS_HUGEPD
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	int err = 0;
	const struct mm_walk_ops *ops = walk->ops;
	int shift = hugepd_shift(*phpd);
	int page_size = 1 << shift;

	if (!ops->pte_entry)
		return 0;

	if (addr & (page_size - 1))
		return 0;

	for (;;) {
		pte_t *pte;

		spin_lock(&walk->mm->page_table_lock);
		pte = hugepte_offset(*phpd, addr, pdshift);
		err = ops->pte_entry(pte, addr, addr + page_size, walk);
		spin_unlock(&walk->mm->page_table_lock);

		if (err)
			break;
		if (addr >= end - page_size)
			break;
		addr += page_size;
	}

	return err;
}
#else
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk, int pdshift)
{
	return 0;
}
#endif

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pmd(walk->vma, pmd, addr);

		if (is_hugepd(__hugepd(pmd_val(*pmd))))
			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
		else
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

	} while (pmd++, addr = next, addr != end);

	return err;
}

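/*
 * Illustrative sketch, not part of pagewalk.c: a ->pmd_entry() handler that
 * copes with the pmd_trans_huge() case noted above for vma-backed walks. The
 * function name (count_thp_pmd_entry) and the private counter are made up
 * for the example; pmd_trans_huge_lock() is the usual way such handlers
 * serialise against a concurrent split.
 */
static int count_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_thps = walk->private;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		/* A huge pmd maps this range: count it and don't split it. */
		(*nr_thps)++;
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		return 0;
	}
	/* Not huge: keep ACTION_SUBTREE so ->pte_entry() sees the ptes. */
	return 0;
}
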
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (is_hugepd(__hugepd(pud_val(*pud))))
			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
		else
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(p4d_val(*p4d))))
			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (is_hugepd(__hugepd(pgd_val(*pgd))))
			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	hugetlb_vma_lock_read(vma);
	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = hugetlb_walk(vma, addr & hmask, sz);
		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);
		if (err)
			break;
	} while (addr = next, addr != end);
	hugetlb_vma_unlock_read(vma);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean
 * error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}

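/*
 * Illustrative sketch, not part of pagewalk.c: a ->test_walk() callback using
 * the convention described above (0 = walk the vma, 1 = skip it, <0 = abort).
 * The name skip_mlocked_test is hypothetical.
 */
static int skip_mlocked_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	if (walk->vma->vm_flags & VM_LOCKED)
		return 1;	/* skip mlocked vmas, keep walking the rest */
	return 0;		/* descend into this vma */
}
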
static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

static inline void process_mm_walk_lock(struct mm_struct *mm,
					enum page_walk_lock walk_lock)
{
	if (walk_lock == PGWALK_RDLOCK)
		mmap_assert_locked(mm);
	else
		mmap_assert_write_locked(mm);
}

static inline void process_vma_walk_lock(struct vm_area_struct *vma,
					 enum page_walk_lock walk_lock)
{
#ifdef CONFIG_PER_VMA_LOCK
	switch (walk_lock) {
	case PGWALK_WRLOCK:
		vma_start_write(vma);
		break;
	case PGWALK_WRLOCK_VERIFY:
		vma_assert_write_locked(vma);
		break;
	case PGWALK_RDLOCK:
		/* PGWALK_RDLOCK is handled by process_mm_walk_lock */
		break;
	}
#endif
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded in handling the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded in handling the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access vma data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			process_vma_walk_lock(vma, ops->walk_lock);
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = find_vma(mm, vma->vm_end);

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}

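/*
 * Illustrative sketch, not part of pagewalk.c: a minimal caller of
 * walk_page_range() that counts present ptes in a range of a process'
 * address space. The names count_pte_entry, count_walk_ops and
 * count_present_ptes are made up for the example; the ops fields and the
 * PGWALK_RDLOCK locking requirement are the ones used by this file.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* Called with the pte lock held for vma-backed walks. */
	if (pte_present(ptep_get(pte)))
		(*count)++;
	return 0;	/* keep walking */
}

static const struct mm_walk_ops count_walk_ops = {
	.pte_entry	= count_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_walk_ops, &count);
	mmap_read_unlock(mm);
	return count;
}
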
/**
 * walk_page_range_novma - walk a range of pagetables not backed by a vma
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @pgd:	pgd to walk if different from mm->pgd
 * @private:	private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);
}

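/*
 * Illustrative sketch, not part of pagewalk.c: using walk_page_range_novma()
 * to visit kernel page-table entries, in the spirit of ptdump. The names
 * note_kernel_pte, kernel_walk_ops and walk_kernel_range are hypothetical.
 * Since novma walks take no pte lock, the callback must tolerate entries
 * changing underneath it, and the caller must hold mmap_lock for write.
 */
static int note_kernel_pte(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	pr_debug("pte at %#lx: %#lx\n", addr,
		 (unsigned long)pte_val(ptep_get(pte)));
	return 0;
}

static const struct mm_walk_ops kernel_walk_ops = {
	.pte_entry	= note_kernel_pte,
};

static int walk_kernel_range(unsigned long start, unsigned long end)
{
	int err;

	mmap_write_lock(&init_mm);
	err = walk_page_range_novma(&init_mm, start, end, &kernel_walk_ops,
				    NULL, NULL);
	mmap_write_unlock(&init_mm);
	return err;
}
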
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (start >= end || !walk.mm)
		return -EINVAL;
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(start, end, &walk);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	if (!walk.mm)
		return -EINVAL;

	process_mm_walk_lock(walk.mm, ops->walk_lock);
	process_vma_walk_lock(vma, ops->walk_lock);
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
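
/*
 * Illustrative sketch, not part of pagewalk.c: a caller of walk_page_mapping()
 * that counts how many ptes currently map a page-offset range of a file,
 * across every process that has it mapped. The names count_file_pte,
 * file_walk_ops and count_file_mappings are made up for the example; the
 * i_mmap_rwsem locking follows the kernel-doc above.
 */
static int count_file_pte(pte_t *pte, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	unsigned long *mapped = walk->private;

	if (pte_present(ptep_get(pte)))
		(*mapped)++;
	return 0;
}

static const struct mm_walk_ops file_walk_ops = {
	.pte_entry	= count_file_pte,
};

static unsigned long count_file_mappings(struct address_space *mapping,
					 pgoff_t first_index, pgoff_t nr)
{
	unsigned long mapped = 0;

	i_mmap_lock_read(mapping);
	walk_page_mapping(mapping, first_index, nr, &file_walk_ops, &mapped);
	i_mmap_unlock_read(mapping);
	return mapped;
}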