mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

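/*
 * map_pte - map the PTE that @pvmw->address falls in and take its lock.
 *
 * Unless PVMW_SYNC is requested, back out before taking the lock when
 * the PTE cannot be of interest: a migration walk (PVMW_MIGRATION) only
 * cares about swap PTEs, every other walk only about present ones.
 */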
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

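/*
 * check_pte - check that the PTE at @pvmw->pte maps @pvmw->page.
 *
 * A migration walk matches migration entries pointing at any subpage of
 * the (possibly compound) page. Any other walk matches present PTEs,
 * and additionally device-private swap entries that encode the page.
 */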
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;
                if (migration_entry_to_page(entry) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (migration_entry_to_page(entry) < pvmw->page)
                        return false;
#else
                WARN_ON_ONCE(1);
#endif
        } else {
                if (is_swap_pte(*pvmw->pte)) {
                        swp_entry_t entry;

                        entry = pte_to_swp_entry(*pvmw->pte);
                        if (is_device_private_entry(entry) &&
                            device_private_entry_to_page(entry) == pvmw->page)
                                return true;
                }

                if (!pte_present(*pvmw->pte))
                        return false;

                /* THP can be referenced by any subpage */
                if (pte_page(*pvmw->pte) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (pte_page(*pvmw->pte) < pvmw->page)
                        return false;
        }

        return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                            PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
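
/*
 * Usage sketch (illustrative only, not a caller in this file), modeled
 * on callers in mm/rmap.c such as page_referenced_one(). The walk is
 * driven in a loop so that every PTE of a PTE-mapped THP is visited,
 * and page_vma_mapped_walk_done() is used to bail out early;
 * "stop_early" stands in for whatever condition the caller has:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte) {
 *			... one PTE-level mapping, at pvmw.address ...
 *		} else {
 *			... pvmw.pmd maps the whole (PMD-sized) page ...
 *		}
 *		if (stop_early) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 */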

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
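
/*
 * Usage sketch (illustrative; the hwpoison code in mm/memory-failure.c
 * is a real caller): a synchronous yes/no test for whether a VMA is
 * relevant to a page at all, e.g. inside a loop over candidate VMAs:
 *
 *	if (!page_mapped_in_vma(page, vma))
 *		continue;	(skip VMAs that do not map the page)
 */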