// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if the page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte to check and the
 * pfn range (@pvmw->pfn, @pvmw->nr_pages) describing the page
 *
 * page_vma_mapped_walk() found a place where the page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to the page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * the page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

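	/*
	 * Unsigned arithmetic: when pfn is below pvmw->pfn the subtraction
	 * wraps to a huge value, so this single compare rejects pfns on
	 * either side of the walked range.
	 */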
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

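/*
 * Advance pvmw->address to the next boundary of size (a power of two), i.e.
 * round it up to the next multiple of size above the current address, and
 * saturate to ULONG_MAX if that wraps, so the caller's
 * "while (pvmw->address < end)" loop terminates.
 */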
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

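/*
 * Illustrative sketch (not part of this file): a caller typically fills in
 * the walk state and then drives page_vma_mapped_walk() in a loop, much as
 * page_mapped_in_vma() does below:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = page_to_pfn(page),
 *		.nr_pages = 1,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... inspect or update the entry at pvmw.pte (or pvmw.pmd
 *		for a PMD-mapped THP) while pvmw.ptl is held ...
 *	}
 *
 * Fields left out of the initializer (such as flags) default to zero; set
 * PVMW_SYNC or PVMW_MIGRATION there as needed. When the walk returns false
 * it has already dropped the lock and unmapped the PTE, so
 * page_vma_mapped_walk_done() is only needed if the caller breaks out of
 * the loop early.
 */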
/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

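		/*
		 * A huge pmd, a devmap pmd or a pmd migration entry can map
		 * the page at PMD level: take the pmd lock and re-read the
		 * entry before deciding between the PMD-mapped, migration
		 * and split-under-us cases below.
		 */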
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transparent_hugepage_active(vma) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
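		/*
		 * Advance to the next PTE that is not pte_none(), bailing out
		 * at the end of the range and restarting the walk whenever a
		 * page table boundary is crossed.
		 */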
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}