// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

/* Terminate the walk and report "no (more) mappings found". */
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

/*
 * Map the PTE for pvmw->address and take its lock. Without PVMW_SYNC,
 * bail out before locking when the PTE cannot be of interest: a non-swap
 * PTE while looking for migration entries, or a non-present PTE otherwise.
 */
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}
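
/*
 * Worked example for the subpage range check above (illustrative numbers,
 * not taken from this file): for a 2MB x86-64 THP whose head page sits at
 * pfn 0x1000, hpage_nr_pages() is 512, so any pfn in [0x1000, 0x1200)
 * passes both checks and counts as a mapping of the compound page; every
 * other pfn is rejected.
 */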

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
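
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * caller such as an rmap walker might drive page_vma_mapped_walk() in a
 * loop, per the kernel-doc above, to visit every mapping of a page in a
 * VMA. The helper name example_visit_mappings() is an assumption for this
 * example, as is the use of vma_address() from mm/internal.h to seed the
 * starting address.
 */
static void example_visit_mappings(struct page *page,
				   struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = vma_address(page, vma),
		.flags = 0,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/* PMD-mapped THP: pvmw.pmd is set, pvmw.ptl is held */
		} else {
			/* PTE mapping: pvmw.pte and pvmw.ptl are valid */
		}
		/*
		 * To stop early, call page_vma_mapped_walk_done(&pvmw) and
		 * break out of the loop; it drops the lock and unmaps the PTE.
		 */
	}
}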

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
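
/*
 * Illustrative usage (an assumption, not code from this file): callers
 * treat the result as a boolean, e.g.
 *
 *	if (page_mapped_in_vma(page, vma))
 *		...the page is mapped in this VMA; act on it...
 *
 * Note that PVMW_SYNC makes the walk take the PTE lock even for PTEs that
 * look uninteresting, so the answer is stable under concurrent updates.
 */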