arch/riscv/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/hugetlb.h>
#include <linux/err.h>

#ifdef CONFIG_RISCV_ISA_SVNAPOT
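/*
 * Svnapot maps a naturally aligned power-of-two range with a group of
 * contiguous PTEs. The dirty and young bits of such a mapping may be
 * set on any constituent entry, so fold them all into the value read
 * from the first one.
 */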
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long pte_num;
	int i;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_napot(orig_pte))
		return orig_pte;

	pte_num = napot_pte_num(napot_cont_order(orig_pte));

	for (i = 0; i < pte_num; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}

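/*
 * Walk and, where needed, allocate the page-table levels for a huge
 * mapping at @addr. PUD- and PMD-sized pages map directly at their
 * level; any other size must match one of the NAPOT contiguous sizes.
 */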
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long addr,
		      unsigned long sz)
{
	unsigned long order;
	pte_t *pte = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	if (sz == PUD_SIZE) {
		pte = (pte_t *)pud;
		goto out;
	}

	if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(*pud))
			pte = huge_pmd_share(mm, vma, addr, pud);
		else
			pte = (pte_t *)pmd_alloc(mm, pud, addr);
		goto out;
	}

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	for_each_napot_order(order) {
		if (napot_cont_size(order) == sz) {
			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
			break;
		}
	}

out:
	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		WARN_ON_ONCE(pte_present(pteval) && !pte_huge(pteval));
	}
	return pte;
}

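/*
 * Lookup-only counterpart of huge_pte_alloc(): return the entry
 * mapping @addr without allocating intermediate page-table levels.
 */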
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr,
		       unsigned long sz)
{
	unsigned long order;
	pte_t *pte = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;

	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (sz == PMD_SIZE)
		/* must be pmd huge, non-present or none */
		return (pte_t *)pmd;

	if (!pmd_present(*pmd))
		return NULL;

	for_each_napot_order(order) {
		if (napot_cont_size(order) == sz) {
			pte = pte_offset_huge(pmd, addr & napot_cont_mask(order));
			break;
		}
	}
	return pte;
}

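/*
 * Clear all @pte_num entries of a contiguous mapping, folding the
 * dirty and young bits of every cleared entry into the returned PTE.
 */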
static pte_t get_clear_contig(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep,
			      unsigned long pte_num)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}

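/* As get_clear_contig(), but also flush the TLB for the cleared range. */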
static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pte_num)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pte_num);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	bool valid = !pte_none(orig_pte);

	if (valid)
		flush_tlb_range(&vma, addr, addr + (PAGE_SIZE * pte_num));

	return orig_pte;
}

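/*
 * Encode the huge page size into the PTE: a NAPOT order when @shift
 * matches one of the contiguous sizes, a regular leaf huge page
 * otherwise.
 */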
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	unsigned long order;

	for_each_napot_order(order) {
		if (shift == napot_cont_shift(order)) {
			entry = pte_mknapot(entry, order);
			break;
		}
	}
	if (order == NAPOT_ORDER_MAX)
		entry = pte_mkhuge(entry);

	return entry;
}

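/*
 * Install a huge PTE. The entry width is derived from @sz rather than
 * from the PTE itself, so that non-present entries (such as swap or
 * migration entries, which carry no NAPOT bits) are handled correctly
 * as well.
 */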
void set_huge_pte_at(struct mm_struct *mm,
		     unsigned long addr,
		     pte_t *ptep,
		     pte_t pte,
		     unsigned long sz)
{
	unsigned long hugepage_shift;
	int i, pte_num;

	if (sz >= PGDIR_SIZE)
		hugepage_shift = PGDIR_SHIFT;
	else if (sz >= P4D_SIZE)
		hugepage_shift = P4D_SHIFT;
	else if (sz >= PUD_SIZE)
		hugepage_shift = PUD_SHIFT;
	else if (sz >= PMD_SIZE)
		hugepage_shift = PMD_SHIFT;
	else
		hugepage_shift = PAGE_SHIFT;

	pte_num = sz >> hugepage_shift;
	for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
		set_pte_at(mm, addr, ptep, pte);
}

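/*
 * Update the access flags of a huge PTE. A NAPOT group is first
 * cleared and flushed, then rewritten with the merged dirty/young
 * state (break-before-make).
 */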
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr,
			       pte_t *ptep,
			       pte_t pte,
			       int dirty)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long order;
	pte_t orig_pte;
	int i, pte_num;

	if (!pte_napot(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	order = napot_cont_order(pte);
	pte_num = napot_pte_num(order);
	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
	orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);

	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, pte);

	return true;
}

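/* Clear a huge PTE and return its previous value, NAPOT-aware. */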
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep)
{
	pte_t orig_pte = ptep_get(ptep);
	int pte_num;

	if (!pte_napot(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	pte_num = napot_pte_num(napot_cont_order(orig_pte));

	return get_clear_contig(mm, addr, ptep, pte_num);
}

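/*
 * Write-protect a huge PTE. For a NAPOT mapping, the pointer is
 * recomputed to the first entry of the group, which is then cleared,
 * flushed and rewritten with the write bit removed.
 */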
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	unsigned long order;
	pte_t orig_pte;
	int i, pte_num;

	if (!pte_napot(pte)) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	order = napot_cont_order(pte);
	pte_num = napot_pte_num(order);
	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
	orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);

	orig_pte = pte_wrprotect(orig_pte);

	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
		set_pte_at(mm, addr, ptep, orig_pte);
}

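/* Clear a huge PTE, flush the TLB and return the previous value. */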
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr,
			    pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	int pte_num;

	if (!pte_napot(pte))
		return ptep_clear_flush(vma, addr, ptep);

	pte_num = napot_pte_num(napot_cont_order(pte));

	return get_clear_contig_flush(vma->vm_mm, addr, ptep, pte_num);
}

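/*
 * Clear every entry of a huge mapping; unlike huge_ptep_get_and_clear()
 * nothing is returned and no TLB flush is performed.
 */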
void huge_pte_clear(struct mm_struct *mm,
		    unsigned long addr,
		    pte_t *ptep,
		    unsigned long sz)
{
	pte_t pte = READ_ONCE(*ptep);
	int i, pte_num;

	if (!pte_napot(pte)) {
		pte_clear(mm, addr, ptep);
		return;
	}

	pte_num = napot_pte_num(napot_cont_order(pte));
	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
		pte_clear(mm, addr, ptep);
}

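/* True if @size is one of the NAPOT contiguous sizes the CPU supports. */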
static __init bool is_napot_size(unsigned long size)
{
	unsigned long order;

	if (!has_svnapot())
		return false;

	for_each_napot_order(order) {
		if (size == napot_cont_size(order))
			return true;
	}
	return false;
}

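/* Register an hstate for every NAPOT order at boot. */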
static __init int napot_hugetlbpages_init(void)
{
	if (has_svnapot()) {
		unsigned long order;

		for_each_napot_order(order)
			hugetlb_add_hstate(order);
	}
	return 0;
}
arch_initcall(napot_hugetlbpages_init);

#else

static __init bool is_napot_size(unsigned long size)
{
	return false;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

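/*
 * Generic hugetlb hooks: a huge page at PUD or PMD level is simply a
 * leaf entry at that level.
 */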
int pud_huge(pud_t pud)
{
	return pud_leaf(pud);
}

int pmd_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

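/*
 * Valid huge page sizes: HPAGE_SIZE (PMD), PUD_SIZE on 64-bit, and
 * any NAPOT size when Svnapot is available.
 */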
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == HPAGE_SIZE)
		return true;
	else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
		return true;
	else if (is_napot_size(size))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
	if (IS_ENABLED(CONFIG_64BIT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif