/* arch/riscv/mm/hugetlbpage.c — from the Linux stable tree (thirdparty/kernel/stable) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/hugetlb.h>
5 #ifdef CONFIG_RISCV_ISA_SVNAPOT
6 pte_t
huge_ptep_get(pte_t
*ptep
)
10 pte_t orig_pte
= ptep_get(ptep
);
12 if (!pte_present(orig_pte
) || !pte_napot(orig_pte
))
15 pte_num
= napot_pte_num(napot_cont_order(orig_pte
));
17 for (i
= 0; i
< pte_num
; i
++, ptep
++) {
18 pte_t pte
= ptep_get(ptep
);
21 orig_pte
= pte_mkdirty(orig_pte
);
24 orig_pte
= pte_mkyoung(orig_pte
);
30 pte_t
*huge_pte_alloc(struct mm_struct
*mm
,
31 struct vm_area_struct
*vma
,
42 pgd
= pgd_offset(mm
, addr
);
43 p4d
= p4d_alloc(mm
, pgd
, addr
);
47 pud
= pud_alloc(mm
, p4d
, addr
);
57 if (want_pmd_share(vma
, addr
) && pud_none(*pud
))
58 pte
= huge_pmd_share(mm
, vma
, addr
, pud
);
60 pte
= (pte_t
*)pmd_alloc(mm
, pud
, addr
);
64 pmd
= pmd_alloc(mm
, pud
, addr
);
68 for_each_napot_order(order
) {
69 if (napot_cont_size(order
) == sz
) {
70 pte
= pte_alloc_huge(mm
, pmd
, addr
& napot_cont_mask(order
));
77 pte_t pteval
= ptep_get_lockless(pte
);
79 WARN_ON_ONCE(pte_present(pteval
) && !pte_huge(pteval
));
84 pte_t
*huge_pte_offset(struct mm_struct
*mm
,
95 pgd
= pgd_offset(mm
, addr
);
96 if (!pgd_present(*pgd
))
99 p4d
= p4d_offset(pgd
, addr
);
100 if (!p4d_present(*p4d
))
103 pud
= pud_offset(p4d
, addr
);
105 /* must be pud huge, non-present or none */
108 if (!pud_present(*pud
))
111 pmd
= pmd_offset(pud
, addr
);
113 /* must be pmd huge, non-present or none */
116 if (!pmd_present(*pmd
))
119 for_each_napot_order(order
) {
120 if (napot_cont_size(order
) == sz
) {
121 pte
= pte_offset_huge(pmd
, addr
& napot_cont_mask(order
));
128 static pte_t
get_clear_contig(struct mm_struct
*mm
,
131 unsigned long pte_num
)
133 pte_t orig_pte
= ptep_get(ptep
);
136 for (i
= 0; i
< pte_num
; i
++, addr
+= PAGE_SIZE
, ptep
++) {
137 pte_t pte
= ptep_get_and_clear(mm
, addr
, ptep
);
140 orig_pte
= pte_mkdirty(orig_pte
);
143 orig_pte
= pte_mkyoung(orig_pte
);
149 static pte_t
get_clear_contig_flush(struct mm_struct
*mm
,
152 unsigned long pte_num
)
154 pte_t orig_pte
= get_clear_contig(mm
, addr
, ptep
, pte_num
);
155 struct vm_area_struct vma
= TLB_FLUSH_VMA(mm
, 0);
156 bool valid
= !pte_none(orig_pte
);
159 flush_tlb_range(&vma
, addr
, addr
+ (PAGE_SIZE
* pte_num
));
164 pte_t
arch_make_huge_pte(pte_t entry
, unsigned int shift
, vm_flags_t flags
)
168 for_each_napot_order(order
) {
169 if (shift
== napot_cont_shift(order
)) {
170 entry
= pte_mknapot(entry
, order
);
174 if (order
== NAPOT_ORDER_MAX
)
175 entry
= pte_mkhuge(entry
);
180 void set_huge_pte_at(struct mm_struct
*mm
,
186 unsigned long hugepage_shift
;
189 if (sz
>= PGDIR_SIZE
)
190 hugepage_shift
= PGDIR_SHIFT
;
191 else if (sz
>= P4D_SIZE
)
192 hugepage_shift
= P4D_SHIFT
;
193 else if (sz
>= PUD_SIZE
)
194 hugepage_shift
= PUD_SHIFT
;
195 else if (sz
>= PMD_SIZE
)
196 hugepage_shift
= PMD_SHIFT
;
198 hugepage_shift
= PAGE_SHIFT
;
200 pte_num
= sz
>> hugepage_shift
;
201 for (i
= 0; i
< pte_num
; i
++, ptep
++, addr
+= (1 << hugepage_shift
))
202 set_pte_at(mm
, addr
, ptep
, pte
);
205 int huge_ptep_set_access_flags(struct vm_area_struct
*vma
,
211 struct mm_struct
*mm
= vma
->vm_mm
;
217 return ptep_set_access_flags(vma
, addr
, ptep
, pte
, dirty
);
219 order
= napot_cont_order(pte
);
220 pte_num
= napot_pte_num(order
);
221 ptep
= huge_pte_offset(mm
, addr
, napot_cont_size(order
));
222 orig_pte
= get_clear_contig_flush(mm
, addr
, ptep
, pte_num
);
224 if (pte_dirty(orig_pte
))
225 pte
= pte_mkdirty(pte
);
227 if (pte_young(orig_pte
))
228 pte
= pte_mkyoung(pte
);
230 for (i
= 0; i
< pte_num
; i
++, addr
+= PAGE_SIZE
, ptep
++)
231 set_pte_at(mm
, addr
, ptep
, pte
);
236 pte_t
huge_ptep_get_and_clear(struct mm_struct
*mm
,
240 pte_t orig_pte
= ptep_get(ptep
);
243 if (!pte_napot(orig_pte
))
244 return ptep_get_and_clear(mm
, addr
, ptep
);
246 pte_num
= napot_pte_num(napot_cont_order(orig_pte
));
248 return get_clear_contig(mm
, addr
, ptep
, pte_num
);
251 void huge_ptep_set_wrprotect(struct mm_struct
*mm
,
255 pte_t pte
= ptep_get(ptep
);
260 if (!pte_napot(pte
)) {
261 ptep_set_wrprotect(mm
, addr
, ptep
);
265 order
= napot_cont_order(pte
);
266 pte_num
= napot_pte_num(order
);
267 ptep
= huge_pte_offset(mm
, addr
, napot_cont_size(order
));
268 orig_pte
= get_clear_contig_flush(mm
, addr
, ptep
, pte_num
);
270 orig_pte
= pte_wrprotect(orig_pte
);
272 for (i
= 0; i
< pte_num
; i
++, addr
+= PAGE_SIZE
, ptep
++)
273 set_pte_at(mm
, addr
, ptep
, orig_pte
);
276 pte_t
huge_ptep_clear_flush(struct vm_area_struct
*vma
,
280 pte_t pte
= ptep_get(ptep
);
284 return ptep_clear_flush(vma
, addr
, ptep
);
286 pte_num
= napot_pte_num(napot_cont_order(pte
));
288 return get_clear_contig_flush(vma
->vm_mm
, addr
, ptep
, pte_num
);
291 void huge_pte_clear(struct mm_struct
*mm
,
296 pte_t pte
= READ_ONCE(*ptep
);
299 if (!pte_napot(pte
)) {
300 pte_clear(mm
, addr
, ptep
);
304 pte_num
= napot_pte_num(napot_cont_order(pte
));
305 for (i
= 0; i
< pte_num
; i
++, addr
+= PAGE_SIZE
, ptep
++)
306 pte_clear(mm
, addr
, ptep
);
309 static __init
bool is_napot_size(unsigned long size
)
316 for_each_napot_order(order
) {
317 if (size
== napot_cont_size(order
))
323 static __init
int napot_hugetlbpages_init(void)
328 for_each_napot_order(order
)
329 hugetlb_add_hstate(order
);
333 arch_initcall(napot_hugetlbpages_init
);
337 static __init
bool is_napot_size(unsigned long size
)
342 #endif /*CONFIG_RISCV_ISA_SVNAPOT*/
344 int pud_huge(pud_t pud
)
346 return pud_leaf(pud
);
349 int pmd_huge(pmd_t pmd
)
351 return pmd_leaf(pmd
);
354 bool __init
arch_hugetlb_valid_size(unsigned long size
)
356 if (size
== HPAGE_SIZE
)
358 else if (IS_ENABLED(CONFIG_64BIT
) && size
== PUD_SIZE
)
360 else if (is_napot_size(size
))
366 #ifdef CONFIG_CONTIG_ALLOC
367 static __init
int gigantic_pages_init(void)
369 /* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
370 if (IS_ENABLED(CONFIG_64BIT
))
371 hugetlb_add_hstate(PUD_SHIFT
- PAGE_SHIFT
);
374 arch_initcall(gigantic_pages_init
);