]> git.ipfire.org Git - thirdparty/linux.git/blame - arch/arm64/include/asm/pgalloc.h
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234
[thirdparty/linux.git] / arch / arm64 / include / asm / pgalloc.h
CommitLineData
caab277b 1/* SPDX-License-Identifier: GPL-2.0-only */
1d18c47c
CM
2/*
3 * Based on arch/arm/include/asm/pgalloc.h
4 *
5 * Copyright (C) 2000-2001 Russell King
6 * Copyright (C) 2012 ARM Ltd.
1d18c47c
CM
7 */
8#ifndef __ASM_PGALLOC_H
9#define __ASM_PGALLOC_H
10
11#include <asm/pgtable-hwdef.h>
12#include <asm/processor.h>
13#include <asm/cacheflush.h>
14#include <asm/tlbflush.h>
15
/* No arch-private page-table cache to drain; core mm calls this periodically. */
#define check_pgt_cache()		do { } while (0)

/* GFP flags for page-table pages: zero-filled, allocation may sleep. */
#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
/* Byte size of a top-level page table (one page's worth of pgd entries). */
#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
15670ef1 20
9f25e6ad 21#if CONFIG_PGTABLE_LEVELS > 2
1d18c47c
CM
22
23static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
24{
54c8d911
YZ
25 struct page *page;
26
27 page = alloc_page(PGALLOC_GFP);
28 if (!page)
29 return NULL;
30 if (!pgtable_pmd_page_ctor(page)) {
31 __free_page(page);
32 return NULL;
33 }
34 return page_address(page);
1d18c47c
CM
35}
36
20a004e7 37static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
1d18c47c 38{
20a004e7 39 BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
54c8d911 40 pgtable_pmd_page_dtor(virt_to_page(pmdp));
20a004e7 41 free_page((unsigned long)pmdp);
1d18c47c
CM
42}
43
20a004e7 44static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
1d18c47c 45{
20a004e7 46 set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
1d18c47c
CM
47}
48
20a004e7 49static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
1e531cce 50{
20a004e7 51 __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
1e531cce
MR
52}
53#else
/* 2-level configuration: pmds are folded, so this must never be emitted. */
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
	BUILD_BUG();
}
9f25e6ad 58#endif /* CONFIG_PGTABLE_LEVELS > 2 */
1d18c47c 59
9f25e6ad 60#if CONFIG_PGTABLE_LEVELS > 3
c79b954b
JL
61
62static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
63{
15670ef1 64 return (pud_t *)__get_free_page(PGALLOC_GFP);
c79b954b
JL
65}
66
20a004e7 67static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
c79b954b 68{
20a004e7
WD
69 BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
70 free_page((unsigned long)pudp);
c79b954b
JL
71}
72
20a004e7 73static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
c79b954b 74{
20a004e7 75 set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
c79b954b
JL
76}
77
20a004e7 78static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
1e531cce 79{
20a004e7 80 __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
1e531cce
MR
81}
82#else
/* 3-level (or fewer) configuration: puds are folded; must never be emitted. */
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
	BUILD_BUG();
}
9f25e6ad 87#endif /* CONFIG_PGTABLE_LEVELS > 3 */
c79b954b 88
/* pgd allocation is out of line — defined elsewhere in the arch code. */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
1d18c47c 91
1d18c47c 92static inline pte_t *
4cf58924 93pte_alloc_one_kernel(struct mm_struct *mm)
1d18c47c
CM
94{
95 return (pte_t *)__get_free_page(PGALLOC_GFP);
96}
97
98static inline pgtable_t
4cf58924 99pte_alloc_one(struct mm_struct *mm)
1d18c47c
CM
100{
101 struct page *pte;
102
103 pte = alloc_pages(PGALLOC_GFP, 0);
d97a2291
KS
104 if (!pte)
105 return NULL;
106 if (!pgtable_page_ctor(pte)) {
107 __free_page(pte);
108 return NULL;
109 }
1d18c47c
CM
110 return pte;
111}
112
113/*
114 * Free a PTE table.
115 */
20a004e7 116static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
1d18c47c 117{
20a004e7
WD
118 if (ptep)
119 free_page((unsigned long)ptep);
1d18c47c
CM
120}
121
/* Free a user PTE table page allocated by pte_alloc_one(). */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	/* Tear down the split-ptlock state before the page goes back. */
	pgtable_page_dtor(pte);
	__free_page(pte);
}
127
20a004e7 128static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
1d18c47c
CM
129 pmdval_t prot)
130{
20a004e7 131 set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
1d18c47c
CM
132}
133
134/*
135 * Populate the pmdp entry with a pointer to the pte. This pmd is part
136 * of the mm address space.
137 */
138static inline void
139pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
140{
141 /*
142 * The pmd must be loaded with the physical address of the PTE table
143 */
144 __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
145}
146
147static inline void
148pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
149{
150 __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
151}
152#define pmd_pgtable(pmd) pmd_page(pmd)
153
154#endif