/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd. This means that we can
 * subtract a constant offset to get to it. The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all of the
 * kernel for machines with under 4GB of memory). */
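
/*
 * A sketch of the layout pgd_alloc() builds in the 3-level case (the
 * entry counts follow from the code below; the exact sizes are set by
 * PGD_ALLOC_ORDER and PTRS_PER_PGD in the arch headers):
 *
 *   pgd (allocation start) ->  +------------------------------+
 *                              | first pmd, marked            |
 *                              | PxD_FLAG_ATTACHED, never     |
 *                              | freed on its own             |
 *   actual_pgd =               +------------------------------+
 *   pgd + PTRS_PER_PGD ------> | pgd proper, the value        |
 *                              | returned to the caller       |
 *                              +------------------------------+
 *
 * pgd_free() undoes the PTRS_PER_PGD offset to recover the allocation
 * start before handing it back to free_pages().
 */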
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
                                               PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
                actual_pgd += PTRS_PER_PGD;
                /* Populate the first pmd with the allocated memory. We mark
                 * it with PxD_FLAG_ATTACHED as a signal to the system that
                 * this pmd entry may not be cleared. */
                __pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
                                            PxD_FLAG_VALID |
                                            PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
                /* The first pmd entry is also marked with PxD_FLAG_ATTACHED,
                 * as a signal that this pmd may not be freed. */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
        }
        return actual_pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
        /* Step back over the attached first pmd to the allocation start. */
        pgd -= PTRS_PER_PGD;
#endif
        free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three-level page table support for pmds */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                      (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}

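/*
 * Both pgd_populate() above and pmd_populate_kernel() further down use
 * the same entry encoding: the PxD_FLAG_* bits sit in the low bits and
 * the physical address of the next-level table is stored shifted down
 * by PxD_VALUE_SHIFT, i.e.
 *
 *   entry = flags + (__pa(table) >> PxD_VALUE_SHIFT)
 *
 * The matching decode masks the flags off and shifts back up, roughly
 * (see the PxD_* and pmd_address() definitions in pgtable.h for the
 * authoritative form):
 *
 *   table = __va((entry & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 */
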
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
        return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd; it
                 * cannot be freed. Increment the counter to compensate
                 * for the decrement done by the generic mm code.
                 */
                mm_inc_nr_pmds(mm);
                return;
        }
        free_pages((unsigned long)pmd, PMD_ORDER);
}

#else

/* Two-level page table support for pmds */

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is inside
 * the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
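
/*
 * With two levels the pmd is folded into the pgd, so the generic mm
 * code should never reach the stubs above; BUG() catches any caller
 * that does. The (pmd_t *)2 return value appears to be an arbitrary
 * non-NULL poison pointer, so that a caller which somehow continues
 * past BUG() faults quickly instead of using the result as a table.
 */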

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
        /* Preserve the gateway marker if this is the beginning of
         * the permanent pmd. */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
                                     PxD_FLAG_VALID |
                                     PxD_FLAG_ATTACHED)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
        else
#endif
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                        + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}
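
/*
 * Preserving PxD_FLAG_ATTACHED above matters: it is the only record
 * that the pmd lives inside the pgd allocation, and pmd_free() keys
 * off it to skip free_pages() and to re-increment the pmd counter
 * when the generic mm code tears the tables down.
 */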

#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        return page;
}
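
/*
 * Note on the ctor above: pgtable_page_ctor() accounts the page as a
 * page-table page and, with split page-table locks configured,
 * initialises its ptlock; on failure the freshly allocated page must
 * be freed here, since the caller only ever sees the NULL return.
 */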

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        pgtable_page_dtor(pte);
        pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache() do { } while (0)

#endif /* _ASM_PGALLOC_H */