// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

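/*
 * PTE pages for kernel mappings are never allocated from highmem and are
 * not charged to a memcg (__GFP_ACCOUNT is masked off); user PTE pages
 * honor __userpte_alloc_gfp and need pgtable_page_ctor() to set up the
 * split page-table lock.
 */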
pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

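/*
 * Page-table pages are freed through the mmu_gather machinery rather than
 * directly, so a concurrent walker can never see a page that has already
 * been reused; paravirt_tlb_remove_table() lets a hypervisor backend defer
 * the actual free.
 */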
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pgtable_pmd_page_dtor(page);
        paravirt_tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
        paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif  /* CONFIG_PGTABLE_LEVELS > 4 */
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

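/*
 * pgd_list links the pgd pages of all address spaces so that updates to
 * the kernel part of the page tables can be propagated to every pgd when
 * the kernel pmd is not shared. The list is protected by pgd_lock.
 */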
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
        max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)

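/*
 * Record which mm a pgd page belongs to in its struct page, so code
 * walking pgd_list can find the owning mm in order to lock its page
 * tables.
 */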
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        virt_to_page(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return page->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS >= 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS   MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS  (static_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0
#define MAX_PREALLOCATED_PMDS   0
#define PREALLOCATED_USER_PMDS  0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif  /* CONFIG_X86_PAE */

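/*
 * Batch allocation and freeing of the pmd pages that get pre-installed
 * in a new pgd. preallocate_pmds() fills the whole array even on partial
 * failure (failed slots are NULL), so free_pmds() can simply skip NULL
 * entries; the mm's pmd count is kept accurate via mm_{inc,dec}_nr_pmds().
 */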
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
                        mm_dec_nr_pmds(mm);
                }
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;
        bool failed = false;
        gfp_t gfp = PGALLOC_GFP;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;

        for (i = 0; i < count; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                        free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
                if (pmd)
                        mm_inc_nr_pmds(mm);
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds, count);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
        pgd_t pgd = *pgdp;

        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                pgd_clear(pgdp);

                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
        }
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        pgdp = kernel_to_user_pgdp(pgdp);

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

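/*
 * Install the preallocated pmd pages into the new pgd, copying the
 * kernel-space pmd entries from swapper_pg_dir so the new address space
 * starts out with the current kernel mappings.
 */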
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        p4d_t *p4d;
        pud_t *pud;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
        pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        p4d_t *u_p4d;
        pud_t *u_pud;
        int i;

        u_p4d = p4d_offset(u_pgd, 0);
        u_pud = pud_offset(u_p4d, 0);

        s_pgd += KERNEL_PGD_BOUNDARY;
        u_pud += KERNEL_PGD_BOUNDARY;

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
                pmd_t *pmd = pmds[i];

                memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
                       sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, u_pud, pmd);
        }
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif

/*
 * Xen paravirt assumes the pgd table is in one page, and a 64-bit kernel
 * also assumes that the pgd is in one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain only
 * needs to allocate 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN       32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
        /*
         * When a PAE kernel is running as a Xen domain, it does not use
         * a shared kernel pmd, and that requires a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return 0;

        /*
         * When a PAE kernel is not running as a Xen domain, it uses a
         * shared kernel pmd, which does not require a whole page for the
         * pgd: 32 bytes are enough. During boot we create a 32-byte slab
         * for pgd table allocation.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
        return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
        /*
         * If there is no SHARED_KERNEL_PMD, the PAE kernel is running as
         * a Xen domain and we allocate one page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_pages(PGALLOC_GFP,
                                                 PGD_ALLOCATION_ORDER);

        /*
         * Otherwise the PAE kernel is not running as a Xen domain and we
         * can allocate the pgd from the 32-byte slab to save memory.
         */
        return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

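/*
 * Allocate a new top-level page table for @mm and pre-populate it with
 * the pmds required by PAE and/or PTI, unwinding all intermediate
 * allocations on any failure.
 */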
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
        pmd_t *pmds[MAX_PREALLOCATED_PMDS];

        pgd = _pgd_alloc();

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
                goto out_free_pgd;

        if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
                goto out_free_pmds;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_user_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);
        pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_user_pmds:
        free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
        free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}
480 | ||
481 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
482 | { | |
483 | pgd_mop_up_pmds(mm, pgd); | |
484 | pgd_dtor(pgd); | |
eba0045f | 485 | paravirt_pgd_free(mm, pgd); |
1db491f7 | 486 | _pgd_free(pgd); |
4f76cd38 | 487 | } |
ee5aa8d3 | 488 | |
0f9a921c RR |
489 | /* |
490 | * Used to set accessed or dirty bits in the page table entries | |
491 | * on other architectures. On x86, the accessed and dirty bits | |
492 | * are tracked by hardware. However, do_wp_page calls this function | |
493 | * to also make the pte writeable at the same time the dirty bit is | |
494 | * set. In that case we do actually need to write the PTE. | |
495 | */ | |
ee5aa8d3 JF |
496 | int ptep_set_access_flags(struct vm_area_struct *vma, |
497 | unsigned long address, pte_t *ptep, | |
498 | pte_t entry, int dirty) | |
499 | { | |
500 | int changed = !pte_same(*ptep, entry); | |
501 | ||
87930019 | 502 | if (changed && dirty) |
9bc4f28a | 503 | set_pte(ptep, entry); |
ee5aa8d3 JF |
504 | |
505 | return changed; | |
506 | } | |
f9fbf1a3 | 507 | |
db3eb96f AA |
508 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
509 | int pmdp_set_access_flags(struct vm_area_struct *vma, | |
510 | unsigned long address, pmd_t *pmdp, | |
511 | pmd_t entry, int dirty) | |
512 | { | |
513 | int changed = !pmd_same(*pmdp, entry); | |
514 | ||
515 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | |
516 | ||
517 | if (changed && dirty) { | |
9bc4f28a | 518 | set_pmd(pmdp, entry); |
5e4bf1a5 IM |
519 | /* |
520 | * We had a write-protection fault here and changed the pmd | |
521 | * to to more permissive. No need to flush the TLB for that, | |
522 | * #PF is architecturally guaranteed to do that and in the | |
523 | * worst-case we'll generate a spurious fault. | |
524 | */ | |
db3eb96f AA |
525 | } |
526 | ||
527 | return changed; | |
528 | } | |
a00cc7d9 MW |
529 | |
530 | int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |
531 | pud_t *pudp, pud_t entry, int dirty) | |
532 | { | |
533 | int changed = !pud_same(*pudp, entry); | |
534 | ||
535 | VM_BUG_ON(address & ~HPAGE_PUD_MASK); | |
536 | ||
537 | if (changed && dirty) { | |
9bc4f28a | 538 | set_pud(pudp, entry); |
a00cc7d9 MW |
539 | /* |
540 | * We had a write-protection fault here and changed the pud | |
541 | * to to more permissive. No need to flush the TLB for that, | |
542 | * #PF is architecturally guaranteed to do that and in the | |
543 | * worst-case we'll generate a spurious fault. | |
544 | */ | |
545 | } | |
546 | ||
547 | return changed; | |
548 | } | |
db3eb96f AA |
549 | #endif |
550 | ||
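/*
 * The CPU can set the accessed bit concurrently, so clearing it must be
 * an atomic read-modify-write on the pte word (test_and_clear_bit) to
 * avoid losing a concurrent hardware update to the other bits.
 */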
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pud_t *pudp)
{
        int ret = 0;

        if (pud_young(*pudp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pudp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

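/*
 * Fixmap entries bind a fixed virtual address to an arbitrary pte.
 * fixmaps_set counts how many have been installed; reserve_top_address()
 * uses it to refuse relocating the fixmap once any entry is live.
 */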
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
        /*
         * Ensure that the static initial page tables are covering the
         * fixmap completely.
         */
        BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
                     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        /* Sanitize 'prot' against any unsupported bits: */
        pgprot_val(flags) &= __default_kernel_pte_mask;

        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
        return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK))
                return 0;

        /* Bail out if we are on a populated non-leaf entry: */
        if (pud_present(*pud) && !pud_huge(*pud))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK)) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        /* Bail out if we are on a populated non-leaf entry: */
        if (pmd_present(*pmd) && !pmd_huge(*pmd))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
        if (pud_large(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_large(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}

/*
 * Until we support 512GB pages, skip them in the vmap area.
 */
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
        return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd, *pmd_sv;
        pte_t *pte;
        int i;

        pmd = (pmd_t *)pud_page_vaddr(*pud);
        pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
        if (!pmd_sv)
                return 0;

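        /*
         * Save the pmd entries and clear them first: the pte pages they
         * point to are freed only after the paging-structure caches have
         * been flushed below, so no CPU can still reach them through the
         * old pud entry.
         */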
        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_sv[i] = pmd[i];
                if (!pmd_none(pmd[i]))
                        pmd_clear(&pmd[i]);
        }

        pud_clear(pud);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd_sv[i])) {
                        pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
                        free_page((unsigned long)pte);
                }
        }

        free_page((unsigned long)pmd_sv);
        free_page((unsigned long)pmd);

        return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        free_page((unsigned long)pte);

        return 1;
}

#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */