// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
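
/*
 * The vm.allocate_pgste sysctl forces all new page tables to be allocated
 * as full 4K pages with PGSTEs, as needed for KVM guest handling.
 */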
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
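
/*
 * A crst (region or segment) table has 2048 eight-byte entries and thus
 * occupies four consecutive pages (order-2 allocation).
 */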
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}
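
/*
 * Runs on every CPU after the mm's top-level table has been replaced, so
 * that CPUs currently running on this mm pick up the new ASCE.
 */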
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}
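
/*
 * Extend the address space by adding region table levels above the current
 * top-level table, i.e. upgrade from 3 to 4 or 5 paging levels.
 */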
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_sem lock held and there is
	 * no reason to optimize for the case of otherwise. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */
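
/*
 * A 2K page table (256 entries of 8 bytes) is sufficient for one level of
 * the 64-bit address space, so two page tables are packed into one 4K page
 * unless PGSTEs are required. The upper byte of page->_refcount tracks
 * which 2K halves of the page are in use.
 */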
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}
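
/*
 * For RCU freeing, the table pointer handed to tlb_remove_table() has the
 * fragment/pgste information encoded in its two lowest bits; it is decoded
 * again in __tlb_remove_table().
 */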
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
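
/*
 * RCU callback: the two low bits of the encoded table pointer select the
 * action - 0: crst table, 1/2: one 2K fragment, 3: pgste page table.
 */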
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
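
/*
 * base_lra() uses the LRA (load real address) instruction to translate a
 * virtual address through the current ASCE; the result is used to fill the
 * base page table entries below.
 */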
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
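
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	asce = base_asce_alloc((unsigned long) buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce to the I/O or service call interface ...
 *	base_asce_free(asce);
 */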