// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

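/*
 * The vm.allocate_pgste sysctl: when set, page_table_alloc() hands out full
 * 4K page tables with page status table extensions (PGSTEs) instead of 2K
 * fragments. This is what mm_alloc_pgste() checks and it is needed to run
 * KVM guests, which keep guest storage key and gmap state in the PGSTEs.
 */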
static struct ctl_table page_table_sysctl[] = {
        {
                .procname       = "allocate_pgste",
                .data           = &page_table_allocate_pgste,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO | S_IWUSR,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &page_table_allocate_pgste_min,
                .extra2         = &page_table_allocate_pgste_max,
        },
        { }
};

static struct ctl_table page_table_sysctl_dir[] = {
        {
                .procname       = "vm",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = page_table_sysctl,
        },
        { }
};

static int __init page_table_register_sysctl(void)
{
        return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

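/*
 * Region and segment ("crst") tables are 16KB: an order-2 page allocation
 * holds the 2048 eight-byte entries of one table level.
 */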
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (!page)
                return NULL;
        arch_set_page_dat(page, 2);
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        if (current->active_mm == mm)
                set_user_asce(mm);
        __tlb_flush_local();
}

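/*
 * Add page table levels on top of the current ones so that user space can
 * grow up to @end. The upgrade is done lazily, typically from the mmap path
 * when an address above the current asce_limit is requested (assumption:
 * see arch/s390/mm/mmap.c). The new ASCE is propagated to all CPUs running
 * this mm via on_each_cpu(__crst_table_upgrade).
 */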
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
        unsigned long *table, *pgd;
        int rc, notify;

        /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
        VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
        rc = 0;
        notify = 0;
        while (mm->context.asce_limit < end) {
                table = crst_table_alloc(mm);
                if (!table) {
                        rc = -ENOMEM;
                        break;
                }
                spin_lock_bh(&mm->page_table_lock);
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit == _REGION2_SIZE) {
                        crst_table_init(table, _REGION2_ENTRY_EMPTY);
                        p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
                        mm->pgd = (pgd_t *) table;
                        mm->context.asce_limit = _REGION1_SIZE;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                } else {
                        crst_table_init(table, _REGION1_ENTRY_EMPTY);
                        pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
                        mm->pgd = (pgd_t *) table;
                        mm->context.asce_limit = -PAGE_SIZE;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
                }
                notify = 1;
                spin_unlock_bh(&mm->page_table_lock);
        }
        if (notify)
                on_each_cpu(__crst_table_upgrade, mm, 0);
        return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
        pgd_t *pgd;

        /* downgrade should only happen from 3 to 2 levels (compat only) */
        VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }

        pgd = mm->pgd;
        mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
        mm->context.asce_limit = _REGION3_SIZE;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                           _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
        crst_table_free(mm, (unsigned long *) pgd);

        if (current->active_mm == mm)
                set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

#ifdef CONFIG_PGSTE

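/*
 * A PGSTE page table is a full 4K page: the lower 2K hold the 256 PTEs
 * (initialized to _PAGE_INVALID), the upper 2K hold the zeroed page status
 * table entries used by KVM and the gmap code.
 */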
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
        struct page *page;
        u64 *table;

        page = alloc_page(GFP_KERNEL);
        if (page) {
                table = (u64 *)page_to_phys(page);
                memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
                memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        }
        return page;
}

void page_table_free_pgste(struct page *page)
{
        __free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
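/*
 * A 4K page provides two 2K page table fragments. Their state is tracked in
 * the upper byte of page->_refcount:
 *
 *   bits 24-25: which 2K halves are handed out (1 = lower, 2 = upper,
 *               3 = whole page, e.g. a PGSTE table)
 *   bits 28-29: which 2K halves are queued for RCU-deferred freeing
 *
 * Pages with at least one free half sit on mm->context.pgtable_list so the
 * next page_table_alloc() can reuse the spare fragment.
 */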
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        unsigned long *table;
        struct page *page;
        unsigned int mask, bit;

        /* Try to get a fragment of a 4K page as a 2K page table */
        if (!mm_alloc_pgste(mm)) {
                table = NULL;
                spin_lock_bh(&mm->context.lock);
                if (!list_empty(&mm->context.pgtable_list)) {
                        page = list_first_entry(&mm->context.pgtable_list,
                                                struct page, lru);
                        mask = atomic_read(&page->_refcount) >> 24;
                        mask = (mask | (mask >> 4)) & 3;
                        if (mask != 3) {
                                table = (unsigned long *) page_to_phys(page);
                                bit = mask & 1;         /* =1 -> second 2K */
                                if (bit)
                                        table += PTRS_PER_PTE;
                                atomic_xor_bits(&page->_refcount,
                                                1U << (bit + 24));
                                list_del(&page->lru);
                        }
                }
                spin_unlock_bh(&mm->context.lock);
                if (table)
                        return table;
        }
        /* Allocate a fresh page */
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        arch_set_page_dat(page, 0);
        /* Initialize page table */
        table = (unsigned long *) page_to_phys(page);
        if (mm_alloc_pgste(mm)) {
                /* Return 4K page table with PGSTEs */
                atomic_xor_bits(&page->_refcount, 3 << 24);
                memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
                memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        } else {
                /* Return the first 2K fragment of the page */
                atomic_xor_bits(&page->_refcount, 1 << 24);
                memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
                spin_lock_bh(&mm->context.lock);
                list_add(&page->lru, &mm->context.pgtable_list);
                spin_unlock_bh(&mm->context.lock);
        }
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (!mm_alloc_pgste(mm)) {
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                spin_lock_bh(&mm->context.lock);
                mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
                mask >>= 24;
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
                spin_unlock_bh(&mm->context.lock);
                if (mask != 0)
                        return;
        }

        pgtable_page_dtor(page);
        __free_page(page);
}

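/*
 * Deferred variant of page_table_free() used when tearing down address
 * spaces: the table is queued on the mmu_gather batch and only released by
 * __tlb_remove_table() after an RCU grace period (or an IPI broadcast in
 * the fallback path), so concurrent lockless page table walkers never see
 * it disappear under them. Bits 0-1 of the queued pointer encode what is
 * being freed: 1 or 2 for the lower/upper 2K fragment, 3 for a full 4K
 * PGSTE table, 0 for a crst table.
 */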
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                         unsigned long vmaddr)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (mm_alloc_pgste(mm)) {
                gmap_unlink(mm, table, vmaddr);
                table = (unsigned long *) (__pa(table) | 3);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
        spin_lock_bh(&mm->context.lock);
        mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
        mask >>= 24;
        if (mask & 3)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        else
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.lock);
        table = (unsigned long *) (__pa(table) | (1U << bit));
        tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
        unsigned int mask = (unsigned long) _table & 3;
        void *table = (void *)((unsigned long) _table ^ mask);
        struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

        switch (mask) {
        case 0:         /* pmd, pud, or p4d */
                free_pages((unsigned long) table, 2);
                break;
        case 1:         /* lower 2K of a 4K page table */
        case 2:         /* higher 2K of a 4K page table */
                mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
                mask >>= 24;
                if (mask != 0)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
                pgtable_page_dtor(page);
                __free_page(page);
                break;
        }
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

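/*
 * Queue a table for deferred freeing. Tables are collected in a one-page
 * mmu_table_batch and released via call_rcu_sched() in tlb_table_flush();
 * if no batch page can be allocated the code falls back to an IPI broadcast
 * (tlb_remove_table_one) to synchronize with interrupt-disabled walkers.
 */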
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_flush_mmu(tlb);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
        u64 *table;

        table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
        if (table)
                memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
        return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
        kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
        unsigned long table;

        table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (table)
                crst_table_init((unsigned long *)table, val);
        return table;
}

static void base_crst_free(unsigned long table)
{
        free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)                                  \
static inline unsigned long base_##NAME##_addr_end(unsigned long addr, \
                                                   unsigned long end)  \
{                                                                       \
        unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);           \
                                                                        \
        return (next - 1) < (end - 1) ? next : end;                     \
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

static inline unsigned long base_lra(unsigned long address)
{
        unsigned long real;

        asm volatile(
                "       lra     %0,0(%1)\n"
                : "=d" (real) : "a" (address) : "cc");
        return real;
}

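/*
 * The base_*_walk() functions below each handle one translation level in
 * the same pattern: with alloc != 0 they populate missing lower-level
 * tables and finally store the real address returned by LRA into each page
 * table entry; with alloc == 0 they walk the existing tables and free the
 * lower levels. The resulting tables use only base DAT translation, without
 * EDAT large pages.
 */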
static int base_page_walk(unsigned long origin, unsigned long addr,
                          unsigned long end, int alloc)
{
        unsigned long *pte, next;

        if (!alloc)
                return 0;
        pte = (unsigned long *) origin;
        pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
        do {
                next = base_page_addr_end(addr, end);
                *pte = base_lra(addr);
        } while (pte++, addr = next, addr < end);
        return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *ste, next, table;
        int rc;

        ste = (unsigned long *) origin;
        ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
        do {
                next = base_segment_addr_end(addr, end);
                if (*ste & _SEGMENT_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_pgt_alloc();
                        if (!table)
                                return -ENOMEM;
                        *ste = table | _SEGMENT_ENTRY;
                }
                table = *ste & _SEGMENT_ENTRY_ORIGIN;
                rc = base_page_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_pgt_free(table);
                cond_resched();
        } while (ste++, addr = next, addr < end);
        return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rtte, next, table;
        int rc;

        rtte = (unsigned long *) origin;
        rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
        do {
                next = base_region3_addr_end(addr, end);
                if (*rtte & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rtte = table | _REGION3_ENTRY;
                }
                table = *rtte & _REGION_ENTRY_ORIGIN;
                rc = base_segment_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rtte++, addr = next, addr < end);
        return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rste, next, table;
        int rc;

        rste = (unsigned long *) origin;
        rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
        do {
                next = base_region2_addr_end(addr, end);
                if (*rste & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rste = table | _REGION2_ENTRY;
                }
                table = *rste & _REGION_ENTRY_ORIGIN;
                rc = base_region3_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rste++, addr = next, addr < end);
        return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rfte, next, table;
        int rc;

        rfte = (unsigned long *) origin;
        rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
        do {
                next = base_region1_addr_end(addr, end);
                if (*rfte & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rfte = table | _REGION1_ENTRY;
                }
                table = *rfte & _REGION_ENTRY_ORIGIN;
                rc = base_region2_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rfte++, addr = next, addr < end);
        return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
        unsigned long table = asce & _ASCE_ORIGIN;

        if (!asce)
                return;
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_SEGMENT:
                base_segment_walk(table, 0, _REGION3_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION3:
                base_region3_walk(table, 0, _REGION2_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION2:
                base_region2_walk(table, 0, _REGION1_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION1:
                base_region1_walk(table, 0, -_PAGE_SIZE, 0);
                break;
        }
        base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
        static DEFINE_MUTEX(base_pgt_cache_mutex);
        unsigned long sz = _PAGE_TABLE_SIZE;

        if (base_pgt_cache)
                return 0;
        mutex_lock(&base_pgt_cache_mutex);
        if (!base_pgt_cache)
                base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
        mutex_unlock(&base_pgt_cache_mutex);
        return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *       used for I/O requests. tlb entries that might result because the
 *       asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
        unsigned long asce, table, end;
        int rc;

        if (base_pgt_cache_init())
                return 0;
        end = addr + num_pages * PAGE_SIZE;
        if (end <= _REGION3_SIZE) {
                table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_segment_walk(table, addr, end, 1);
                asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
        } else if (end <= _REGION2_SIZE) {
                table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region3_walk(table, addr, end, 1);
                asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
        } else if (end <= _REGION1_SIZE) {
                table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region2_walk(table, addr, end, 1);
                asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
        } else {
                table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region1_walk(table, addr, end, 1);
                asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
        }
        if (rc) {
                base_asce_free(asce);
                asce = 0;
        }
        return asce;
}
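
/*
 * Typical (hypothetical) usage sketch for callers that must hand an asce to
 * firmware or I/O code; the buffer and request names below are illustrative
 * only:
 *
 *      unsigned long asce;
 *
 *      asce = base_asce_alloc((unsigned long) buf, num_pages);
 *      if (!asce)
 *              return -ENOMEM;
 *      request->asce = asce;   // pass the asce in the service request
 *      ...                     // issue the request and wait for completion
 *      base_asce_free(asce);
 */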