// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

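/*
 * Allocate pages for a table: use the page allocator once the slab
 * allocator is available, fall back to memblock during early boot.
 */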
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}

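/* Allocate a region or segment table and initialize each entry with @val. */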
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

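/* Allocate a page table and mark all entries invalid. */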
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
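		/*
		 * Map a 2GB block with a large region-third entry if EDAT2 is
		 * available and the remaining range is suitably aligned.
		 */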
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
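		/*
		 * Map a 1MB block with a large segment entry if EDAT1 is
		 * available and the remaining range is suitably aligned.
		 */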
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1MB frames for vmemmap if available. We always
			 * use large frames, even if they are only partially
			 * used: otherwise we would also end up with page
			 * tables, since vmemmap_populate gets called for each
			 * section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

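/* Freeing of the vmemmap backing is currently not implemented. */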
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

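/* Remove a segment from the list and tear down its 1:1 mapping. */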
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

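/*
 * Remove a previously added memory mapping. The given range must match an
 * existing segment exactly.
 */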
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

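/*
 * Register a new memory segment and add it to the 1:1 mapping, provided
 * it does not overlap an already present segment.
 */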
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
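	/* Write-protect kernel text, read-only data and init text; keep text executable. */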
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);