// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

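/*
 * Allocate pages for a page table. Before the slab/buddy allocators are
 * up this falls back to memblock; __ref suppresses the section mismatch
 * warning for calling the early allocator.
 */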
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

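/*
 * Allocate a crst (region or segment) table and initialize all of its
 * entries with the given empty entry value.
 */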
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

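/*
 * Allocate a page table and preset all of its entries to invalid.
 */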
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
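		/*
		 * Map a whole 2GB block with a single region third table
		 * entry if the machine has EDAT-2, the block is aligned,
		 * not at address 0, fully contained in the range, and 4k
		 * granularity is not required for debug_pagealloc.
		 */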
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
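		/*
		 * Same idea for EDAT-1: map a whole 1MB segment with a
		 * large pmd entry when alignment and size allow it.
		 */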
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
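		/* A large pud entry maps the whole 2GB range in one go. */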
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used.
			 * Otherwise we would also need page tables, since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

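		/*
		 * Without EDAT-1 the backing memory is allocated one 4k
		 * page at a time.
		 */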
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

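/*
 * Intentionally empty: the common vmemmap code expects this hook, but
 * the vmemmap backing store is not unmapped here on s390.
 */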
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

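	/* Reject segments beyond VMEM_MAX_PHYS and ranges that wrap around. */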
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

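	/*
	 * If the loop ran to completion, seg points at the list head's
	 * container rather than a real segment; the check below catches
	 * that not-found case.
	 */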
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

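/*
 * Create a 1:1 mapping and a segment list entry for the given range,
 * used for memory made accessible at runtime (e.g. DCSS segments).
 */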
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
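	/*
	 * Make kernel text read-only+executable, rodata read-only, and
	 * init text read-only+executable in the direct mapping.
	 */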
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

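/* Runs via core_initcall, i.e. once the slab allocator is available. */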
core_initcall(vmem_convert_memory_chunk);