/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif
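
/*
 * Note: __tagtable() drops an entry into the kernel's tag table
 * section, which the ATAG boot-parameter parser walks during early
 * setup; each parser above runs when its ATAG_INITRD/ATAG_INITRD2
 * tag is encountered. ATAG_INITRD carries a virtual address (hence
 * the __virt_to_phys() above), ATAG_INITRD2 a physical one.
 */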
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
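
/*
 * arm_adjust_dma_zone() above assumes size[0]/hole[0] describe all of
 * lowmem on entry: it carves the first dma_size pages off into
 * ZONE_DMA and moves the remainder, along with the whole lowmem hole
 * count, into ZONE_NORMAL, leaving ZONE_DMA accounted as hole-free.
 */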
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
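
/*
 * Platforms opt in by setting dma_zone_size in their machine_desc;
 * the 0xffffffff default means "no bus address restriction", matching
 * the arm_dma_limit comment earlier in this file.
 */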
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
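
/*
 * zhole_size starts out equal to each zone's span and then has every
 * memblock-backed page subtracted, so what remains is exactly the
 * pages within the span that have no backing RAM; free_area_init_node()
 * uses it to derive present_pages from spanned_pages for each zone.
 */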
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
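
/*
 * The alloc-then-free-then-remove dance above is deliberate:
 * memblock_phys_alloc() finds and reserves a suitable range,
 * memblock_free() drops the reservation again, and memblock_remove()
 * takes the range out of the "memory" type entirely, so the stolen
 * region becomes invisible to the rest of the kernel. Stealing is
 * forbidden once arm_memblock_init() has completed (see
 * arm_memblock_steal_permitted below).
 */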
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}
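
/*
 * Why pfn_to_page(pfn - 1) + 1 rather than pfn_to_page(pfn)? With
 * SPARSEMEM, start_pfn/end_pfn may sit exactly on a section boundary
 * whose own memmap was never allocated; stepping back one pfn keeps
 * the lookup inside a section known to be present, and the + 1 then
 * yields the one-past-the-end pointer actually wanted.
 */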
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
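
/*
 * The reserved-region walk above relies on memblock keeping both its
 * "memory" and "reserved" lists sorted by base address, so a single
 * forward pass can clip each reservation against the current highmem
 * range and free only the gaps between reservations.
 */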
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}
#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];
static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};
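
/*
 * On pre-LPAE ARMv6+ (with CR_XP set), APX=1 together with AP_WRITE
 * encodes a privileged read-only section, so .prot makes text/rodata
 * RO; the .clear value restores plain AP_WRITE (privileged read-write),
 * which is what set_kernel_text_rw() applies when the text needs to be
 * patched.
 */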
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
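
/*
 * In the non-LPAE case the kernel's pmd_t covers a pair of 1MB
 * hardware section entries (2MB in total), so "addr & SECTION_SIZE"
 * selects the odd entry of the pair; under LPAE a pmd entry is a real
 * 2MB section and only pmd[0] exists.
 */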
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
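
/*
 * CR_XP is the SCTLR "extended page tables" bit: it disables the
 * legacy subpage AP mechanism and enables the ARMv6 APX/XN layout
 * that the section_perm masks above rely on.
 */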
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}
/*
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}
static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}
void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif