// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
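/*
 * Illustrative example of the debug output above on a single-node x86-64
 * machine with mminit_loglevel >= MMINIT_VERIFY (not captured from a real
 * boot log; exact zone ordering depends on the configuration):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 *   mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA
 */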
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = BITS_PER_LONG;
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}

	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}

	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
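/*
 * Worked example (illustrative): with 16 present CPUs and 64GiB of RAM
 * (16777216 4KiB pages), ram_pages/nr is 1048576 pages. Under
 * OVERCOMMIT_NEVER the computed batch is 1048576/256 = 4096; under the
 * other policies it is 1048576/4 = 262144. Both are well above the
 * max(nr*2, 32) floor of 32, which only matters on tiny machines.
 */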
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
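/*
 * Example boot parameters handled above (illustrative): "kernelcore=512M"
 * goes through memparse() and is stored in *core as a page count, while
 * "kernelcore=30%" stores 30 in *percent and leaves the absolute size to be
 * derived from totalpages later.
 */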
bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;

	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}
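/*
 * Illustrative: on a typical x86-64 machine without highmem the highest
 * populated zone is ZONE_NORMAL, so movable_zone ends up as ZONE_NORMAL and
 * ZONE_MOVABLE can only begin at or above that zone's lowest possible PFN.
 */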
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		if (!memblock_has_mirror()) {
			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
			goto out;
		}

		if (is_kdump_kernel()) {
			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
			goto out;
		}

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}
	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;

				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}
	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
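/*
 * Illustrative example of the spreading above: with "kernelcore=2G" and two
 * nodes that each have plenty of memory, kernelcore_node is roughly 1G worth
 * of pages per node, so about the first 1G of each node stays usable for
 * unmovable kernel allocations and the remainder of each node becomes
 * ZONE_MOVABLE.
 */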
void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}
int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}
/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{
	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}
/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * prev_end_pfn static that contains the end of previous zone
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
static void __meminit init_reserved_page(unsigned long pfn, int nid)
{
	pg_data_t *pgdat;
	int zid;

	if (early_page_initialised(pfn, nid))
		return;

	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn, int nid)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void init_reserved_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start,
				      phys_addr_t end, int nid)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn, nid);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}
/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - non-memory regions covered by the contiguous flatmem mapping
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
			node, zone_names[zone], pgcnt);
}
/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}
static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}
static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{
	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (pageblock_aligned(pfn)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		cond_resched();
	}

	/*
	 * ZONE_DEVICE pages are released directly to the driver page allocator
	 * which will set the page count to 1 when allocating the page.
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_COHERENT)
		set_page_count(page, 0);
}
/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the lack
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
{
	if (!vmemmap_can_optimize(altmap, pgmap))
		return pgmap_vmemmap_nr(pgmap);

	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
}
static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores important compound page info.
		 * Call prep_compound_head() after the first tail page has
		 * been initialized, to not have the data overwritten.
		 */
		if (pfn == head_pfn + 1)
			prep_compound_head(head, order);
	}
}
void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pgmap));
	}

	pr_debug("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}
#endif
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
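/*
 * Illustrative: if zone_movable_pfn[nid] falls in the middle of the node's
 * ZONE_NORMAL span, ZONE_NORMAL is truncated to end at that PFN and
 * ZONE_MOVABLE takes over from there up to the node end; a range lying
 * entirely above zone_movable_pfn[nid] collapses to an empty span.
 */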
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long zone_start_pfn,
					unsigned long zone_end_pfn)
{
	unsigned long nr_absent;

	/* zone is empty, we don't have any absent pages */
	if (zone_start_pfn == zone_end_pfn)
		return 0;

	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
					   zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}
static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
		z->present_pages = 0;
#if defined(CONFIG_MEMORY_HOTPLUG)
		z->present_early_pages = 0;
#endif
	}

	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;
	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
}
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   zone_start_pfn,
						   zone_end_pfn);

		real_size = spanned - absent;

		if (spanned)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = spanned;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += spanned;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif
static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);
	pgdat_kswapd_lock_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
		init_waitqueue_head(&pgdat->reclaim_wait[i]);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;

	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}

#ifdef CONFIG_UNACCEPTED_MEMORY
	INIT_LIST_HEAD(&zone->unaccepted_pages);
#endif
}
void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, BITS_PER_LONG);

	return usemapsize / BITS_PER_BYTE;
}

static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */
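/*
 * Worked example (illustrative, assuming a pageblock_order of 9, i.e. 2MiB
 * pageblocks, and NR_PAGEBLOCK_BITS of 4): a 1GiB zone has 512 pageblocks,
 * needing 2048 bits of pageblock flags; rounded up to BITS_PER_LONG that is
 * 256 bytes returned by usemap_size().
 */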
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order = MAX_PAGE_ORDER;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	/* Don't let pageblocks exceed the maximum allocation granularity. */
	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
		order = HUGETLB_PAGE_ORDER;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
{
	int nid = pgdat->node_id;
	enum zone_type z;
	int cpu;

	pgdat_init_internals(pgdat);

	if (pgdat->per_cpu_nodestats == &boot_nodestats)
		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * Reset the nr_zones, order and highest_zoneidx before reuse.
	 * Note that kswapd will init kswapd_highest_zoneidx properly
	 * when it starts in the near future.
	 */
	pgdat->nr_zones = 0;
	pgdat->kswapd_order = 0;
	pgdat->kswapd_highest_zoneidx = 0;
	pgdat->node_start_pfn = 0;
	pgdat->node_present_pages = 0;

	for_each_online_cpu(cpu) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
		memset(p, 0, sizeof(*p));
	}

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages and managed_pages because they will
	 * be updated in online_pages() and offline_pages().
	 */
	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		zone->present_pages = 0;
		zone_init_internals(zone, z, nid, 0);
	}
}
#endif
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					pr_debug("  %s zone: %lu pages used for memmap\n",
						 zone_names[j], memmap_pages);
			} else
				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone_init_internals(zone, j, nid, freesize);

		if (!size)
			continue;

		setup_usemap(zone);
		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
	}
}
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr, int nid, bool exact_nid)
{
	void *ptr;

	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long start, offset, size, end;
	struct page *map;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/*
	 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
	 * aligned but the node_mem_map endpoints must be in order
	 * for the buddy allocator to function correctly.
	 */
	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
	size = (end - start) * sizeof(struct page);
	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
			   pgdat->node_id, false);
	if (!map)
		panic("Failed to allocate %ld bytes for node %d memory map\n",
		      size, pgdat->node_id);
	pgdat->node_mem_map = map + offset;
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
		 __func__, pgdat->node_id, (unsigned long)pgdat,
		 (unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NUMA
	/* the global mem_map is just set as node 0's */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLATMEM */
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, the start and end PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
static void __init free_area_init_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;
	pgdat->per_cpu_nodestats = NULL;

	if (start_pfn != end_pfn) {
		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);

		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
	} else {
		pr_info("Initmem setup node %d as memoryless\n", nid);

		reset_memoryless_node_totalpages(pgdat);
	}

	alloc_node_mem_map(pgdat);
	pgdat_set_deferred_range(pgdat);

	free_area_init_core(pgdat);
	lru_gen_init_pgdat(pgdat);
}
/* Any regular or high memory on that node ? */
static void __init check_for_memory(pg_data_t *pgdat)
{
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (populated_zone(zone)) {
			if (IS_ENABLED(CONFIG_HIGHMEM))
				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
			if (zone_type <= ZONE_NORMAL)
				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
			break;
		}
	}
}
#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif
/*
 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn sorted in the descending order
 */
static bool arch_has_descending_max_zone_pfns(void)
{
	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
/**
 * free_area_init - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid, zone;
	bool descending;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
	descending = arch_has_descending_max_zone_pfns();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (descending)
			zone = MAX_NR_ZONES - i - 1;
		else
			zone = i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[zone], start_pfn);
		arch_zone_lowest_possible_pfn[zone] = start_pfn;
		arch_zone_highest_possible_pfn[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}
1842 pr_info("Movable zone start for each node\n");
1843 for (i
= 0; i
< MAX_NUMNODES
; i
++) {
1844 if (zone_movable_pfn
[i
])
1845 pr_info(" Node %d: %#018Lx\n", i
,
1846 (u64
)zone_movable_pfn
[i
] << PAGE_SHIFT
);
1850 * Print out the early node map, and initialize the
1851 * subsection-map relative to active online memory ranges to
1852 * enable future "sub-section" extensions of the memory map.
1854 pr_info("Early memory node ranges\n");
1855 for_each_mem_pfn_range(i
, MAX_NUMNODES
, &start_pfn
, &end_pfn
, &nid
) {
1856 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid
,
1857 (u64
)start_pfn
<< PAGE_SHIFT
,
1858 ((u64
)end_pfn
<< PAGE_SHIFT
) - 1);
1859 subsection_map_init(start_pfn
, end_pfn
- start_pfn
);
1862 /* Initialise every node */
1863 mminit_verify_pageflags_layout();
1864 setup_nr_node_ids();
1865 set_pageblock_order();
1867 for_each_node(nid
) {
1870 if (!node_online(nid
)) {
1871 /* Allocator not initialized yet */
1872 pgdat
= arch_alloc_nodedata(nid
);
1874 panic("Cannot allocate %zuB for node %d.\n",
1875 sizeof(*pgdat
), nid
);
1876 arch_refresh_nodedata(nid
, pgdat
);
1877 free_area_init_node(nid
);
1880 * We do not want to confuse userspace by sysfs
1881 * files/directories for node without any memory
1882 * attached to it, so this node is not marked as
1883 * N_MEMORY and not marked online so that no sysfs
1884 * hierarchy will be created via register_one_node for
1885 * it. The pgdat will get fully initialized by
1886 * hotadd_init_pgdat() when memory is hotplugged into
1892 pgdat
= NODE_DATA(nid
);
1893 free_area_init_node(nid
);
1895 /* Any memory on that node */
1896 if (pgdat
->node_present_pages
)
1897 node_set_state(nid
, N_MEMORY
);
1898 check_for_memory(pgdat
);
1903 /* disable hash distribution for systems with a single node */
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
		__free_pages_core(page, MAX_PAGE_ORDER);
		return;
	}

	/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if (pageblock_aligned(pfn))
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}
1988 static atomic_t pgdat_init_n_undone __initdata
;
1989 static __initdata
DECLARE_COMPLETION(pgdat_init_all_done_comp
);
1991 static inline void __init
pgdat_init_report_one_done(void)
1993 if (atomic_dec_and_test(&pgdat_init_n_undone
))
1994 complete(&pgdat_init_all_done_comp
);
/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * We check if a current MAX_PAGE_ORDER block is valid by only checking the
 * validity of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
		return false;
	return true;
}
/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * MAX_ORDER_NR_PAGES sizes.
 */
static void __init deferred_free_pages(unsigned long pfn,
				       unsigned long end_pfn)
{
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}
/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing it only once every MAX_ORDER_NR_PAGES.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(struct zone *zone,
						unsigned long pfn,
						unsigned long end_pfn)
{
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	int zid = zone_idx(zone);
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;
			continue;
		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
			page = pfn_to_page(pfn);
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return (nr_pages);
}
/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
				    unsigned long *spfn, unsigned long *epfn,
				    unsigned long first_init_pfn)
{
	u64 j;

	/*
	 * Start out by walking through the ranges in this zone that have
	 * already been initialized. We don't need to do anything with them
	 * so we just need to flush them out of the system.
	 */
	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
		if (*epfn <= first_init_pfn)
			continue;
		if (*spfn < first_init_pfn)
			*spfn = first_init_pfn;
		*i = j;
		return true;
	}

	return false;
}
/*
 * Initialize and free pages. We do it in two loops: first we initialize
 * struct page, then free to buddy allocator, because while we are
 * freeing pages we can access pages that are ahead (computing buddy
 * page in __free_one_page()).
 *
 * In order to try and keep some memory in the cache we have the loop
 * broken along max page order boundaries. This way we will not cause
 * any issues with the buddy page computation.
 */
static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
		       unsigned long *end_pfn)
{
	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
	unsigned long spfn = *start_pfn, epfn = *end_pfn;
	unsigned long nr_pages = 0;
	u64 j = *i;

	/* First we loop through and initialize the page values */
	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
		unsigned long t;

		if (mo_pfn <= *start_pfn)
			break;

		t = min(mo_pfn, *end_pfn);
		nr_pages += deferred_init_pages(zone, *start_pfn, t);

		if (mo_pfn < *end_pfn) {
			*start_pfn = mo_pfn;
			break;
		}
	}

	/* Reset values and now loop through freeing pages as needed */
	swap(j, *i);

	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
		unsigned long t;

		if (mo_pfn <= spfn)
			break;

		t = min(mo_pfn, epfn);
		deferred_free_pages(spfn, t);

		if (mo_pfn <= epfn)
			break;
	}

	return nr_pages;
}
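/*
 * Editorial note (not part of the original source): because mo_pfn is
 * *start_pfn + 1 rounded up to MAX_ORDER_NR_PAGES, each call advances by at
 * most one max-order block; the caller keeps invoking it until the whole
 * range is initialized and freed, so buddy merging never touches
 * uninitialized struct pages.
 */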
static void __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
			   void *arg)
{
	unsigned long spfn, epfn;
	struct zone *zone = arg;
	u64 i;

	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);

	/*
	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
	 * we can avoid introducing any issues with the buddy allocator.
	 */
	while (spfn < end_pfn) {
		deferred_init_maxorder(&i, zone, &spfn, &epfn);
		cond_resched();
	}
}
/* An arch may override for more concurrency. */
__weak int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return 1;
}
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	unsigned long spfn = 0, epfn = 0;
	unsigned long first_init_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;
	int zid, max_threads;
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_init_pfn))
		goto zone_empty;

	max_threads = deferred_page_init_max_threads(cpumask);

	while (spfn < epfn) {
		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
		struct padata_mt_job job = {
			.thread_fn	= deferred_init_memmap_chunk,
			.fn_arg		= zone,
			.start		= spfn,
			.size		= epfn_align - spfn,
			.align		= PAGES_PER_SECTION,
			.min_chunk	= PAGES_PER_SECTION,
			.max_threads	= max_threads,
			.numa_aware	= false,
		};

		padata_do_multithreaded(&job);
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						    epfn_align);
	}
zone_empty:
	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
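/*
 * Editorial note (not part of the original source): the padata job above is
 * sized and aligned to PAGES_PER_SECTION so that each worker owns whole
 * sections; on x86-64 with 4KiB pages a section is 128MiB (32768 pages),
 * which is the typical minimum chunk handed to a helper thread.
 */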
/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone. This way we are
 * making sure that it is not inlined into permanent text section.
 */
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		/* Retry only once. */
		return first_deferred_pfn != ULONG_MAX;
	}

	/*
	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
	 * that we can avoid introducing any issues with the buddy
	 * allocator.
	 */
	while (spfn < epfn) {
		/* update our first deferred PFN for this section */
		first_deferred_pfn = spfn;

		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
		touch_nmi_watchdog();

		/* We should only stop along section boundaries */
		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
			continue;

		/* If our quota has been met we can stop here */
		if (nr_pages >= nr_pages_needed)
			break;
	}

	pgdat->first_deferred_pfn = spfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}
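/*
 * Editorial note (not part of the original source): for an order-3 allocation
 * (8 pages) and the typical 32768-page section, nr_pages_needed above rounds
 * up to a full section, so a single successful pass through the loop usually
 * initializes far more memory than the allocation strictly needs.
 */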
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_CMA
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);
	set_page_refcounted(page);
	__free_pages(page, pageblock_order);

	adjust_managed_page_count(page, pageblock_nr_pages);
	page_zone(page)->cma_pages += pageblock_nr_pages;
}
#endif
void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = pageblock_end_pfn(block_start_pfn);
	for (; block_start_pfn < zone_end_pfn(zone);
	     block_start_pfn = block_end_pfn,
	     block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}
void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * We initialized the rest of the deferred pages.  Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	buffer_init();

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);

	/* Initialize page ext after all struct pages are initialized. */
	if (deferred_struct_pages)
		page_ext_init();

	page_alloc_sysctl_init();
}
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif
/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash table
 * only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif
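/*
 * Editorial note (not part of the original source): with ADAPT_SCALE_BASE of
 * 64G and ADAPT_SCALE_SHIFT of 2, a 64G machine leaves the scale untouched,
 * 256G adds one, 1T adds two, and so on - each quadrupling of memory only
 * doubles the resulting hash table instead of quadrupling it.
 */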
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;
	bool virt;
	bool huge = false;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SIZE < SZ_1M)
			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
		} else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
			table = vmalloc_huge(size, gfp_flags);
			virt = true;
			if (table)
				huge = is_vm_area_hugepages(table);
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
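/*
 * Editorial note (not part of the original source): a typical caller is the
 * dentry cache, which does roughly
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13, HASH_ZERO,
 *					&d_hash_shift, NULL, 0, 0);
 *
 * passing 0 for numentries so the table is sized from nr_kernel_pages, with
 * one bucket per 2^13 bytes of low memory before the adaptive scaling above.
 */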
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}
void __init memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order)
{
	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
		int nid = early_pfn_to_nid(pfn);

		if (!early_page_initialised(pfn, nid))
			return;
	}

	if (!kmsan_memblock_free_pages(page, order)) {
		/* KMSAN will take care of these pages. */
		return;
	}
	__free_pages_core(page, order);
}
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
static void __init mem_debugging_and_hardening_init(void)
{
	bool page_poisoning_requested = false;
	bool want_check_pages = false;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
		want_check_pages = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early) {
		want_check_pages = true;
		static_branch_enable(&init_on_alloc);
	} else {
		static_branch_disable(&init_on_alloc);
	}

	if (_init_on_free_enabled_early) {
		want_check_pages = true;
		static_branch_enable(&init_on_free);
	} else {
		static_branch_disable(&init_on_free);
	}

	if (IS_ENABLED(CONFIG_KMSAN) &&
	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (debug_pagealloc_enabled()) {
		want_check_pages = true;
		static_branch_enable(&_debug_pagealloc_enabled);

		if (debug_guardpage_minorder())
			static_branch_enable(&_debug_guardpage_enabled);
	}
#endif

	/*
	 * Any page debugging or hardening option also enables sanity checking
	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
	 * enabled already.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
		static_branch_enable(&check_pages_enabled);
}
/* Report memory auto-initialization states for this boot. */
static void __init report_meminit(void)
{
	const char *stack;

	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
		stack = "all(pattern)";
	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
		stack = "all(zero)";
	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
		stack = "byref_all(zero)";
	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
		stack = "byref(zero)";
	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
		stack = "__user(zero)";
	else
		stack = "off";

	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
		want_init_on_free() ? "on" : "off");
	if (want_init_on_free())
		pr_info("mem auto-init: clearing system memory may take some time...\n");
}
static void __init mem_init_print_info(void)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		      _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef CONFIG_HIGHMEM
		", %luK highmem"
#endif
		")\n",
		K(nr_free_pages()), K(physpages),
		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
		K(physpages - totalram_pages() - totalcma_pages),
		K(totalcma_pages)
#ifdef CONFIG_HIGHMEM
		, K(totalhigh_pages())
#endif
		);
}
/*
 * Set up kernel memory allocators
 */
void __init mm_core_init(void)
{
	/* Initializations relying on SMP setup */
	build_all_zonelists(NULL);
	page_alloc_init_cpuhp();

	/*
	 * page_ext requires contiguous pages,
	 * bigger than MAX_PAGE_ORDER unless SPARSEMEM.
	 */
	page_ext_init_flatmem();
	mem_debugging_and_hardening_init();
	kfence_alloc_pool_and_metadata();
	report_meminit();
	kmsan_init_shadow();
	stack_depot_early_init();
	mem_init();
	mem_init_print_info();
	kmem_cache_init();
	/*
	 * page_owner must be initialized after buddy is ready, and also after
	 * slab is ready so that stack_depot_init() works properly
	 */
	page_ext_init_flatmem_late();
	kmemleak_init();
	ptlock_cache_init();
	pgtable_cache_init();
	debug_objects_mem_init();
	vmalloc_init();
	/* If no deferred init page_ext now, as vmap is fully initialized */
	if (!deferred_struct_pages)
		page_ext_init();
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
	/* Should be run after espfix64 is set up. */
	pti_init();
	kmsan_init_runtime();
	mm_cache_init();
}