// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/mutex.h>

#ifdef CONFIG_KEXEC_HANDOVER
#include <linux/libfdt.h>
#include <linux/kexec_handover.h>
#endif /* CONFIG_KEXEC_HANDOVER */

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"
#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * Calling memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
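/*
 * A minimal usage sketch (illustrative only; the addresses, sizes and the
 * point in the boot sequence are made up - the real calls live in the
 * architecture specific setup code):
 *
 *	memblock_add(0x80000000, SZ_1G);	// RAM reported by firmware
 *	memblock_reserve(0x80000000, SZ_16M);	// e.g. a firmware-owned range
 *	ptr = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 */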
#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;
#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
/* When set to true, only allocate from MEMBLOCK_KHO_SCRATCH ranges */
static bool kho_scratch_only;
#else
#define kho_scratch_only	false
#endif
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif
struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions	= memblock_physmem_init_regions,
	.max		= INIT_PHYSMEM_REGIONS,
	.name		= "physmem",
};
#endif
/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;
#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)
static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;
bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}
static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	/* skip non-scratch memory for kho early boot allocations */
	if (kho_scratch_only)
		return MEMBLOCK_KHO_SCRATCH;

	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
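/*
 * For example (illustrative 32-bit values): with PHYS_ADDR_MAX == 0xffffffff,
 * capping base == 0xfffff000, *size == 0x2000 yields *size == 0xfff, so
 * @base + *@size no longer wraps past the end of the physical address space.
 */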
/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
		       phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
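/*
 * Example (illustrative values): [0x1000, 0x3000) and [0x2000, 0x4000)
 * overlap since 0x1000 < 0x4000 and 0x2000 < 0x3000; the adjacent ranges
 * [0x1000, 0x2000) and [0x2000, 0x3000) do not - the comparison treats
 * the ranges as half-open.
 */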
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			return true;

	return false;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		panic("memblock: cannot resize %s array\n", type->name);

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		if (addr) {
			/* The memory may not have been accepted, yet. */
			accept_memory(addr, new_alloc_size);

			new_array = __va(addr);
		} else {
			new_array = NULL;
		}
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve_kern(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 *
 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 0 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		type->cnt = 1;
		return 0;
	}

	/*
	 * The worst case is when the new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there are enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
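/*
 * Illustrative NUMA-aware registration (hypothetical layout): either add
 * memory with its node right away, or add it first and assign the node
 * later with memblock_set_node():
 *
 *	memblock_add_node(0x00000000, SZ_2G, 0, MEMBLOCK_NONE);
 *	memblock_add(0x100000000, SZ_2G);
 *	memblock_set_node(0x100000000, SZ_2G, &memblock.memory, 1);
 */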
/**
 * memblock_validate_numa_coverage - check if amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal memory size that can have unassigned node
 * ID (in bytes).
 *
 * A buggy firmware may report memory that does not belong to any node.
 * Check if amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* calculate pages with no node ID assigned */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (!numa_valid_node(nid))
			nr_pages += end_pfn - start_pfn;
	}

	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
		mem_size_mb = memblock_phys_mem_size() >> 20;
		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
		return false;
	}

	return true;
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range is returned in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
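/*
 * Worked example (hypothetical): isolating [0x2000, 0x3000) from a single
 * region [0x1000, 0x4000) splits it into [0x1000, 0x2000), [0x2000, 0x3000)
 * and [0x3000, 0x4000); *@start_rgn then indexes the middle region and
 * *@end_rgn the region after it.
 */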
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}
int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}
/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
				       int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, nid, flags);
}
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
__init void memblock_set_kho_scratch_only(void)
{
	kho_scratch_only = true;
}

__init void memblock_clear_kho_scratch_only(void)
{
	kho_scratch_only = false;
}

__init void memmap_init_kho_scratch_pages(void)
{
	phys_addr_t start, end;
	unsigned long pfn;
	int nid;
	u64 i;

	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
		return;

	/*
	 * Initialize struct pages for free scratch memory.
	 * The struct pages for reserved scratch memory will be set up in
	 * reserve_bootmem_region()
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
			init_deferred_page(pfn, nid);
	}
}
#endif
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @type: memblock type to set/clear flag for
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int set, int flag)
{
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG);
}
/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG);
}
/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR);
}
/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved()
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP);
}
/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP);
}
/**
 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
 * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized
 * at boot time.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * struct pages will not be initialized for reserved memory regions marked with
 * %MEMBLOCK_RSRV_NOINIT.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
				    MEMBLOCK_RSRV_NOINIT);
}
/**
 * memblock_mark_kho_scratch - Mark a memory region as MEMBLOCK_KHO_SCRATCH.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
 * for allocations during early boot with kexec handover.
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
				    MEMBLOCK_KHO_SCRATCH);
}
/**
 * memblock_clear_kho_scratch - Clear MEMBLOCK_KHO_SCRATCH flag for a
 * specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
				    MEMBLOCK_KHO_SCRATCH);
}
static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (numa_valid_node(nid) && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	/*
	 * In early alloc during kexec handover, we can only consider
	 * MEMBLOCK_KHO_SCRATCH regions for the allocations
	 */
	if ((flags & MEMBLOCK_KHO_SCRATCH) && !memblock_is_kho_scratch(m))
		return true;

	return false;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
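/*
 * Sketch of the cursor encoding used above (illustrative values): with
 * idx_a == 2 and idx_b == 3 the packed cursor is
 *
 *	*idx == ((u64)3 << 32) | 2;
 *
 * so the next call resumes at type_a->regions[2] and at the free gap
 * before type_b->regions[3].
 */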
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (!numa_valid_node(nid) || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available())) {
		void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);

		return vaddr ? virt_to_phys(vaddr) : 0;
	}

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
		goto done;

	if (numa_valid_node(nid) && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve_kern(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, size);

	return found;
}
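/*
 * Illustrative call (hypothetical constraints): allocate 64K aligned to
 * 4K anywhere below 4G, preferring @nid but allowing fallback to other
 * nodes because @exact_nid is false:
 *
 *	phys_addr_t pa = memblock_alloc_range_nid(SZ_64K, SZ_4K, 0,
 *						  DMA_BIT_MASK(32), nid,
 *						  false);
 */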
/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       true);
}
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
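/*
 * Most callers reach this function through the memblock_alloc() wrapper
 * from <linux/memblock.h>, which asks for the whole accessible range and
 * no node preference, e.g. (illustrative):
 *
 *	void *tbl = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 */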
/**
 * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @func: caller func name
 *
 * This function attempts to allocate memory using memblock_alloc,
 * and in case of failure, it calls panic with the formatted message.
 * This function should not be used directly, please use the macro memblock_alloc_or_panic.
 */
void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
				       const char *func)
{
	void *addr = memblock_alloc(size, align);

	if (unlikely(!addr))
		panic("%s: Failed to allocate %pap bytes\n", func, &size);
	return addr;
}
/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init_memblock memblock_reserved_kern_size(phys_addr_t limit, int nid)
{
	struct memblock_region *r;
	phys_addr_t total = 0;

	for_each_reserved_mem_region(r) {
		phys_addr_t size = r->size;

		if (r->base > limit)
			break;

		if (r->base + r->size > limit)
			size = limit - r->base;

		if (nid == memblock_get_region_node(r) || !numa_valid_node(nid))
			if (r->flags & MEMBLOCK_RSRV_KERN)
				total += size;
	}

	return total;
}
/**
 * memblock_estimated_nr_free_pages - return estimated number of free pages
 * from memblock point of view
 *
 * During bootup, subsystems might need a rough estimate of the number of free
 * pages in the whole system, before precise numbers are available from the
 * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
 * obtained from the buddy might be very imprecise during bootup.
 *
 * Return:
 * An estimated number of free pages from memblock point of view.
 */
unsigned long __init memblock_estimated_nr_free_pages(void)
{
	return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
}
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

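/*
 * Example (illustrative sketch): a mapping-time guard could combine these
 * predicates to refuse device-attribute mappings of kernel RAM; the helper
 * below is hypothetical.
 *
 *	static bool addr_is_mapped_ram(phys_addr_t addr)
 *	{
 *		return memblock_is_memory(addr) && memblock_is_map_memory(addr);
 *	}
 */
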
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return NUMA_NO_NODE;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

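/*
 * Example (illustrative sketch): before claiming a fixed range for a
 * firmware table, a caller could check that it is RAM and still free;
 * "base" and "size" are hypothetical.
 *
 *	if (memblock_is_region_memory(base, size) &&
 *	    !memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */
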
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

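/*
 * Example: x86 trims banks that do not begin and end on a page boundary,
 * since partial pages cannot be handed to the page allocator:
 *
 *	memblock_trim_memory(PAGE_SIZE);
 */
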
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

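/*
 * Example (illustrative sketch): early architecture code often clamps
 * allocations to memory that is already mapped, then lifts the clamp once
 * the full linear map is in place ("mapped_end" is hypothetical).
 *
 *	memblock_set_current_limit(mapped_end);
 *	...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */
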
static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (numa_valid_node(memblock_get_region_node(rgn)))
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

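/*
 * Example: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, so add/reserve/free operations are logged and
 * memblock_dump_all() prints the final layout.
 */
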
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = PAGE_ALIGN_DOWN(__pa(end_pg));

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
		 * the case.
		 */
		if (start)
			order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
		else
			order = MAX_PAGE_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

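/*
 * Worked example for the loop above: starting at pfn 0x1234 (__ffs == 2),
 * the first pass frees an order-2 chunk (4 pages) and advances to 0x1238
 * (__ffs == 3, 8 pages), then 0x1240, and so on; every free is naturally
 * aligned and as large as the remaining range allows.
 */
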
static unsigned long __init __free_memory_core(phys_addr_t start,
					       phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;
	unsigned long max_reserved;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
repeat:
	max_reserved = memblock.reserved.max;
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, region->size, &memblock.reserved, nid);
	}

	/*
	 * If 'max' changed, memblock.reserved has had its array doubled,
	 * which may have inserted a new reserved region before the current
	 * 'start'. Repeat the procedure to set its node id.
	 */
	if (max_reserved != memblock.reserved.max)
		goto repeat;

	/*
	 * initialize struct pages for reserved regions that don't have
	 * the MEMBLOCK_RSRV_NOINIT flag set
	 */
	for_each_reserved_mem_region(region) {
		if (!memblock_is_reserved_noinit(region)) {
			nid = memblock_get_region_node(region);
			start = region->base;
			end = start + region->size;

			if (!numa_valid_node(nid))
				nid = early_pfn_to_nid(PFN_DOWN(start));

			reserve_bootmem_region(start, end, nid);
		}
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases (e.g. node 0 has no RAM installed) low
	 * memory will be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	memblock_clear_kho_scratch_only();
	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}

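/*
 * Example: memblock_free_all() is expected to run once during boot, from
 * mm_core_init(), after the zone lists are ready; from then on the freed
 * pages belong to the buddy allocator and memblock should no longer be
 * used for allocations.
 */
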
/* Keep a table to reserve named memory */
#define RESERVE_MEM_MAX_ENTRIES	8
#define RESERVE_MEM_NAME_SIZE	16
struct reserve_mem_table {
	char			name[RESERVE_MEM_NAME_SIZE];
	phys_addr_t		start;
	phys_addr_t		size;
};
static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
static int reserved_mem_count;
static DEFINE_MUTEX(reserve_mem_lock);

/* Add wildcard region with a lookup name */
static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
				    const char *name)
{
	struct reserve_mem_table *map;

	map = &reserved_mem_table[reserved_mem_count++];
	map->start = start;
	map->size = size;
	strscpy(map->name, name);
}

static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
{
	struct reserve_mem_table *map;
	int i;

	for (i = 0; i < reserved_mem_count; i++) {
		map = &reserved_mem_table[i];
		if (!map->size)
			continue;
		if (strcmp(name, map->name) == 0)
			return map;
	}
	return NULL;
}

/**
 * reserve_mem_find_by_name - Find reserved memory region with a given name
 * @name: The name that is attached to a reserved memory region
 * @start: If found, holds the start address
 * @size: If found, holds the size of the region
 *
 * @start and @size are only updated if @name is found.
 *
 * Returns: 1 if found or 0 if not found.
 */
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
{
	struct reserve_mem_table *map;

	guard(mutex)(&reserve_mem_lock);
	map = reserve_mem_find_by_name_nolock(name);
	if (!map)
		return 0;

	*start = map->start;
	*size = map->size;
	return 1;
}
EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);

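/*
 * Example (illustrative sketch): a driver can attach to a reservation that
 * was created with the "reserve_mem=" boot option; error handling is
 * elided and the backend setup call is hypothetical.
 *
 *	phys_addr_t start, size;
 *
 *	if (reserve_mem_find_by_name("oops", &start, &size))
 *		setup_pstore_backend(start, size);
 */
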
/**
 * reserve_mem_release_by_name - Release reserved memory region with a given name
 * @name: The name that is attached to a reserved memory region
 *
 * Forcibly release the pages in the reserved memory region so that the memory
 * can be reused as free memory. After release, the reserved region size becomes 0.
 *
 * Returns: 1 if released or 0 if not found.
 */
int reserve_mem_release_by_name(const char *name)
{
	char buf[RESERVE_MEM_NAME_SIZE + 12];
	struct reserve_mem_table *map;
	void *start, *end;

	guard(mutex)(&reserve_mem_lock);
	map = reserve_mem_find_by_name_nolock(name);
	if (!map)
		return 0;

	start = phys_to_virt(map->start);
	end = start + map->size - 1;
	snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
	free_reserved_area(start, end, 0, buf);
	map->size = 0;

	return 1;
}

#ifdef CONFIG_KEXEC_HANDOVER
#define MEMBLOCK_KHO_FDT "memblock"
#define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1"
#define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1"
static struct page *kho_fdt;

static int reserve_mem_kho_finalize(struct kho_serialization *ser)
{
	int i, err = 0;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserve_mem_table *map = &reserved_mem_table[i];

		err |= kho_preserve_phys(map->start, map->size);
	}

	err |= kho_preserve_folio(page_folio(kho_fdt));
	err |= kho_add_subtree(ser, MEMBLOCK_KHO_FDT, page_to_virt(kho_fdt));

	return notifier_from_errno(err);
}

static int reserve_mem_kho_notifier(struct notifier_block *self,
				    unsigned long cmd, void *v)
{
	switch (cmd) {
	case KEXEC_KHO_FINALIZE:
		return reserve_mem_kho_finalize((struct kho_serialization *)v);
	case KEXEC_KHO_ABORT:
		return NOTIFY_DONE;
	default:
		return NOTIFY_BAD;
	}
}

static struct notifier_block reserve_mem_kho_nb = {
	.notifier_call = reserve_mem_kho_notifier,
};

static int __init prepare_kho_fdt(void)
{
	int err = 0, i;
	void *fdt;

	kho_fdt = alloc_page(GFP_KERNEL);
	if (!kho_fdt)
		return -ENOMEM;

	fdt = page_to_virt(kho_fdt);

	err |= fdt_create(fdt, PAGE_SIZE);
	err |= fdt_finish_reservemap(fdt);

	err |= fdt_begin_node(fdt, "");
	err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE);
	for (i = 0; i < reserved_mem_count; i++) {
		struct reserve_mem_table *map = &reserved_mem_table[i];

		err |= fdt_begin_node(fdt, map->name);
		err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE);
		err |= fdt_property(fdt, "start", &map->start, sizeof(map->start));
		err |= fdt_property(fdt, "size", &map->size, sizeof(map->size));
		err |= fdt_end_node(fdt);
	}
	err |= fdt_end_node(fdt);

	err |= fdt_finish(fdt);

	if (err) {
		pr_err("failed to prepare memblock FDT for KHO: %d\n", err);
		put_page(kho_fdt);
		kho_fdt = NULL;
	}

	return err;
}

static int __init reserve_mem_init(void)
{
	int err;

	if (!kho_is_enabled() || !reserved_mem_count)
		return 0;

	err = prepare_kho_fdt();
	if (err)
		return err;

	err = register_kho_notifier(&reserve_mem_kho_nb);
	if (err) {
		put_page(kho_fdt);
		kho_fdt = NULL;
	}

	return err;
}
late_initcall(reserve_mem_init);

static void *__init reserve_mem_kho_retrieve_fdt(void)
{
	phys_addr_t fdt_phys;
	static void *fdt;
	int err;

	if (fdt)
		return fdt;

	err = kho_retrieve_subtree(MEMBLOCK_KHO_FDT, &fdt_phys);
	if (err) {
		if (err != -ENOENT)
			pr_warn("failed to retrieve FDT '%s' from KHO: %d\n",
				MEMBLOCK_KHO_FDT, err);
		return NULL;
	}

	fdt = phys_to_virt(fdt_phys);

	err = fdt_node_check_compatible(fdt, 0, MEMBLOCK_KHO_NODE_COMPATIBLE);
	if (err) {
		pr_warn("FDT '%s' is incompatible with '%s': %d\n",
			MEMBLOCK_KHO_FDT, MEMBLOCK_KHO_NODE_COMPATIBLE, err);
		fdt = NULL;
		return NULL;
	}

	return fdt;
}

static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
					  phys_addr_t align)
{
	int err, len_start, len_size, offset;
	const phys_addr_t *p_start, *p_size;
	void *fdt;

	fdt = reserve_mem_kho_retrieve_fdt();
	if (!fdt)
		return false;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0) {
		pr_warn("FDT '%s' has no child '%s': %d\n",
			MEMBLOCK_KHO_FDT, name, offset);
		return false;
	}
	err = fdt_node_check_compatible(fdt, offset, RESERVE_MEM_KHO_NODE_COMPATIBLE);
	if (err) {
		pr_warn("Node '%s' is incompatible with '%s': %d\n",
			name, RESERVE_MEM_KHO_NODE_COMPATIBLE, err);
		return false;
	}

	p_start = fdt_getprop(fdt, offset, "start", &len_start);
	p_size = fdt_getprop(fdt, offset, "size", &len_size);
	if (!p_start || len_start != sizeof(*p_start) || !p_size ||
	    len_size != sizeof(*p_size)) {
		return false;
	}

	if (*p_start & (align - 1)) {
		pr_warn("KHO reserve-mem '%s' has wrong alignment (0x%lx, 0x%lx)\n",
			name, (long)align, (long)*p_start);
		return false;
	}

	if (*p_size != size) {
		pr_warn("KHO reserve-mem '%s' has wrong size (0x%lx != 0x%lx)\n",
			name, (long)*p_size, (long)size);
		return false;
	}

	reserved_mem_add(*p_start, size, name);
	pr_info("Revived memory reservation '%s' from KHO\n", name);

	return true;
}

#else
static bool __init reserve_mem_kho_revive(const char *name, phys_addr_t size,
					  phys_addr_t align)
{
	return false;
}
#endif /* CONFIG_KEXEC_HANDOVER */

/*
 * Parse reserve_mem=nn:align:name
 */
static int __init reserve_mem(char *p)
{
	phys_addr_t start, size, align, tmp;
	char *name;
	char *oldp;
	int len;

	if (!p)
		return -EINVAL;

	/* Check if there's room for more reserved memory */
	if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES)
		return -EBUSY;

	oldp = p;
	size = memparse(p, &p);
	if (!size || p == oldp)
		return -EINVAL;

	if (*p != ':')
		return -EINVAL;

	align = memparse(p+1, &p);
	if (*p != ':')
		return -EINVAL;

	/*
	 * memblock_phys_alloc() doesn't like a zero size align,
	 * but it is OK for this command to have it.
	 */
	if (align < SMP_CACHE_BYTES)
		align = SMP_CACHE_BYTES;

	name = p + 1;
	len = strlen(name);

	/* name needs to have length but not be too big */
	if (!len || len >= RESERVE_MEM_NAME_SIZE)
		return -EINVAL;

	/* Make sure that name has text */
	for (p = name; *p; p++) {
		if (isalnum(*p))
			break;
	}
	if (!*p)
		return -EINVAL;

	/* Make sure the name is not already used */
	if (reserve_mem_find_by_name(name, &start, &tmp))
		return -EBUSY;

	/* Pick previous allocations up from KHO if available */
	if (reserve_mem_kho_revive(name, size, align))
		return 1;

	/* TODO: Allocation must be outside of scratch region */
	start = memblock_phys_alloc(size, align);
	if (!start)
		return -ENOMEM;

	reserved_mem_add(start, size, name);

	return 1;
}
__setup("reserve_mem=", reserve_mem);

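/*
 * Example: "reserve_mem=12M:4096:oops" reserves 12M, aligned to 4096 bytes,
 * under the name "oops"; a consumer such as ramoops can then find it with
 * reserve_mem_find_by_name("oops", ...), and with KHO enabled the same
 * region can be revived across kexec.
 */
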
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
static const char * const flagname[] = {
	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
	[ilog2(MEMBLOCK_RSRV_KERN)] = "RSV_KERN",
	[ilog2(MEMBLOCK_KHO_SCRATCH)] = "KHO_SCRATCH",
};

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i, j, nid;
	unsigned int count = ARRAY_SIZE(flagname);
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (numa_valid_node(nid))
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);
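
/*
 * Example: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK enabled, the
 * resulting layout can be inspected at runtime; output might look like:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff    0 NONE
 */
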
#endif /* CONFIG_DEBUG_FS */