// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"
#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note, that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
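/*
 * A minimal usage sketch of the setup sequence described in the overview
 * above. It is compiled out and illustrative only: the function name
 * early_mem_init_example() and all addresses/sizes are hypothetical, not
 * taken from any real architecture.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static void __init early_mem_init_example(void)
{
	void *buf;

	/* describe the physical memory layout (hypothetical DRAM banks) */
	memblock_add(0x80000000, SZ_512M);
	memblock_add(0xc0000000, SZ_512M);

	/* protect the kernel image before any early allocation happens */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);

	/* from here on the region arrays may be resized on demand */
	memblock_allow_resize();

	/* early, zeroed allocation; returns a virtual address */
	buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
	if (!buf)
		panic("%s: early allocation failed\n", __func__);
}
#endif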
#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif
struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif
/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)
static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
		       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		panic("memblock: cannot resize %s array\n", type->name);

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
		end_rgn--;
	}
}
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}

	/*
	 * The worst case is when the new range overlaps all existing regions,
	 * in which case we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there are enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}
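/*
 * Sketch of the overlap/merge semantics documented above, with made-up
 * addresses: after these three calls the "memory" type contains a single
 * region [0x1000, 0x4000) because the ranges overlap or abut and share
 * node id and flags. Compiled out; for illustration only.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static void __init add_range_merge_example(void)
{
	memblock_add(0x1000, 0x1000);	/* [0x1000, 0x2000)             */
	memblock_add(0x1800, 0x1000);	/* overlaps -> [0x1000, 0x2800) */
	memblock_add(0x2800, 0x1800);	/* abuts    -> [0x1000, 0x4000) */
}
#endif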
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_validate_numa_coverage - check if amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal amount of memory (in bytes) that can have
 * unassigned node ID
 *
 * A buggy firmware may report memory that does not belong to any node.
 * Check if amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* calculate lost pages */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (nid == NUMA_NO_NODE)
			nr_pages += end_pfn - start_pfn;
	}

	if ((nr_pages << PAGE_SHIFT) >= threshold_bytes) {
		mem_size_mb = memblock_phys_mem_size() >> 20;
		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
		return false;
	}

	return true;
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index one
 * past the last in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @type: memblock type to set/clear flag for
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int set, int flag)
{
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved()
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
 * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized
 * at boot time.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * struct pages will not be initialized for reserved memory regions marked with
 * %MEMBLOCK_RSRV_NOINIT.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
				    MEMBLOCK_RSRV_NOINIT);
}
static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	return false;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
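/*
 * Compiled-out sketch of the *@idx encoding used by __next_mem_range():
 * the low 32 bits index type_a while the high 32 bits index the gaps in
 * type_b, so the whole iterator state travels in a single u64. The
 * function name below is hypothetical and for illustration only.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static void __init next_mem_range_idx_example(void)
{
	u64 idx = 0;
	phys_addr_t start, end;

	/* first free (memory minus reserved) intersection */
	__next_mem_range(&idx, NUMA_NO_NODE, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &start, &end, NULL);

	pr_info("free range [%pa-%pa], idx_a=%u idx_b=%u\n",
		&start, &end, (u32)idx, (u32)(idx >> 32));
}
#endif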
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, found + size);

	return found;
}
/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       true);
}
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
						&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return NUMA_NO_NODE;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			/* aligned chunk doesn't exist, remove the region */
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
		 * the case.
		 */
		if (start)
			order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
		else
			order = MAX_PAGE_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
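/*
 * Worked example of the chunking above, with made-up numbers: freeing
 * pfns [6, 14) proceeds as __ffs(6) = 1 -> 2 pages at pfn 6, then 4
 * pages at pfn 8 (order 3 would overshoot the end, so it is reduced to
 * order 2), then 2 pages at pfn 12; every chunk is naturally aligned
 * the way the buddy allocator requires.
 */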
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, end, &memblock.reserved, nid);
	}

	/*
	 * initialize struct pages for reserved regions that don't have
	 * the MEMBLOCK_RSRV_NOINIT flag set
	 */
	for_each_reserved_mem_region(region) {
		if (!memblock_is_reserved_noinit(region)) {
			nid = memblock_get_region_node(region);
			start = region->base;
			end = start + region->size;

			if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
				nid = early_pfn_to_nid(PFN_DOWN(start));

			reserve_bootmem_region(start, end, nid);
		}
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when Node0 has no RAM installed,
	 * low memory will be on Node1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
static const char * const flagname[] = {
	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
	[ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
};

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i, j, nid;
	unsigned int count = ARRAY_SIZE(flagname);
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (nid != MAX_NUMNODES)
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */