1 /*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/bitops.h>
17 #include <linux/poison.h>
18 #include <linux/pfn.h>
19 #include <linux/debugfs.h>
20 #include <linux/kmemleak.h>
21 #include <linux/seq_file.h>
22 #include <linux/memblock.h>
23
24 #include <asm/sections.h>
25 #include <linux/io.h>
26
27 #include "internal.h"
28
29 #define INIT_MEMBLOCK_REGIONS 128
30 #define INIT_PHYSMEM_REGIONS 4
31
32 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS
33 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
34 #endif
35
36 /**
37 * DOC: memblock overview
38 *
39 * Memblock is a method of managing memory regions during the early
40 * boot period when the usual kernel memory allocators are not up and
41 * running.
42 *
43 * Memblock views the system memory as collections of contiguous
44 * regions. There are several types of these collections:
45 *
46 * * ``memory`` - describes the physical memory available to the
47 * kernel; this may differ from the actual physical memory installed
48 * in the system, for instance when the memory is restricted with
49 * the ``mem=`` command line parameter
50 * * ``reserved`` - describes the regions that were allocated
51 * * ``physmem`` - describes the actual physical memory regardless of
52 * the possible restrictions; the ``physmem`` type is only available
53 * on some architectures.
54 *
55 * Each region is represented by :c:type:`struct memblock_region` that
56 * defines the region extents, its attributes and NUMA node id on NUMA
57 * systems. Every memory type is described by the :c:type:`struct
58 * memblock_type` which contains an array of memory regions along with
59 * the allocator metadata. The memory types are nicely wrapped with
60 * :c:type:`struct memblock`. This structure is statically initialized
61 * at build time. The region arrays are initially sized to
62 * %INIT_MEMBLOCK_REGIONS for "memory", %INIT_MEMBLOCK_RESERVED_REGIONS
63 * for "reserved" and %INIT_PHYSMEM_REGIONS for "physmem".
64 * The :c:func:`memblock_allow_resize` enables automatic resizing of
65 * the region arrays during addition of new regions. This feature
66 * should be used with care so that memory allocated for the region
67 * array will not overlap with areas that should be reserved, for
68 * example initrd.
69 *
70 * The early architecture setup should tell memblock what the physical
71 * memory layout is by using :c:func:`memblock_add` or
72 * :c:func:`memblock_add_node` functions. The first function does not
73 * assign the region to a NUMA node and it is appropriate for UMA
74 * systems. Yet, it is possible to use it on NUMA systems as well and
75 * assign the region to a NUMA node later in the setup process using
76 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
77 * performs such an assignment directly.
78 *
79 * Once memblock is setup the memory can be allocated using one of the
80 * API variants:
81 *
82 * * :c:func:`memblock_phys_alloc*` - these functions return the
83 * **physical** address of the allocated memory
84 * * :c:func:`memblock_alloc*` - these functions return the **virtual**
85 * address of the allocated memory.
86 *
87 * Note that both API variants use implicit assumptions about allowed
88 * memory ranges and the fallback methods. Consult the documentation
89 * of :c:func:`memblock_alloc_internal` and
90 * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
91 * description.
92 *
93 * As the system boot progresses, the architecture specific
94 * :c:func:`mem_init` function frees all the memory to the buddy page
95 * allocator.
96 *
97 * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
98 * memblock data structures will be discarded after the system
99 * initialization completes.
100 */
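
/*
 * A minimal sketch of the flow described above, as it might look in a
 * hypothetical architecture setup path. The addresses and the
 * arch_detect_ram() helper are made up for illustration only:
 *
 *	phys_addr_t base, size, pa;
 *	void *va;
 *
 *	arch_detect_ram(&base, &size);
 *	memblock_add(base, size);
 *	memblock_reserve(__pa_symbol(_stext), _end - _stext);
 *	memblock_allow_resize();
 *
 *	pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *	va = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *
 * Later, the architecture specific mem_init() releases everything that
 * is in "memory" but not in "reserved" to the buddy allocator with
 * memblock_free_all().
 */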
101
102 #ifndef CONFIG_NEED_MULTIPLE_NODES
103 struct pglist_data __refdata contig_page_data;
104 EXPORT_SYMBOL(contig_page_data);
105 #endif
106
107 unsigned long max_low_pfn;
108 unsigned long min_low_pfn;
109 unsigned long max_pfn;
110 unsigned long long max_possible_pfn;
111
112 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
113 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
114 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
115 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
116 #endif
117
118 struct memblock memblock __initdata_memblock = {
119 .memory.regions = memblock_memory_init_regions,
120 .memory.cnt = 1, /* empty dummy entry */
121 .memory.max = INIT_MEMBLOCK_REGIONS,
122 .memory.name = "memory",
123
124 .reserved.regions = memblock_reserved_init_regions,
125 .reserved.cnt = 1, /* empty dummy entry */
126 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
127 .reserved.name = "reserved",
128
129 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
130 .physmem.regions = memblock_physmem_init_regions,
131 .physmem.cnt = 1, /* empty dummy entry */
132 .physmem.max = INIT_PHYSMEM_REGIONS,
133 .physmem.name = "physmem",
134 #endif
135
136 .bottom_up = false,
137 .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
138 };
139
140 int memblock_debug __initdata_memblock;
141 static bool system_has_some_mirror __initdata_memblock = false;
142 static int memblock_can_resize __initdata_memblock;
143 static int memblock_memory_in_slab __initdata_memblock = 0;
144 static int memblock_reserved_in_slab __initdata_memblock = 0;
145
146 static enum memblock_flags __init_memblock choose_memblock_flags(void)
147 {
148 return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
149 }
150
151 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
152 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
153 {
154 return *size = min(*size, PHYS_ADDR_MAX - base);
155 }
156
157 /*
158 * Address comparison utilities
159 */
160 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
161 phys_addr_t base2, phys_addr_t size2)
162 {
163 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
164 }
165
166 bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
167 phys_addr_t base, phys_addr_t size)
168 {
169 unsigned long i;
170
171 for (i = 0; i < type->cnt; i++)
172 if (memblock_addrs_overlap(base, size, type->regions[i].base,
173 type->regions[i].size))
174 break;
175 return i < type->cnt;
176 }
177
178 /**
179 * __memblock_find_range_bottom_up - find free area utility in bottom-up
180 * @start: start of candidate range
181 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
182 * %MEMBLOCK_ALLOC_ACCESSIBLE
183 * @size: size of free area to find
184 * @align: alignment of free area to find
185 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
186 * @flags: pick from blocks based on memory attributes
187 *
188 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
189 *
190 * Return:
191 * Found address on success, 0 on failure.
192 */
193 static phys_addr_t __init_memblock
194 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
195 phys_addr_t size, phys_addr_t align, int nid,
196 enum memblock_flags flags)
197 {
198 phys_addr_t this_start, this_end, cand;
199 u64 i;
200
201 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
202 this_start = clamp(this_start, start, end);
203 this_end = clamp(this_end, start, end);
204
205 cand = round_up(this_start, align);
206 if (cand < this_end && this_end - cand >= size)
207 return cand;
208 }
209
210 return 0;
211 }
212
213 /**
214 * __memblock_find_range_top_down - find free area utility, in top-down
215 * @start: start of candidate range
216 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
217 * %MEMBLOCK_ALLOC_ACCESSIBLE
218 * @size: size of free area to find
219 * @align: alignment of free area to find
220 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
221 * @flags: pick from blocks based on memory attributes
222 *
223 * Utility called from memblock_find_in_range_node(), find free area top-down.
224 *
225 * Return:
226 * Found address on success, 0 on failure.
227 */
228 static phys_addr_t __init_memblock
229 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
230 phys_addr_t size, phys_addr_t align, int nid,
231 enum memblock_flags flags)
232 {
233 phys_addr_t this_start, this_end, cand;
234 u64 i;
235
236 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
237 NULL) {
238 this_start = clamp(this_start, start, end);
239 this_end = clamp(this_end, start, end);
240
241 if (this_end < size)
242 continue;
243
244 cand = round_down(this_end - size, align);
245 if (cand >= this_start)
246 return cand;
247 }
248
249 return 0;
250 }
251
252 /**
253 * memblock_find_in_range_node - find free area in given range and node
254 * @size: size of free area to find
255 * @align: alignment of free area to find
256 * @start: start of candidate range
257 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
258 * %MEMBLOCK_ALLOC_ACCESSIBLE
259 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
260 * @flags: pick from blocks based on memory attributes
261 *
262 * Find @size free area aligned to @align in the specified range and node.
263 *
264 * When allocation direction is bottom-up, the @start should be greater
265 * than the end of the kernel image. Otherwise, it will be trimmed. The
266 * reason is that we want the bottom-up allocation just near the kernel
267 * image so it is highly likely that the allocated memory and the kernel
268 * will reside in the same node.
269 *
270 * If bottom-up allocation failed, will try to allocate memory top-down.
271 *
272 * Return:
273 * Found address on success, 0 on failure.
274 */
275 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
276 phys_addr_t align, phys_addr_t start,
277 phys_addr_t end, int nid,
278 enum memblock_flags flags)
279 {
280 phys_addr_t kernel_end, ret;
281
282 /* pump up @end */
283 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
284 end == MEMBLOCK_ALLOC_KASAN)
285 end = memblock.current_limit;
286
287 /* avoid allocating the first page */
288 start = max_t(phys_addr_t, start, PAGE_SIZE);
289 end = max(start, end);
290 kernel_end = __pa_symbol(_end);
291
292 /*
293 * try bottom-up allocation only when bottom-up mode
294 * is set and @end is above the kernel image.
295 */
296 if (memblock_bottom_up() && end > kernel_end) {
297 phys_addr_t bottom_up_start;
298
299 /* make sure we will allocate above the kernel */
300 bottom_up_start = max(start, kernel_end);
301
302 /* ok, try bottom-up allocation first */
303 ret = __memblock_find_range_bottom_up(bottom_up_start, end,
304 size, align, nid, flags);
305 if (ret)
306 return ret;
307
308 /*
309 * we always limit bottom-up allocation above the kernel,
310 * but top-down allocation doesn't have the limit, so
311 * retrying top-down allocation may succeed when bottom-up
312 * allocation failed.
313 *
314 * bottom-up allocation is expected to fail very rarely,
315 * so we use WARN_ONCE() here to see the stack trace if
316 * a failure happens.
317 */
318 WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
319 "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
320 }
321
322 return __memblock_find_range_top_down(start, end, size, align, nid,
323 flags);
324 }
325
326 /**
327 * memblock_find_in_range - find free area in given range
328 * @start: start of candidate range
329 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
330 * %MEMBLOCK_ALLOC_ACCESSIBLE
331 * @size: size of free area to find
332 * @align: alignment of free area to find
333 *
334 * Find @size free area aligned to @align in the specified range.
335 *
336 * Return:
337 * Found address on success, 0 on failure.
338 */
339 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
340 phys_addr_t end, phys_addr_t size,
341 phys_addr_t align)
342 {
343 phys_addr_t ret;
344 enum memblock_flags flags = choose_memblock_flags();
345
346 again:
347 ret = memblock_find_in_range_node(size, align, start, end,
348 NUMA_NO_NODE, flags);
349
350 if (!ret && (flags & MEMBLOCK_MIRROR)) {
351 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
352 &size);
353 flags &= ~MEMBLOCK_MIRROR;
354 goto again;
355 }
356
357 return ret;
358 }
359
360 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
361 {
362 type->total_size -= type->regions[r].size;
363 memmove(&type->regions[r], &type->regions[r + 1],
364 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
365 type->cnt--;
366
367 /* Special case for empty arrays */
368 if (type->cnt == 0) {
369 WARN_ON(type->total_size != 0);
370 type->cnt = 1;
371 type->regions[0].base = 0;
372 type->regions[0].size = 0;
373 type->regions[0].flags = 0;
374 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
375 }
376 }
377
378 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
379 /**
380 * memblock_discard - discard memory and reserved arrays if they were allocated
381 */
382 void __init memblock_discard(void)
383 {
384 phys_addr_t addr, size;
385
386 if (memblock.reserved.regions != memblock_reserved_init_regions) {
387 addr = __pa(memblock.reserved.regions);
388 size = PAGE_ALIGN(sizeof(struct memblock_region) *
389 memblock.reserved.max);
390 __memblock_free_late(addr, size);
391 }
392
393 if (memblock.memory.regions != memblock_memory_init_regions) {
394 addr = __pa(memblock.memory.regions);
395 size = PAGE_ALIGN(sizeof(struct memblock_region) *
396 memblock.memory.max);
397 __memblock_free_late(addr, size);
398 }
399 }
400 #endif
401
402 /**
403 * memblock_double_array - double the size of the memblock regions array
404 * @type: memblock type of the regions array being doubled
405 * @new_area_start: starting address of memory range to avoid overlap with
406 * @new_area_size: size of memory range to avoid overlap with
407 *
408 * Double the size of the @type regions array. If memblock is being used to
409 * allocate memory for a new reserved regions array and there is a previously
410 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
411 * waiting to be reserved, ensure the memory used by the new array does
412 * not overlap.
413 *
414 * Return:
415 * 0 on success, -1 on failure.
416 */
417 static int __init_memblock memblock_double_array(struct memblock_type *type,
418 phys_addr_t new_area_start,
419 phys_addr_t new_area_size)
420 {
421 struct memblock_region *new_array, *old_array;
422 phys_addr_t old_alloc_size, new_alloc_size;
423 phys_addr_t old_size, new_size, addr, new_end;
424 int use_slab = slab_is_available();
425 int *in_slab;
426
427 /* We don't allow resizing until we know about the reserved regions
428 * of memory that aren't suitable for allocation
429 */
430 if (!memblock_can_resize)
431 return -1;
432
433 /* Calculate new doubled size */
434 old_size = type->max * sizeof(struct memblock_region);
435 new_size = old_size << 1;
436 /*
437 * We need to allocate the new array aligned to PAGE_SIZE,
438 * so we can free it completely later.
439 */
440 old_alloc_size = PAGE_ALIGN(old_size);
441 new_alloc_size = PAGE_ALIGN(new_size);
442
443 /* Retrieve the slab flag */
444 if (type == &memblock.memory)
445 in_slab = &memblock_memory_in_slab;
446 else
447 in_slab = &memblock_reserved_in_slab;
448
449 /* Try to find some space for it */
450 if (use_slab) {
451 new_array = kmalloc(new_size, GFP_KERNEL);
452 addr = new_array ? __pa(new_array) : 0;
453 } else {
454 /* only exclude range when trying to double reserved.regions */
455 if (type != &memblock.reserved)
456 new_area_start = new_area_size = 0;
457
458 addr = memblock_find_in_range(new_area_start + new_area_size,
459 memblock.current_limit,
460 new_alloc_size, PAGE_SIZE);
461 if (!addr && new_area_size)
462 addr = memblock_find_in_range(0,
463 min(new_area_start, memblock.current_limit),
464 new_alloc_size, PAGE_SIZE);
465
466 new_array = addr ? __va(addr) : NULL;
467 }
468 if (!addr) {
469 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
470 type->name, type->max, type->max * 2);
471 return -1;
472 }
473
474 new_end = addr + new_size - 1;
475 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
476 type->name, type->max * 2, &addr, &new_end);
477
478 /*
479 * Found space, we now need to move the array over before we add the
480 * reserved region since it may be our reserved array itself that is
481 * full.
482 */
483 memcpy(new_array, type->regions, old_size);
484 memset(new_array + type->max, 0, old_size);
485 old_array = type->regions;
486 type->regions = new_array;
487 type->max <<= 1;
488
489 /* Free old array. We needn't free it if the array is the static one */
490 if (*in_slab)
491 kfree(old_array);
492 else if (old_array != memblock_memory_init_regions &&
493 old_array != memblock_reserved_init_regions)
494 memblock_free(__pa(old_array), old_alloc_size);
495
496 /*
497 * Reserve the new array if that comes from the memblock. Otherwise, we
498 * needn't do it
499 */
500 if (!use_slab)
501 BUG_ON(memblock_reserve(addr, new_alloc_size));
502
503 /* Update slab flag */
504 *in_slab = use_slab;
505
506 return 0;
507 }
508
509 /**
510 * memblock_merge_regions - merge neighboring compatible regions
511 * @type: memblock type to scan
512 *
513 * Scan @type and merge neighboring compatible regions.
514 */
515 static void __init_memblock memblock_merge_regions(struct memblock_type *type)
516 {
517 int i = 0;
518
519 /* cnt never goes below 1 */
520 while (i < type->cnt - 1) {
521 struct memblock_region *this = &type->regions[i];
522 struct memblock_region *next = &type->regions[i + 1];
523
524 if (this->base + this->size != next->base ||
525 memblock_get_region_node(this) !=
526 memblock_get_region_node(next) ||
527 this->flags != next->flags) {
528 BUG_ON(this->base + this->size > next->base);
529 i++;
530 continue;
531 }
532
533 this->size += next->size;
534 /* move forward from next + 1, index of which is i + 2 */
535 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
536 type->cnt--;
537 }
538 }
539
540 /**
541 * memblock_insert_region - insert new memblock region
542 * @type: memblock type to insert into
543 * @idx: index for the insertion point
544 * @base: base address of the new region
545 * @size: size of the new region
546 * @nid: node id of the new region
547 * @flags: flags of the new region
548 *
549 * Insert new memblock region [@base, @base + @size) into @type at @idx.
550 * @type must already have extra room to accommodate the new region.
551 */
552 static void __init_memblock memblock_insert_region(struct memblock_type *type,
553 int idx, phys_addr_t base,
554 phys_addr_t size,
555 int nid,
556 enum memblock_flags flags)
557 {
558 struct memblock_region *rgn = &type->regions[idx];
559
560 BUG_ON(type->cnt >= type->max);
561 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
562 rgn->base = base;
563 rgn->size = size;
564 rgn->flags = flags;
565 memblock_set_region_node(rgn, nid);
566 type->cnt++;
567 type->total_size += size;
568 }
569
570 /**
571 * memblock_add_range - add new memblock region
572 * @type: memblock type to add new region into
573 * @base: base address of the new region
574 * @size: size of the new region
575 * @nid: nid of the new region
576 * @flags: flags of the new region
577 *
578 * Add new memblock region [@base, @base + @size) into @type. The new region
579 * is allowed to overlap with existing ones - overlaps don't affect already
580 * existing regions. @type is guaranteed to be minimal (all neighbouring
581 * compatible regions are merged) after the addition.
582 *
583 * Return:
584 * 0 on success, -errno on failure.
585 */
586 int __init_memblock memblock_add_range(struct memblock_type *type,
587 phys_addr_t base, phys_addr_t size,
588 int nid, enum memblock_flags flags)
589 {
590 bool insert = false;
591 phys_addr_t obase = base;
592 phys_addr_t end = base + memblock_cap_size(base, &size);
593 int idx, nr_new;
594 struct memblock_region *rgn;
595
596 if (!size)
597 return 0;
598
599 /* special case for empty array */
600 if (type->regions[0].size == 0) {
601 WARN_ON(type->cnt != 1 || type->total_size);
602 type->regions[0].base = base;
603 type->regions[0].size = size;
604 type->regions[0].flags = flags;
605 memblock_set_region_node(&type->regions[0], nid);
606 type->total_size = size;
607 return 0;
608 }
609 repeat:
610 /*
611 * The following is executed twice. Once with %false @insert and
612 * then with %true. The first counts the number of regions needed
613 * to accommodate the new area. The second actually inserts them.
614 */
615 base = obase;
616 nr_new = 0;
617
618 for_each_memblock_type(idx, type, rgn) {
619 phys_addr_t rbase = rgn->base;
620 phys_addr_t rend = rbase + rgn->size;
621
622 if (rbase >= end)
623 break;
624 if (rend <= base)
625 continue;
626 /*
627 * @rgn overlaps. If it separates the lower part of new
628 * area, insert that portion.
629 */
630 if (rbase > base) {
631 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
632 WARN_ON(nid != memblock_get_region_node(rgn));
633 #endif
634 WARN_ON(flags != rgn->flags);
635 nr_new++;
636 if (insert)
637 memblock_insert_region(type, idx++, base,
638 rbase - base, nid,
639 flags);
640 }
641 /* area below @rend is dealt with, forget about it */
642 base = min(rend, end);
643 }
644
645 /* insert the remaining portion */
646 if (base < end) {
647 nr_new++;
648 if (insert)
649 memblock_insert_region(type, idx, base, end - base,
650 nid, flags);
651 }
652
653 if (!nr_new)
654 return 0;
655
656 /*
657 * If this was the first round, resize array and repeat for actual
658 * insertions; otherwise, merge and return.
659 */
660 if (!insert) {
661 while (type->cnt + nr_new > type->max)
662 if (memblock_double_array(type, obase, size) < 0)
663 return -ENOMEM;
664 insert = true;
665 goto repeat;
666 } else {
667 memblock_merge_regions(type);
668 return 0;
669 }
670 }
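
/*
 * A worked example of the overlap handling above, with made-up addresses
 * and an initially empty "memory" type:
 *
 *	memblock_add(0x1000, 0x1000);	// memory: [0x1000-0x2000)
 *	memblock_add(0x3000, 0x1000);	// memory: [0x1000-0x2000) [0x3000-0x4000)
 *	memblock_add(0x1800, 0x2000);	// overlaps both regions; only the
 *					// uncovered piece [0x2000-0x3000) is
 *					// inserted, then the neighbours merge
 *					// into a single [0x1000-0x4000)
 */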
671
672 /**
673 * memblock_add_node - add new memblock region within a NUMA node
674 * @base: base address of the new region
675 * @size: size of the new region
676 * @nid: nid of the new region
677 *
678 * Add new memblock region [@base, @base + @size) to the "memory"
679 * type. See memblock_add_range() description for more details.
680 *
681 * Return:
682 * 0 on success, -errno on failure.
683 */
684 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
685 int nid)
686 {
687 return memblock_add_range(&memblock.memory, base, size, nid, 0);
688 }
689
690 /**
691 * memblock_add - add new memblock region
692 * @base: base address of the new region
693 * @size: size of the new region
694 *
695 * Add new memblock region [@base, @base + @size) to the "memory"
696 * type. See memblock_add_range() description for more details.
697 *
698 * Return:
699 * 0 on success, -errno on failure.
700 */
701 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
702 {
703 phys_addr_t end = base + size - 1;
704
705 memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
706 &base, &end, (void *)_RET_IP_);
707
708 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
709 }
710
711 /**
712 * memblock_isolate_range - isolate given range into disjoint memblocks
713 * @type: memblock type to isolate range for
714 * @base: base of range to isolate
715 * @size: size of range to isolate
716 * @start_rgn: out parameter for the start of isolated region
717 * @end_rgn: out parameter for the end of isolated region
718 *
719 * Walk @type and ensure that regions don't cross the boundaries defined by
720 * [@base, @base + @size). Crossing regions are split at the boundaries,
721 * which may create at most two more regions. The index of the first
722 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
723 *
724 * Return:
725 * 0 on success, -errno on failure.
726 */
727 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
728 phys_addr_t base, phys_addr_t size,
729 int *start_rgn, int *end_rgn)
730 {
731 phys_addr_t end = base + memblock_cap_size(base, &size);
732 int idx;
733 struct memblock_region *rgn;
734
735 *start_rgn = *end_rgn = 0;
736
737 if (!size)
738 return 0;
739
740 /* we'll create at most two more regions */
741 while (type->cnt + 2 > type->max)
742 if (memblock_double_array(type, base, size) < 0)
743 return -ENOMEM;
744
745 for_each_memblock_type(idx, type, rgn) {
746 phys_addr_t rbase = rgn->base;
747 phys_addr_t rend = rbase + rgn->size;
748
749 if (rbase >= end)
750 break;
751 if (rend <= base)
752 continue;
753
754 if (rbase < base) {
755 /*
756 * @rgn intersects from below. Split and continue
757 * to process the next region - the new top half.
758 */
759 rgn->base = base;
760 rgn->size -= base - rbase;
761 type->total_size -= base - rbase;
762 memblock_insert_region(type, idx, rbase, base - rbase,
763 memblock_get_region_node(rgn),
764 rgn->flags);
765 } else if (rend > end) {
766 /*
767 * @rgn intersects from above. Split and redo the
768 * current region - the new bottom half.
769 */
770 rgn->base = end;
771 rgn->size -= end - rbase;
772 type->total_size -= end - rbase;
773 memblock_insert_region(type, idx--, rbase, end - rbase,
774 memblock_get_region_node(rgn),
775 rgn->flags);
776 } else {
777 /* @rgn is fully contained, record it */
778 if (!*end_rgn)
779 *start_rgn = idx;
780 *end_rgn = idx + 1;
781 }
782 }
783
784 return 0;
785 }
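
/*
 * For example (a sketch with made-up numbers), isolating base=0x1800,
 * size=0x1000 in a type holding the single region [0x1000-0x3000):
 *
 *	before:	regions[0] = [0x1000-0x3000)
 *	after:	regions[0] = [0x1000-0x1800)
 *		regions[1] = [0x1800-0x2800)
 *		regions[2] = [0x2800-0x3000)
 *
 * with *start_rgn = 1 and *end_rgn = 2, so callers can walk the regions
 * fully inside the range as regions[start_rgn] .. regions[end_rgn - 1].
 */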
786
787 static int __init_memblock memblock_remove_range(struct memblock_type *type,
788 phys_addr_t base, phys_addr_t size)
789 {
790 int start_rgn, end_rgn;
791 int i, ret;
792
793 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
794 if (ret)
795 return ret;
796
797 for (i = end_rgn - 1; i >= start_rgn; i--)
798 memblock_remove_region(type, i);
799 return 0;
800 }
801
802 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
803 {
804 phys_addr_t end = base + size - 1;
805
806 memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
807 &base, &end, (void *)_RET_IP_);
808
809 return memblock_remove_range(&memblock.memory, base, size);
810 }
811
812 /**
813 * memblock_free - free boot memory block
814 * @base: phys starting address of the boot memory block
815 * @size: size of the boot memory block in bytes
816 *
817 * Free boot memory block previously allocated by memblock_alloc_xx() API.
818 * The freed memory will not be released to the buddy allocator.
819 */
820 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
821 {
822 phys_addr_t end = base + size - 1;
823
824 memblock_dbg(" memblock_free: [%pa-%pa] %pF\n",
825 &base, &end, (void *)_RET_IP_);
826
827 kmemleak_free_part_phys(base, size);
828 return memblock_remove_range(&memblock.reserved, base, size);
829 }
830
831 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
832 {
833 phys_addr_t end = base + size - 1;
834
835 memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
836 &base, &end, (void *)_RET_IP_);
837
838 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
839 }
840
841 /**
842 * memblock_setclr_flag - set or clear flag for a memory region
843 * @base: base address of the region
844 * @size: size of the region
845 * @set: set or clear the flag
846 * @flag: the flag to update
847 *
848 * This function isolates the region [@base, @base + @size), and sets/clears @flag.
849 *
850 * Return: 0 on success, -errno on failure.
851 */
852 static int __init_memblock memblock_setclr_flag(phys_addr_t base,
853 phys_addr_t size, int set, int flag)
854 {
855 struct memblock_type *type = &memblock.memory;
856 int i, ret, start_rgn, end_rgn;
857
858 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
859 if (ret)
860 return ret;
861
862 for (i = start_rgn; i < end_rgn; i++) {
863 struct memblock_region *r = &type->regions[i];
864
865 if (set)
866 r->flags |= flag;
867 else
868 r->flags &= ~flag;
869 }
870
871 memblock_merge_regions(type);
872 return 0;
873 }
874
875 /**
876 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
877 * @base: the base phys addr of the region
878 * @size: the size of the region
879 *
880 * Return: 0 on success, -errno on failure.
881 */
882 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
883 {
884 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
885 }
886
887 /**
888 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
889 * @base: the base phys addr of the region
890 * @size: the size of the region
891 *
892 * Return: 0 on success, -errno on failure.
893 */
894 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
895 {
896 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
897 }
898
899 /**
900 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
901 * @base: the base phys addr of the region
902 * @size: the size of the region
903 *
904 * Return: 0 on success, -errno on failure.
905 */
906 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
907 {
908 system_has_some_mirror = true;
909
910 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
911 }
912
913 /**
914 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
915 * @base: the base phys addr of the region
916 * @size: the size of the region
917 *
918 * Return: 0 on success, -errno on failure.
919 */
920 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
921 {
922 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
923 }
924
925 /**
926 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
927 * @base: the base phys addr of the region
928 * @size: the size of the region
929 *
930 * Return: 0 on success, -errno on failure.
931 */
932 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
933 {
934 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
935 }
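
/*
 * A hedged sketch of how early platform code might use the flag helpers
 * above; the ranges are invented for illustration:
 *
 *	memblock_mark_hotplug(0x100000000ULL, SZ_1G);	// may be unplugged
 *	memblock_mark_mirror(0x0, SZ_256M);		// mirrored, reliable
 *	memblock_mark_nomap(0x80000000ULL, SZ_2M);	// firmware owned
 *
 * The flags are honoured by should_skip_region() during range walks:
 * hotpluggable regions are skipped when movable_node is enabled, only
 * mirrored regions are used when MEMBLOCK_MIRROR is requested, and
 * nomap regions are never handed out unless explicitly asked for.
 */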
936
937 /**
938 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
939 * @idx: pointer to u64 loop variable
940 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
941 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
942 *
943 * Iterate over all reserved memory regions.
944 */
945 void __init_memblock __next_reserved_mem_region(u64 *idx,
946 phys_addr_t *out_start,
947 phys_addr_t *out_end)
948 {
949 struct memblock_type *type = &memblock.reserved;
950
951 if (*idx < type->cnt) {
952 struct memblock_region *r = &type->regions[*idx];
953 phys_addr_t base = r->base;
954 phys_addr_t size = r->size;
955
956 if (out_start)
957 *out_start = base;
958 if (out_end)
959 *out_end = base + size;
960
961 *idx += 1;
962 return;
963 }
964
965 /* signal end of iteration */
966 *idx = ULLONG_MAX;
967 }
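
/*
 * Callers normally use this through the for_each_reserved_mem_region()
 * macro rather than directly; a minimal sketch:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa)\n", &start, &end);
 *
 * The loop starts with *idx == 0 and terminates once *idx is set to
 * ULLONG_MAX above.
 */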
968
969 static bool should_skip_region(struct memblock_region *m, int nid, int flags)
970 {
971 int m_nid = memblock_get_region_node(m);
972
973 /* only memory regions are associated with nodes, check it */
974 if (nid != NUMA_NO_NODE && nid != m_nid)
975 return true;
976
977 /* skip hotpluggable memory regions if needed */
978 if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
979 return true;
980
981 /* if we want mirror memory skip non-mirror memory regions */
982 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
983 return true;
984
985 /* skip nomap memory unless we were asked for it explicitly */
986 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
987 return true;
988
989 return false;
990 }
991
992 /**
993 * __next_mem_range - next function for for_each_free_mem_range() etc.
994 * @idx: pointer to u64 loop variable
995 * @nid: node selector, %NUMA_NO_NODE for all nodes
996 * @flags: pick from blocks based on memory attributes
997 * @type_a: pointer to memblock_type from where the range is taken
998 * @type_b: pointer to memblock_type which excludes memory from being taken
999 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1000 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1001 * @out_nid: ptr to int for nid of the range, can be %NULL
1002 *
1003 * Find the first area from *@idx which matches @nid, fill the out
1004 * parameters, and update *@idx for the next iteration. The lower 32bit of
1005 * *@idx contains index into type_a and the upper 32bit indexes the
1006 * areas before each region in type_b. For example, if type_b regions
1007 * look like the following,
1008 *
1009 * 0:[0-16), 1:[32-48), 2:[128-130)
1010 *
1011 * The upper 32bit indexes the following regions.
1012 *
1013 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
1014 *
1015 * As both region arrays are sorted, the function advances the two indices
1016 * in lockstep and returns each intersection.
1017 */
1018 void __init_memblock __next_mem_range(u64 *idx, int nid,
1019 enum memblock_flags flags,
1020 struct memblock_type *type_a,
1021 struct memblock_type *type_b,
1022 phys_addr_t *out_start,
1023 phys_addr_t *out_end, int *out_nid)
1024 {
1025 int idx_a = *idx & 0xffffffff;
1026 int idx_b = *idx >> 32;
1027
1028 if (WARN_ONCE(nid == MAX_NUMNODES,
1029 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1030 nid = NUMA_NO_NODE;
1031
1032 for (; idx_a < type_a->cnt; idx_a++) {
1033 struct memblock_region *m = &type_a->regions[idx_a];
1034
1035 phys_addr_t m_start = m->base;
1036 phys_addr_t m_end = m->base + m->size;
1037 int m_nid = memblock_get_region_node(m);
1038
1039 if (should_skip_region(m, nid, flags))
1040 continue;
1041
1042 if (!type_b) {
1043 if (out_start)
1044 *out_start = m_start;
1045 if (out_end)
1046 *out_end = m_end;
1047 if (out_nid)
1048 *out_nid = m_nid;
1049 idx_a++;
1050 *idx = (u32)idx_a | (u64)idx_b << 32;
1051 return;
1052 }
1053
1054 /* scan areas before each reservation */
1055 for (; idx_b < type_b->cnt + 1; idx_b++) {
1056 struct memblock_region *r;
1057 phys_addr_t r_start;
1058 phys_addr_t r_end;
1059
1060 r = &type_b->regions[idx_b];
1061 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1062 r_end = idx_b < type_b->cnt ?
1063 r->base : PHYS_ADDR_MAX;
1064
1065 /*
1066 * if idx_b advanced past idx_a,
1067 * break out to advance idx_a
1068 */
1069 if (r_start >= m_end)
1070 break;
1071 /* if the two regions intersect, we're done */
1072 if (m_start < r_end) {
1073 if (out_start)
1074 *out_start =
1075 max(m_start, r_start);
1076 if (out_end)
1077 *out_end = min(m_end, r_end);
1078 if (out_nid)
1079 *out_nid = m_nid;
1080 /*
1081 * The region which ends first is
1082 * advanced for the next iteration.
1083 */
1084 if (m_end <= r_end)
1085 idx_a++;
1086 else
1087 idx_b++;
1088 *idx = (u32)idx_a | (u64)idx_b << 32;
1089 return;
1090 }
1091 }
1092 }
1093
1094 /* signal end of iteration */
1095 *idx = ULLONG_MAX;
1096 }
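
/*
 * This iterator is normally driven by for_each_free_mem_range(); for
 * instance, a sketch that prints every free (memory minus reserved)
 * range on a given node:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: [%pa-%pa)\n", &start, &end);
 *
 * Between iterations the whole cursor state lives in the single u64 @i,
 * split into the two 32-bit indices described above.
 */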
1097
1098 /**
1099 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1100 *
1101 * @idx: pointer to u64 loop variable
1102 * @nid: node selector, %NUMA_NO_NODE for all nodes
1103 * @flags: pick from blocks based on memory attributes
1104 * @type_a: pointer to memblock_type from where the range is taken
1105 * @type_b: pointer to memblock_type which excludes memory from being taken
1106 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1107 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1108 * @out_nid: ptr to int for nid of the range, can be %NULL
1109 *
1110 * Finds the next range from type_a which is not marked as unsuitable
1111 * in type_b.
1112 *
1113 * Reverse of __next_mem_range().
1114 */
1115 void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1116 enum memblock_flags flags,
1117 struct memblock_type *type_a,
1118 struct memblock_type *type_b,
1119 phys_addr_t *out_start,
1120 phys_addr_t *out_end, int *out_nid)
1121 {
1122 int idx_a = *idx & 0xffffffff;
1123 int idx_b = *idx >> 32;
1124
1125 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1126 nid = NUMA_NO_NODE;
1127
1128 if (*idx == (u64)ULLONG_MAX) {
1129 idx_a = type_a->cnt - 1;
1130 if (type_b != NULL)
1131 idx_b = type_b->cnt;
1132 else
1133 idx_b = 0;
1134 }
1135
1136 for (; idx_a >= 0; idx_a--) {
1137 struct memblock_region *m = &type_a->regions[idx_a];
1138
1139 phys_addr_t m_start = m->base;
1140 phys_addr_t m_end = m->base + m->size;
1141 int m_nid = memblock_get_region_node(m);
1142
1143 if (should_skip_region(m, nid, flags))
1144 continue;
1145
1146 if (!type_b) {
1147 if (out_start)
1148 *out_start = m_start;
1149 if (out_end)
1150 *out_end = m_end;
1151 if (out_nid)
1152 *out_nid = m_nid;
1153 idx_a--;
1154 *idx = (u32)idx_a | (u64)idx_b << 32;
1155 return;
1156 }
1157
1158 /* scan areas before each reservation */
1159 for (; idx_b >= 0; idx_b--) {
1160 struct memblock_region *r;
1161 phys_addr_t r_start;
1162 phys_addr_t r_end;
1163
1164 r = &type_b->regions[idx_b];
1165 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1166 r_end = idx_b < type_b->cnt ?
1167 r->base : PHYS_ADDR_MAX;
1168 /*
1169 * if idx_b advanced past idx_a,
1170 * break out to advance idx_a
1171 */
1172
1173 if (r_end <= m_start)
1174 break;
1175 /* if the two regions intersect, we're done */
1176 if (m_end > r_start) {
1177 if (out_start)
1178 *out_start = max(m_start, r_start);
1179 if (out_end)
1180 *out_end = min(m_end, r_end);
1181 if (out_nid)
1182 *out_nid = m_nid;
1183 if (m_start >= r_start)
1184 idx_a--;
1185 else
1186 idx_b--;
1187 *idx = (u32)idx_a | (u64)idx_b << 32;
1188 return;
1189 }
1190 }
1191 }
1192 /* signal end of iteration */
1193 *idx = ULLONG_MAX;
1194 }
1195
1196 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1197 /*
1198 * Common iterator interface used to define for_each_mem_pfn_range().
1199 */
1200 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1201 unsigned long *out_start_pfn,
1202 unsigned long *out_end_pfn, int *out_nid)
1203 {
1204 struct memblock_type *type = &memblock.memory;
1205 struct memblock_region *r;
1206
1207 while (++*idx < type->cnt) {
1208 r = &type->regions[*idx];
1209
1210 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1211 continue;
1212 if (nid == MAX_NUMNODES || nid == r->nid)
1213 break;
1214 }
1215 if (*idx >= type->cnt) {
1216 *idx = -1;
1217 return;
1218 }
1219
1220 if (out_start_pfn)
1221 *out_start_pfn = PFN_UP(r->base);
1222 if (out_end_pfn)
1223 *out_end_pfn = PFN_DOWN(r->base + r->size);
1224 if (out_nid)
1225 *out_nid = r->nid;
1226 }
1227
1228 /**
1229 * memblock_set_node - set node ID on memblock regions
1230 * @base: base of area to set node ID for
1231 * @size: size of area to set node ID for
1232 * @type: memblock type to set node ID for
1233 * @nid: node ID to set
1234 *
1235 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1236 * Regions which cross the area boundaries are split as necessary.
1237 *
1238 * Return:
1239 * 0 on success, -errno on failure.
1240 */
1241 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1242 struct memblock_type *type, int nid)
1243 {
1244 int start_rgn, end_rgn;
1245 int i, ret;
1246
1247 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1248 if (ret)
1249 return ret;
1250
1251 for (i = start_rgn; i < end_rgn; i++)
1252 memblock_set_region_node(&type->regions[i], nid);
1253
1254 memblock_merge_regions(type);
1255 return 0;
1256 }
1257 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1258
1259 /**
1260 * memblock_alloc_range_nid - allocate boot memory block
1261 * @size: size of memory block to be allocated in bytes
1262 * @align: alignment of the region and block's size
1263 * @start: the lower bound of the memory region to allocate (phys address)
1264 * @end: the upper bound of the memory region to allocate (phys address)
1265 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1266 *
1267 * The allocation is performed from memory region limited by
1268 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
1269 *
1270 * If the specified node cannot hold the requested memory, the
1271 * allocation falls back to any node in the system.
1272 *
1273 * For systems with memory mirroring, the allocation is attempted first
1274 * from the regions with mirroring enabled and then retried from any
1275 * memory region.
1276 *
1277 * In addition, this function sets min_count to 0 using kmemleak_alloc_phys() for
1278 * the allocated boot memory block, so that it is never reported as a leak.
1279 *
1280 * Return:
1281 * Physical address of allocated memory block on success, %0 on failure.
1282 */
1283 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1284 phys_addr_t align, phys_addr_t start,
1285 phys_addr_t end, int nid)
1286 {
1287 enum memblock_flags flags = choose_memblock_flags();
1288 phys_addr_t found;
1289
1290 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1291 nid = NUMA_NO_NODE;
1292
1293 if (!align) {
1294 /* Can't use WARNs this early in boot on powerpc */
1295 dump_stack();
1296 align = SMP_CACHE_BYTES;
1297 }
1298
1299 if (end > memblock.current_limit)
1300 end = memblock.current_limit;
1301
1302 again:
1303 found = memblock_find_in_range_node(size, align, start, end, nid,
1304 flags);
1305 if (found && !memblock_reserve(found, size))
1306 goto done;
1307
1308 if (nid != NUMA_NO_NODE) {
1309 found = memblock_find_in_range_node(size, align, start,
1310 end, NUMA_NO_NODE,
1311 flags);
1312 if (found && !memblock_reserve(found, size))
1313 goto done;
1314 }
1315
1316 if (flags & MEMBLOCK_MIRROR) {
1317 flags &= ~MEMBLOCK_MIRROR;
1318 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1319 &size);
1320 goto again;
1321 }
1322
1323 return 0;
1324
1325 done:
1326 /* Skip kmemleak for kasan_init() due to high volume. */
1327 if (end != MEMBLOCK_ALLOC_KASAN)
1328 /*
1329 * The min_count is set to 0 so that memblock allocated
1330 * blocks are never reported as leaks. This is because many
1331 * of these blocks are only referred via the physical
1332 * address which is not looked up by kmemleak.
1333 */
1334 kmemleak_alloc_phys(found, size, 0, 0);
1335
1336 return found;
1337 }
1338
1339 /**
1340 * memblock_phys_alloc_range - allocate a memory block inside specified range
1341 * @size: size of memory block to be allocated in bytes
1342 * @align: alignment of the region and block's size
1343 * @start: the lower bound of the memory region to allocate (physical address)
1344 * @end: the upper bound of the memory region to allocate (physical address)
1345 *
1346 * Allocate @size bytes in the range between @start and @end.
1347 *
1348 * Return: physical address of the allocated memory block on success,
1349 * %0 on failure.
1350 */
1351 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1352 phys_addr_t align,
1353 phys_addr_t start,
1354 phys_addr_t end)
1355 {
1356 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
1357 }
1358
1359 /**
1360 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1361 * @size: size of memory block to be allocated in bytes
1362 * @align: alignment of the region and block's size
1363 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1364 *
1365 * Allocates memory block from the specified NUMA node. If the node
1366 * has no available memory, attempts to allocate from any node in the
1367 * system.
1368 *
1369 * Return: physical address of the allocated memory block on success,
1370 * %0 on failure.
1371 */
1372 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1373 {
1374 return memblock_alloc_range_nid(size, align, 0,
1375 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
1376 }
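
/*
 * For example, an early page table page or a per-node area might be
 * carved out like this (a sketch, sizes and bounds invented):
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
 *				       0, SZ_4G);	// below 4G
 *	if (!pa)
 *		panic("cannot allocate early page table page\n");
 *
 *	pa = memblock_phys_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, nid);
 *
 * Both return a physical address that is already reserved; the caller
 * maps or converts it (e.g. with phys_to_virt()) as needed.
 */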
1377
1378 /**
1379 * memblock_alloc_internal - allocate boot memory block
1380 * @size: size of memory block to be allocated in bytes
1381 * @align: alignment of the region and block's size
1382 * @min_addr: the lower bound of the memory region to allocate (phys address)
1383 * @max_addr: the upper bound of the memory region to allocate (phys address)
1384 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1385 *
1386 * Allocates memory block using memblock_alloc_range_nid() and
1387 * converts the returned physical address to virtual.
1388 *
1389 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1390 * will fall back to memory below @min_addr. Other constraints, such
1391 * as node and mirrored memory will be handled again in
1392 * memblock_alloc_range_nid().
1393 *
1394 * Return:
1395 * Virtual address of allocated memory block on success, NULL on failure.
1396 */
1397 static void * __init memblock_alloc_internal(
1398 phys_addr_t size, phys_addr_t align,
1399 phys_addr_t min_addr, phys_addr_t max_addr,
1400 int nid)
1401 {
1402 phys_addr_t alloc;
1403
1404 /*
1405 * Detect any accidental use of these APIs after slab is ready, as at
1406 * this moment memblock may already be deinitialized and its
1407 * internal data may have been destroyed (after memblock_free_all() runs).
1408 */
1409 if (WARN_ON_ONCE(slab_is_available()))
1410 return kzalloc_node(size, GFP_NOWAIT, nid);
1411
1412 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
1413
1414 /* retry allocation without lower limit */
1415 if (!alloc && min_addr)
1416 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
1417
1418 if (!alloc)
1419 return NULL;
1420
1421 return phys_to_virt(alloc);
1422 }
1423
1424 /**
1425 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1426 * memory and without panicking
1427 * @size: size of memory block to be allocated in bytes
1428 * @align: alignment of the region and block's size
1429 * @min_addr: the lower bound of the memory region from where the allocation
1430 * is preferred (phys address)
1431 * @max_addr: the upper bound of the memory region from where the allocation
1432 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1433 * allocate only from memory limited by memblock.current_limit value
1434 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1435 *
1436 * Public function, provides additional debug information (including caller
1437 * info), if enabled. Does not zero allocated memory, does not panic if request
1438 * cannot be satisfied.
1439 *
1440 * Return:
1441 * Virtual address of allocated memory block on success, NULL on failure.
1442 */
1443 void * __init memblock_alloc_try_nid_raw(
1444 phys_addr_t size, phys_addr_t align,
1445 phys_addr_t min_addr, phys_addr_t max_addr,
1446 int nid)
1447 {
1448 void *ptr;
1449
1450 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
1451 __func__, (u64)size, (u64)align, nid, &min_addr,
1452 &max_addr, (void *)_RET_IP_);
1453
1454 ptr = memblock_alloc_internal(size, align,
1455 min_addr, max_addr, nid);
1456 if (ptr && size > 0)
1457 page_init_poison(ptr, size);
1458
1459 return ptr;
1460 }
1461
1462 /**
1463 * memblock_alloc_try_nid - allocate boot memory block
1464 * @size: size of memory block to be allocated in bytes
1465 * @align: alignment of the region and block's size
1466 * @min_addr: the lower bound of the memory region from where the allocation
1467 * is preferred (phys address)
1468 * @max_addr: the upper bound of the memory region from where the allocation
1469 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1470 * allocate only from memory limited by memblock.current_limit value
1471 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1472 *
1473 * Public function, provides additional debug information (including caller
1474 * info), if enabled. This function zeroes the allocated memory.
1475 *
1476 * Return:
1477 * Virtual address of allocated memory block on success, NULL on failure.
1478 */
1479 void * __init memblock_alloc_try_nid(
1480 phys_addr_t size, phys_addr_t align,
1481 phys_addr_t min_addr, phys_addr_t max_addr,
1482 int nid)
1483 {
1484 void *ptr;
1485
1486 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
1487 __func__, (u64)size, (u64)align, nid, &min_addr,
1488 &max_addr, (void *)_RET_IP_);
1489 ptr = memblock_alloc_internal(size, align,
1490 min_addr, max_addr, nid);
1491 if (ptr)
1492 memset(ptr, 0, size);
1493
1494 return ptr;
1495 }
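
/*
 * The usual entry points are the memblock_alloc*() wrappers from
 * <linux/memblock.h> built on top of this function; a sketch with a
 * hypothetical foo table:
 *
 *	struct foo *tbl = memblock_alloc_node(sizeof(*tbl),
 *					      SMP_CACHE_BYTES, nid);
 *	if (!tbl)
 *		panic("%s: failed to allocate foo table\n", __func__);
 *
 * memblock_alloc_try_nid_raw() is the variant for callers that will
 * overwrite the whole block anyway and want to skip the memset().
 */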
1496
1497 /**
1498 * __memblock_free_late - free pages directly to buddy allocator
1499 * @base: phys starting address of the boot memory block
1500 * @size: size of the boot memory block in bytes
1501 *
1502 * This is only useful when the memblock allocator has already been torn
1503 * down, but we are still initializing the system. Pages are released directly
1504 * to the buddy allocator.
1505 */
1506 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1507 {
1508 phys_addr_t cursor, end;
1509
1510 end = base + size - 1;
1511 memblock_dbg("%s: [%pa-%pa] %pF\n",
1512 __func__, &base, &end, (void *)_RET_IP_);
1513 kmemleak_free_part_phys(base, size);
1514 cursor = PFN_UP(base);
1515 end = PFN_DOWN(base + size);
1516
1517 for (; cursor < end; cursor++) {
1518 memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1519 totalram_pages_inc();
1520 }
1521 }
1522
1523 /*
1524 * Remaining API functions
1525 */
1526
1527 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1528 {
1529 return memblock.memory.total_size;
1530 }
1531
1532 phys_addr_t __init_memblock memblock_reserved_size(void)
1533 {
1534 return memblock.reserved.total_size;
1535 }
1536
1537 phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1538 {
1539 unsigned long pages = 0;
1540 struct memblock_region *r;
1541 unsigned long start_pfn, end_pfn;
1542
1543 for_each_memblock(memory, r) {
1544 start_pfn = memblock_region_memory_base_pfn(r);
1545 end_pfn = memblock_region_memory_end_pfn(r);
1546 start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1547 end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1548 pages += end_pfn - start_pfn;
1549 }
1550
1551 return PFN_PHYS(pages);
1552 }
1553
1554 /* lowest address */
1555 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1556 {
1557 return memblock.memory.regions[0].base;
1558 }
1559
1560 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1561 {
1562 int idx = memblock.memory.cnt - 1;
1563
1564 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1565 }
1566
1567 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1568 {
1569 phys_addr_t max_addr = PHYS_ADDR_MAX;
1570 struct memblock_region *r;
1571
1572 /*
1573 * Translate the memory @limit size into the max address within one of
1574 * the memory memblock regions. If @limit exceeds the total size
1575 * of those regions, max_addr will keep its original value, PHYS_ADDR_MAX.
1576 */
1577 for_each_memblock(memory, r) {
1578 if (limit <= r->size) {
1579 max_addr = r->base + limit;
1580 break;
1581 }
1582 limit -= r->size;
1583 }
1584
1585 return max_addr;
1586 }
1587
1588 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1589 {
1590 phys_addr_t max_addr = PHYS_ADDR_MAX;
1591
1592 if (!limit)
1593 return;
1594
1595 max_addr = __find_max_addr(limit);
1596
1597 /* @limit exceeds the total size of the memory, do nothing */
1598 if (max_addr == PHYS_ADDR_MAX)
1599 return;
1600
1601 /* truncate both memory and reserved regions */
1602 memblock_remove_range(&memblock.memory, max_addr,
1603 PHYS_ADDR_MAX);
1604 memblock_remove_range(&memblock.reserved, max_addr,
1605 PHYS_ADDR_MAX);
1606 }
1607
1608 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1609 {
1610 int start_rgn, end_rgn;
1611 int i, ret;
1612
1613 if (!size)
1614 return;
1615
1616 ret = memblock_isolate_range(&memblock.memory, base, size,
1617 &start_rgn, &end_rgn);
1618 if (ret)
1619 return;
1620
1621 /* remove all the MAP regions */
1622 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1623 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1624 memblock_remove_region(&memblock.memory, i);
1625
1626 for (i = start_rgn - 1; i >= 0; i--)
1627 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1628 memblock_remove_region(&memblock.memory, i);
1629
1630 /* truncate the reserved regions */
1631 memblock_remove_range(&memblock.reserved, 0, base);
1632 memblock_remove_range(&memblock.reserved,
1633 base + size, PHYS_ADDR_MAX);
1634 }
1635
1636 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1637 {
1638 phys_addr_t max_addr;
1639
1640 if (!limit)
1641 return;
1642
1643 max_addr = __find_max_addr(limit);
1644
1645 /* @limit exceeds the total size of the memory, do nothing */
1646 if (max_addr == PHYS_ADDR_MAX)
1647 return;
1648
1649 memblock_cap_memory_range(0, max_addr);
1650 }
1651
1652 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1653 {
1654 unsigned int left = 0, right = type->cnt;
1655
1656 do {
1657 unsigned int mid = (right + left) / 2;
1658
1659 if (addr < type->regions[mid].base)
1660 right = mid;
1661 else if (addr >= (type->regions[mid].base +
1662 type->regions[mid].size))
1663 left = mid + 1;
1664 else
1665 return mid;
1666 } while (left < right);
1667 return -1;
1668 }
1669
1670 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1671 {
1672 return memblock_search(&memblock.reserved, addr) != -1;
1673 }
1674
1675 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1676 {
1677 return memblock_search(&memblock.memory, addr) != -1;
1678 }
1679
1680 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1681 {
1682 int i = memblock_search(&memblock.memory, addr);
1683
1684 if (i == -1)
1685 return false;
1686 return !memblock_is_nomap(&memblock.memory.regions[i]);
1687 }
1688
1689 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1690 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1691 unsigned long *start_pfn, unsigned long *end_pfn)
1692 {
1693 struct memblock_type *type = &memblock.memory;
1694 int mid = memblock_search(type, PFN_PHYS(pfn));
1695
1696 if (mid == -1)
1697 return -1;
1698
1699 *start_pfn = PFN_DOWN(type->regions[mid].base);
1700 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1701
1702 return type->regions[mid].nid;
1703 }
1704 #endif
1705
1706 /**
1707 * memblock_is_region_memory - check if a region is a subset of memory
1708 * @base: base of region to check
1709 * @size: size of region to check
1710 *
1711 * Check if the region [@base, @base + @size) is a subset of a memory block.
1712 *
1713 * Return:
1714 * true if the region is a subset of a memory block, false otherwise.
1715 */
1716 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1717 {
1718 int idx = memblock_search(&memblock.memory, base);
1719 phys_addr_t end = base + memblock_cap_size(base, &size);
1720
1721 if (idx == -1)
1722 return false;
1723 return (memblock.memory.regions[idx].base +
1724 memblock.memory.regions[idx].size) >= end;
1725 }
1726
1727 /**
1728 * memblock_is_region_reserved - check if a region intersects reserved memory
1729 * @base: base of region to check
1730 * @size: size of region to check
1731 *
1732 * Check if the region [@base, @base + @size) intersects a reserved
1733 * memory block.
1734 *
1735 * Return:
1736 * True if they intersect, false if not.
1737 */
1738 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1739 {
1740 memblock_cap_size(base, &size);
1741 return memblock_overlaps_region(&memblock.reserved, base, size);
1742 }
1743
1744 void __init_memblock memblock_trim_memory(phys_addr_t align)
1745 {
1746 phys_addr_t start, end, orig_start, orig_end;
1747 struct memblock_region *r;
1748
1749 for_each_memblock(memory, r) {
1750 orig_start = r->base;
1751 orig_end = r->base + r->size;
1752 start = round_up(orig_start, align);
1753 end = round_down(orig_end, align);
1754
1755 if (start == orig_start && end == orig_end)
1756 continue;
1757
1758 if (start < end) {
1759 r->base = start;
1760 r->size = end - start;
1761 } else {
1762 memblock_remove_region(&memblock.memory,
1763 r - memblock.memory.regions);
1764 r--;
1765 }
1766 }
1767 }
1768
1769 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1770 {
1771 memblock.current_limit = limit;
1772 }
1773
1774 phys_addr_t __init_memblock memblock_get_current_limit(void)
1775 {
1776 return memblock.current_limit;
1777 }
1778
1779 static void __init_memblock memblock_dump(struct memblock_type *type)
1780 {
1781 phys_addr_t base, end, size;
1782 enum memblock_flags flags;
1783 int idx;
1784 struct memblock_region *rgn;
1785
1786 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1787
1788 for_each_memblock_type(idx, type, rgn) {
1789 char nid_buf[32] = "";
1790
1791 base = rgn->base;
1792 size = rgn->size;
1793 end = base + size - 1;
1794 flags = rgn->flags;
1795 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1796 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1797 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1798 memblock_get_region_node(rgn));
1799 #endif
1800 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1801 type->name, idx, &base, &end, &size, nid_buf, flags);
1802 }
1803 }
1804
1805 void __init_memblock __memblock_dump_all(void)
1806 {
1807 pr_info("MEMBLOCK configuration:\n");
1808 pr_info(" memory size = %pa reserved size = %pa\n",
1809 &memblock.memory.total_size,
1810 &memblock.reserved.total_size);
1811
1812 memblock_dump(&memblock.memory);
1813 memblock_dump(&memblock.reserved);
1814 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1815 memblock_dump(&memblock.physmem);
1816 #endif
1817 }
1818
1819 void __init memblock_allow_resize(void)
1820 {
1821 memblock_can_resize = 1;
1822 }
1823
1824 static int __init early_memblock(char *p)
1825 {
1826 if (p && strstr(p, "debug"))
1827 memblock_debug = 1;
1828 return 0;
1829 }
1830 early_param("memblock", early_memblock);
1831
1832 static void __init __free_pages_memory(unsigned long start, unsigned long end)
1833 {
1834 int order;
1835
1836 while (start < end) {
1837 order = min(MAX_ORDER - 1UL, __ffs(start));
1838
1839 while (start + (1UL << order) > end)
1840 order--;
1841
1842 memblock_free_pages(pfn_to_page(start), start, order);
1843
1844 start += (1UL << order);
1845 }
1846 }
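
/*
 * A worked example of the order choice above: for start == pfn 0x1234
 * (low bits ...0100), __ffs() gives 2, so at most a 4-page block keeps
 * the buddy-required alignment, and pages are released as
 *
 *	0x1234 (order 2) -> 0x1238 (order 3) -> 0x1240 (order 6) ...
 *
 * with the inner while loop shrinking the order near @end so no block
 * sticks out past the range.
 */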
1847
1848 static unsigned long __init __free_memory_core(phys_addr_t start,
1849 phys_addr_t end)
1850 {
1851 unsigned long start_pfn = PFN_UP(start);
1852 unsigned long end_pfn = min_t(unsigned long,
1853 PFN_DOWN(end), max_low_pfn);
1854
1855 if (start_pfn >= end_pfn)
1856 return 0;
1857
1858 __free_pages_memory(start_pfn, end_pfn);
1859
1860 return end_pfn - start_pfn;
1861 }
1862
1863 static unsigned long __init free_low_memory_core_early(void)
1864 {
1865 unsigned long count = 0;
1866 phys_addr_t start, end;
1867 u64 i;
1868
1869 memblock_clear_hotplug(0, -1);
1870
1871 for_each_reserved_mem_region(i, &start, &end)
1872 reserve_bootmem_region(start, end);
1873
1874 /*
1875 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
1876 * because in some cases, such as Node0 having no RAM installed,
1877 * low RAM will be on Node1
1878 */
1879 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
1880 NULL)
1881 count += __free_memory_core(start, end);
1882
1883 return count;
1884 }
1885
1886 static int reset_managed_pages_done __initdata;
1887
1888 void reset_node_managed_pages(pg_data_t *pgdat)
1889 {
1890 struct zone *z;
1891
1892 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1893 atomic_long_set(&z->managed_pages, 0);
1894 }
1895
1896 void __init reset_all_zones_managed_pages(void)
1897 {
1898 struct pglist_data *pgdat;
1899
1900 if (reset_managed_pages_done)
1901 return;
1902
1903 for_each_online_pgdat(pgdat)
1904 reset_node_managed_pages(pgdat);
1905
1906 reset_managed_pages_done = 1;
1907 }
1908
1909 /**
1910 * memblock_free_all - release free pages to the buddy allocator
1911 *
1912 * Return: the number of pages actually released.
1913 */
1914 unsigned long __init memblock_free_all(void)
1915 {
1916 unsigned long pages;
1917
1918 reset_all_zones_managed_pages();
1919
1920 pages = free_low_memory_core_early();
1921 totalram_pages_add(pages);
1922
1923 return pages;
1924 }
1925
1926 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
1927
1928 static int memblock_debug_show(struct seq_file *m, void *private)
1929 {
1930 struct memblock_type *type = m->private;
1931 struct memblock_region *reg;
1932 int i;
1933 phys_addr_t end;
1934
1935 for (i = 0; i < type->cnt; i++) {
1936 reg = &type->regions[i];
1937 end = reg->base + reg->size - 1;
1938
1939 seq_printf(m, "%4d: ", i);
1940 seq_printf(m, "%pa..%pa\n", &reg->base, &end);
1941 }
1942 return 0;
1943 }
1944 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
1945
1946 static int __init memblock_init_debugfs(void)
1947 {
1948 struct dentry *root = debugfs_create_dir("memblock", NULL);
1949
1950 debugfs_create_file("memory", 0444, root,
1951 &memblock.memory, &memblock_debug_fops);
1952 debugfs_create_file("reserved", 0444, root,
1953 &memblock.reserved, &memblock_debug_fops);
1954 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1955 debugfs_create_file("physmem", 0444, root,
1956 &memblock.physmem, &memblock_debug_fops);
1957 #endif
1958
1959 return 0;
1960 }
1961 __initcall(memblock_init_debugfs);
1962
1963 #endif /* CONFIG_DEBUG_FS */