// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.  June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <lmb.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_RGN_OVERLAP		1
#define LMB_RGN_ADJACENT	2
/*
 * The following low level LMB functions must not access the global LMB memory
 * map since they are also used to manage IOVA memory maps in iommu drivers
 * like apple_dart.
 */
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
                              phys_addr_t base2, phys_size_t size2)
{
        const phys_addr_t base1_end = base1 + size1 - 1;
        const phys_addr_t base2_end = base2 + size2 - 1;

        return ((base1 <= base2_end) && (base2 <= base1_end));
}
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
                               phys_addr_t base2, phys_size_t size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}
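/*
 * Illustrative example (values assumed, not from the original source): with
 * base1 = 0x1000, size1 = 0x1000 and base2 = 0x2000, size2 = 0x1000,
 * lmb_addrs_overlap() returns 0, since [0x1000, 0x1fff] and [0x2000, 0x2fff]
 * do not intersect, while lmb_addrs_adjacent() returns 1 because
 * base2 == base1 + size1.
 */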
/**
 * lmb_regions_check() - Check if the regions overlap, or are adjacent
 * @lmb_rgn_lst: List of LMB regions
 * @r1: First region to check
 * @r2: Second region to check
 *
 * Check if the two regions with matching flags, r1 and r2 are
 * adjacent to each other, or if they overlap.
 *
 * Return:
 * * %LMB_RGN_OVERLAP	- Regions overlap
 * * %LMB_RGN_ADJACENT	- Regions adjacent to each other
 * * %0			- Neither of the above, or flags mismatch
 */
static long lmb_regions_check(struct alist *lmb_rgn_lst, unsigned long r1,
                              unsigned long r2)
{
        struct lmb_region *rgn = lmb_rgn_lst->data;
        phys_addr_t base1 = rgn[r1].base;
        phys_size_t size1 = rgn[r1].size;
        phys_addr_t base2 = rgn[r2].base;
        phys_size_t size2 = rgn[r2].size;

        if (rgn[r1].flags != rgn[r2].flags)
                return 0;

        if (lmb_addrs_overlap(base1, size1, base2, size2))
                return LMB_RGN_OVERLAP;
        else if (lmb_addrs_adjacent(base1, size1, base2, size2))
                return LMB_RGN_ADJACENT;

        return 0;
}
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
        u32 i;
        struct lmb_region *rgn = lmb_rgn_lst->data;

        /* Shift the remaining entries down and shrink the list by one */
        for (i = r; i < lmb_rgn_lst->count - 1; i++) {
                rgn[i].base = rgn[i + 1].base;
                rgn[i].size = rgn[i + 1].size;
                rgn[i].flags = rgn[i + 1].flags;
        }
        lmb_rgn_lst->count--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
                                 unsigned long r2)
{
        struct lmb_region *rgn = lmb_rgn_lst->data;

        rgn[r1].size += rgn[r2].size;
        lmb_remove_region(lmb_rgn_lst, r2);
}
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
                               unsigned long idx_start,
                               phys_addr_t base, phys_size_t size)
{
        phys_size_t rgnsize;
        unsigned long rgn_cnt, idx, idx_end;
        phys_addr_t rgnbase, rgnend;
        phys_addr_t mergebase, mergeend;
        struct lmb_region *rgn = lmb_rgn_lst->data;

        rgn_cnt = 0;
        idx = idx_start;
        idx_end = idx_start;

        /*
         * First thing to do is to identify how many regions
         * the requested region overlaps.
         * If the flags match, combine all these overlapping
         * regions into a single region, and remove the merged
         * regions.
         */
        while (idx <= lmb_rgn_lst->count - 1) {
                rgnbase = rgn[idx].base;
                rgnsize = rgn[idx].size;

                if (lmb_addrs_overlap(base, size, rgnbase,
                                      rgnsize)) {
                        if (rgn[idx].flags != LMB_NONE)
                                return -1;
                        rgn_cnt++;
                        idx_end = idx;
                }
                idx++;
        }

        /* The merged region's base and size */
        rgnbase = rgn[idx_start].base;
        mergebase = min(base, rgnbase);
        rgnend = rgn[idx_end].base + rgn[idx_end].size;
        mergeend = max(rgnend, (base + size));

        rgn[idx_start].base = mergebase;
        rgn[idx_start].size = mergeend - mergebase;

        /* Now remove the merged regions */
        while (--rgn_cnt)
                lmb_remove_region(lmb_rgn_lst, idx_start + 1);

        return 0;
}
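/*
 * Illustrative example (values assumed, not from the original source): if a
 * list holds two LMB_NONE regions [0x1000, 0x1fff] and [0x3000, 0x3fff] and
 * a request with base = 0x1800, size = 0x2000 overlaps both of them,
 * lmb_resize_regions() folds everything into one entry covering
 * [0x1000, 0x3fff] and drops the second entry.
 */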
/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return:
 * * %0		- Added successfully, or it's already added (only if LMB_NONE)
 * * %-EEXIST	- The region is already added, and flags != LMB_NONE
 * * %-1	- Failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
                                 phys_size_t size, u32 flags)
{
        unsigned long coalesced = 0;
        long ret, i;
        struct lmb_region *rgn = lmb_rgn_lst->data;

        if (alist_err(lmb_rgn_lst))
                return -1;

        /* First try and coalesce this LMB with another. */
        for (i = 0; i < lmb_rgn_lst->count; i++) {
                phys_addr_t rgnbase = rgn[i].base;
                phys_size_t rgnsize = rgn[i].size;
                u32 rgnflags = rgn[i].flags;

                ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (ret > 0) {
                        /* New region sits just below this one */
                        if (flags != rgnflags)
                                continue;
                        rgn[i].base -= size;
                        rgn[i].size += size;
                        coalesced++;
                        break;
                } else if (ret < 0) {
                        /* New region sits just above this one */
                        if (flags != rgnflags)
                                continue;
                        rgn[i].size += size;
                        coalesced++;
                        break;
                } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
                        ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
                        if (ret < 0)
                                return flags != LMB_NONE ? -EEXIST : -1;

                        coalesced++;
                        break;
                }
        }

        if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
                ret = lmb_regions_check(lmb_rgn_lst, i, i + 1);
                if (ret == LMB_RGN_ADJACENT) {
                        lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
                        coalesced++;
                } else if (ret == LMB_RGN_OVERLAP) {
                        /* fix overlapping areas */
                        phys_addr_t rgnbase = rgn[i].base;
                        phys_size_t rgnsize = rgn[i].size;

                        ret = lmb_resize_regions(lmb_rgn_lst, i,
                                                 rgnbase, rgnsize);
                        if (ret < 0)
                                return -1;

                        coalesced++;
                }
        }

        if (coalesced)
                return 0;

        if (alist_full(lmb_rgn_lst) &&
            !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
                return -1;
        rgn = lmb_rgn_lst->data;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = lmb_rgn_lst->count; i >= 0; i--) {
                if (i && base < rgn[i - 1].base) {
                        rgn[i] = rgn[i - 1];
                } else {
                        rgn[i].base = base;
                        rgn[i].size = size;
                        rgn[i].flags = flags;
                        break;
                }
        }

        lmb_rgn_lst->count++;

        return 0;
}
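/*
 * Illustrative example (assumed values, not from the original source):
 * starting from a single LMB_NONE free region [0x40000000, 0x4fffffff],
 * a call such as
 *
 *	lmb_add_region_flags(lst, 0x50000000, SZ_16M, LMB_NONE);
 *
 * is adjacent to the existing entry, so the entry simply grows to
 * [0x40000000, 0x50ffffff] instead of a second entry being created.
 */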
static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
                      phys_size_t size)
{
        struct lmb_region *rgn;
        phys_addr_t rgnbegin, rgnend;
        phys_addr_t end = base + size - 1;
        int i;

        /* Suppress GCC warnings */
        rgnbegin = rgnend = 0;

        rgn = lmb_rgn_lst->data;
        /* Find the region where (base, size) belongs to */
        for (i = 0; i < lmb_rgn_lst->count; i++) {
                rgnbegin = rgn[i].base;
                rgnend = rgnbegin + rgn[i].size - 1;

                if (rgnbegin <= base && end <= rgnend)
                        break;
        }

        /* Didn't find the region */
        if (i == lmb_rgn_lst->count)
                return -1;

        /* Check to see if we are removing entire region */
        if (rgnbegin == base && rgnend == end) {
                lmb_remove_region(lmb_rgn_lst, i);
                return 0;
        }

        /* Check to see if region is matching at the front */
        if (rgnbegin == base) {
                rgn[i].base = end + 1;
                rgn[i].size -= size;
                return 0;
        }

        /* Check to see if the region is matching at the end */
        if (rgnend == end) {
                rgn[i].size -= size;
                return 0;
        }

        /*
         * We need to split the entry - adjust the current one to the
         * beginning of the hole and add the region after hole.
         */
        rgn[i].size = base - rgn[i].base;
        return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
                                    rgn[i].flags);
}
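/*
 * Illustrative example (assumed values): freeing [0x2000, 0x2fff] out of a
 * region [0x1000, 0x3fff] matches neither end, so the entry is shrunk to
 * [0x1000, 0x1fff] and a second entry [0x3000, 0x3fff] is added with the
 * same flags.
 */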
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
                                phys_size_t size)
{
        unsigned long i;
        struct lmb_region *rgn = lmb_rgn_lst->data;

        for (i = 0; i < lmb_rgn_lst->count; i++) {
                phys_addr_t rgnbase = rgn[i].base;
                phys_size_t rgnsize = rgn[i].size;

                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < lmb_rgn_lst->count) ? i : -1;
}
/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory map.
 */

int io_lmb_setup(struct lmb *io_lmb)
{
        int ret;

        ret = alist_init(&io_lmb->available_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB free IOVA\n");
                return -ENOMEM;
        }

        ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB used IOVA\n");
                return -ENOMEM;
        }

        io_lmb->test = false;

        return 0;
}
void io_lmb_teardown(struct lmb *io_lmb)
{
        alist_uninit(&io_lmb->available_mem);
        alist_uninit(&io_lmb->used_mem);
}

long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
        return lmb_add_region_flags(&io_lmb->available_mem, base, size, LMB_NONE);
}
/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
        long i, rgn;
        phys_addr_t base = 0;
        phys_addr_t res_base;
        struct lmb_region *lmb_used = io_lmb->used_mem.data;
        struct lmb_region *lmb_memory = io_lmb->available_mem.data;

        for (i = io_lmb->available_mem.count - 1; i >= 0; i--) {
                phys_addr_t lmbbase = lmb_memory[i].base;
                phys_size_t lmbsize = lmb_memory[i].size;

                if (lmbsize < size)
                        continue;

                base = ALIGN_DOWN(lmbbase + lmbsize - size, align);

                while (base && lmbbase <= base) {
                        rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
                        if (rgn < 0) {
                                /* This area isn't reserved, take it */
                                if (lmb_add_region_flags(&io_lmb->used_mem, base,
                                                         size, LMB_NONE) < 0)
                                        return 0;

                                return base;
                        }

                        res_base = lmb_used[rgn].base;
                        if (res_base < size)
                                break;
                        base = ALIGN_DOWN(res_base - size, align);
                }
        }
        return 0;
}

long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
        return _lmb_free(&io_lmb->used_mem, base, size);
}
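/*
 * Illustrative usage sketch (not from the original source): an iommu driver
 * managing its own IOVA space would typically do something like
 *
 *	struct lmb iovad;
 *	phys_addr_t dva;
 *
 *	io_lmb_setup(&iovad);
 *	io_lmb_add(&iovad, 0x80000000, SZ_256M);
 *	dva = io_lmb_alloc(&iovad, SZ_4K, SZ_4K);
 *	...
 *	io_lmb_free(&iovad, dva, SZ_4K);
 *	io_lmb_teardown(&iovad);
 *
 * keeping its map fully separate from the global LMB memory map.
 */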
/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map.
 * So keep the global LMB variable declaration unreachable from them.
 */

static struct lmb lmb;

static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size,
                                 enum lmb_map_op op, u32 flags)
{
        if (CONFIG_IS_ENABLED(EFI_LOADER) &&
            !lmb.test && !(flags & LMB_NONOTIFY))
                return efi_map_update_notify(addr, size, op);

        return 0;
}
static void lmb_print_region_flags(u32 flags)
{
        const char * const flag_str[] = { "none", "no-map", "no-overwrite",
                                          "no-notify" };
        unsigned int pflags = flags &
                              (LMB_NOMAP | LMB_NOOVERWRITE | LMB_NONOTIFY);

        if (flags != pflags) {
                printf("invalid %#x\n", flags);
                return;
        }

        do {
                int bitpos = pflags ? fls(pflags) - 1 : 0;

                printf("%s", flag_str[bitpos]);
                pflags &= ~(1u << bitpos);
                puts(pflags ? ", " : "\n");
        } while (pflags);
}
static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
        struct lmb_region *rgn = lmb_rgn_lst->data;
        unsigned long long base, size, end;
        u32 flags;
        int i;

        printf(" %s.count = %#x\n", name, lmb_rgn_lst->count);

        for (i = 0; i < lmb_rgn_lst->count; i++) {
                base = rgn[i].base;
                size = rgn[i].size;
                end = base + size - 1;
                flags = rgn[i].flags;

                printf(" %s[%d]\t[%#llx-%#llx], %#llx bytes, flags: ",
                       name, i, base, end, size);
                lmb_print_region_flags(flags);
        }
}

void lmb_dump_all_force(void)
{
        printf("lmb_dump_all:\n");
        lmb_dump_region(&lmb.available_mem, "memory");
        lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
        lmb_dump_all_force();
#endif
}
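/*
 * Illustrative output sketch (assumed addresses, not captured from a real
 * board): with one DRAM bank and a couple of reservations,
 * lmb_dump_all_force() prints something along the lines of
 *
 *	lmb_dump_all:
 *	 memory.count = 0x1
 *	 memory[0]	[0x40000000-0x7fffffff], 0x40000000 bytes, flags: none
 *	 reserved.count = 0x2
 *	 reserved[0]	[0x7fb00000-0x7fffffff], 0x500000 bytes, flags: no-overwrite
 *	 reserved[1]	...
 */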
static void lmb_reserve_uboot_region(void)
{
        int bank;
        ulong bank_end;
        phys_addr_t rsv_start;

        rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;

        /*
         * Reserve memory from aligned address below the bottom of U-Boot stack
         * until end of RAM area to prevent LMB from overwriting that memory.
         */
        debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

        for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
                if (!gd->bd->bi_dram[bank].size ||
                    rsv_start < gd->bd->bi_dram[bank].start)
                        continue;
                /* Watch out for RAM at end of address space! */
                bank_end = gd->bd->bi_dram[bank].start +
                           gd->bd->bi_dram[bank].size - 1;
                if (rsv_start > bank_end)
                        continue;

                lmb_reserve(rsv_start, bank_end - rsv_start + 1, LMB_NOOVERWRITE);

                if (gd->flags & GD_FLG_SKIP_RELOC)
                        lmb_reserve((phys_addr_t)(uintptr_t)_start,
                                    gd->mon_len, LMB_NOOVERWRITE);

                break;
        }
}
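/*
 * Illustrative example (assumed numbers): with gd->start_addr_sp at
 * 0x7ff00000 and CONFIG_STACK_SIZE of 1 MiB, the range from 0x7fe00000 up to
 * the end of the DRAM bank containing it is reserved as LMB_NOOVERWRITE, so
 * image loading cannot clobber U-Boot, its stack or its heap.
 */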
static void lmb_reserve_common(void *fdt_blob)
{
        lmb_reserve_uboot_region();

        if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
                boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
        phys_addr_t rsv_start;
        phys_size_t rsv_size;

        /*
         * Assume an SPL stack of 16KB. This must be
         * more than enough for the SPL stage.
         */
        if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
                rsv_start = gd->start_addr_sp - 16384;
                rsv_size = 16384;
                lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
        }

        if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
                /* Reserve the bss region */
                rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
                rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
                           (phys_addr_t)(uintptr_t)__bss_start;
                lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
        }
}
/**
 * lmb_can_reserve_region() - check if the region can be reserved
 * @base: base address of region to be reserved
 * @size: size of region to be reserved
 * @flags: flag of the region to be reserved
 *
 * Go through all the reserved regions and ensure that the requested
 * region does not overlap with any existing regions. An overlap is
 * allowed only when the flags of both the requested region and the
 * existing region are LMB_NONE.
 *
 * Return: true if region can be reserved, false otherwise
 */
static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
                                   u32 flags)
{
        uint i;
        struct lmb_region *lmb_reserved = lmb.used_mem.data;

        for (i = 0; i < lmb.used_mem.count; i++) {
                u32 rgnflags = lmb_reserved[i].flags;
                phys_addr_t rgnbase = lmb_reserved[i].base;
                phys_size_t rgnsize = lmb_reserved[i].size;

                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
                        if (flags != LMB_NONE || flags != rgnflags)
                                return false;
                }
        }

        return true;
}
void lmb_add_memory(void)
{
        int i;
        phys_addr_t bank_end;
        phys_size_t size;
        u64 ram_top = gd->ram_top;
        struct bd_info *bd = gd->bd;

        if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
                return lmb_arch_add_memory();

        /* Assume a 4GB ram_top if not defined */
        if (!ram_top)
                ram_top = 0x100000000ULL;

        for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
                size = bd->bi_dram[i].size;
                bank_end = bd->bi_dram[i].start + size;

                if (size) {
                        lmb_add(bd->bi_dram[i].start, size);

                        /*
                         * Reserve memory above ram_top as
                         * no-overwrite so that it cannot be
                         * allocated
                         */
                        if (bd->bi_dram[i].start >= ram_top)
                                lmb_reserve(bd->bi_dram[i].start, size,
                                            LMB_NOOVERWRITE);
                        else if (bank_end > ram_top)
                                lmb_reserve(ram_top, bank_end - ram_top,
                                            LMB_NOOVERWRITE);
                }
        }
}
/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
        long ret;
        struct alist *lmb_rgn_lst = &lmb.available_mem;

        ret = lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
        if (ret)
                return ret;

        return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE);
}

long lmb_free_flags(phys_addr_t base, phys_size_t size,
                    u32 flags)
{
        long ret;

        ret = _lmb_free(&lmb.used_mem, base, size);
        if (ret < 0)
                return ret;

        return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
        return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
{
        long ret;
        struct alist *lmb_rgn_lst = &lmb.used_mem;

        if (!lmb_can_reserve_region(base, size, flags))
                return -EEXIST;

        ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
        if (ret)
                return ret;

        return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
}
static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
                                   phys_addr_t max_addr, u32 flags)
{
        int ret;
        long i, rgn;
        phys_addr_t base = 0;
        phys_addr_t res_base;
        struct lmb_region *lmb_used = lmb.used_mem.data;
        struct lmb_region *lmb_memory = lmb.available_mem.data;

        for (i = lmb.available_mem.count - 1; i >= 0; i--) {
                phys_addr_t lmbbase = lmb_memory[i].base;
                phys_size_t lmbsize = lmb_memory[i].size;

                if (lmbsize < size)
                        continue;

                if (max_addr == LMB_ALLOC_ANYWHERE) {
                        base = ALIGN_DOWN(lmbbase + lmbsize - size, align);
                } else if (lmbbase < max_addr) {
                        base = lmbbase + lmbsize;
                        if (base < lmbbase)
                                base = -1;
                        base = min(base, max_addr);
                        base = ALIGN_DOWN(base - size, align);
                } else {
                        continue;
                }

                while (base && lmbbase <= base) {
                        rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
                        if (rgn < 0) {
                                /* This area isn't reserved, take it */
                                if (lmb_add_region_flags(&lmb.used_mem, base,
                                                         size, flags) < 0)
                                        return 0;

                                ret = lmb_map_update_notify(base, size,
                                                            LMB_MAP_OP_RESERVE,
                                                            flags);
                                if (ret)
                                        return ret;

                                return base;
                        }

                        res_base = lmb_used[rgn].base;
                        if (res_base < size)
                                break;
                        base = ALIGN_DOWN(res_base - size, align);
                }
        }

        log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
                  __func__, (ulong)size, (ulong)max_addr);

        return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
        return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
                           u32 flags)
{
        return _lmb_alloc_base(size, align, max_addr, flags);
}
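/*
 * Illustrative usage sketch (assumed values, not from the original source):
 * a boot command that needs a 16 MiB scratch buffer below 2 GiB could do
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(SZ_16M, SZ_4K, 0x80000000, LMB_NONE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	lmb_free(buf, SZ_16M);
 *
 * The allocator walks the free list top-down and carves the highest
 * suitable aligned range out of it.
 */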
int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
        long rgn;
        struct lmb_region *lmb_memory = lmb.available_mem.data;

        /* Check if the requested address is in one of the memory regions */
        rgn = lmb_overlaps_region(&lmb.available_mem, base, size);
        if (rgn >= 0) {
                /*
                 * Check if the requested end address is in the same memory
                 * region
                 */
                if (lmb_addrs_overlap(lmb_memory[rgn].base,
                                      lmb_memory[rgn].size,
                                      base + size - 1, 1)) {
                        /* ok, reserve the memory */
                        if (!lmb_reserve(base, size, flags))
                                return 0;
                }
        }

        return -1;
}
/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
        int i;
        long rgn;
        struct lmb_region *lmb_used = lmb.used_mem.data;
        struct lmb_region *lmb_memory = lmb.available_mem.data;

        /* check if the requested address is in the memory regions */
        rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1);
        if (rgn >= 0) {
                for (i = 0; i < lmb.used_mem.count; i++) {
                        if (addr < lmb_used[i].base) {
                                /* first reserved range > requested address */
                                return lmb_used[i].base - addr;
                        }
                        if (lmb_used[i].base +
                            lmb_used[i].size > addr) {
                                /* requested addr is in this reserved range */
                                return 0;
                        }
                }
                /* if we come here: no reserved ranges above requested addr */
                return lmb_memory[lmb.available_mem.count - 1].base +
                       lmb_memory[lmb.available_mem.count - 1].size - addr;
        }

        return 0;
}
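/*
 * Illustrative example (assumed map): with memory [0x40000000, 0x7fffffff]
 * and a single reservation starting at 0x7ff00000,
 * lmb_get_free_size(0x50000000) returns 0x2ff00000, the gap up to the first
 * reservation above the requested address.
 */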
int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
        int i;
        struct lmb_region *lmb_used = lmb.used_mem.data;

        for (i = 0; i < lmb.used_mem.count; i++) {
                phys_addr_t upper = lmb_used[i].base +
                                    lmb_used[i].size - 1;
                if (addr >= lmb_used[i].base && addr <= upper)
                        return (lmb_used[i].flags & flags) == flags;
        }
        return 0;
}
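/*
 * Illustrative usage (not from the original source): callers typically use
 * this to skip regions that must not be touched, e.g.
 *
 *	if (lmb_is_reserved_flags(addr, LMB_NOMAP))
 *		continue;
 *
 * to step over addresses that sit inside a no-map reservation.
 */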
static int lmb_setup(bool test)
{
        bool ret;

        ret = alist_init(&lmb.available_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB free memory\n");
                return -ENOMEM;
        }

        ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB used memory\n");
                return -ENOMEM;
        }

        lmb.test = test;

        return 0;
}

int lmb_init(void)
{
        int ret;

        ret = lmb_setup(false);
        if (ret) {
                log_info("Unable to init LMB\n");
                return ret;
        }

        lmb_add_memory();

        /* Reserve the U-Boot image region once U-Boot has relocated */
        if (xpl_phase() == PHASE_SPL)
                lmb_reserve_common_spl();
        else if (xpl_phase() == PHASE_BOARD_R)
                lmb_reserve_common((void *)gd->fdt_blob);

        return 0;
}
struct lmb *lmb_get(void)
{
        return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
        int ret;

        /* Save the current global map and start a fresh test instance */
        *store = lmb;
        ret = lmb_setup(true);
        if (ret)
                return ret;

        return 0;
}

void lmb_pop(struct lmb *store)
{
        alist_uninit(&lmb.available_mem);
        alist_uninit(&lmb.used_mem);
        lmb = *store;
}
#endif /* UNIT_TEST */