// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */
#include <common.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>

#include <asm/global_data.h>
#include <asm/sections.h>
DECLARE_GLOBAL_DATA_PTR;

#define LMB_ALLOC_ANYWHERE      0
static void lmb_dump_region(struct lmb_region *rgn, char *name)
{
        unsigned long long base, size, end;
        enum lmb_flags flags;
        int i;

        printf(" %s.cnt = 0x%lx / max = 0x%lx\n", name, rgn->cnt, rgn->max);

        for (i = 0; i < rgn->cnt; i++) {
                base = rgn->region[i].base;
                size = rgn->region[i].size;
                end = base + size - 1;
                flags = rgn->region[i].flags;

                printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
                       name, i, base, end, size, flags);
        }
}
void lmb_dump_all_force(struct lmb *lmb)
{
        printf("lmb_dump_all:\n");
        lmb_dump_region(&lmb->memory, "memory");
        lmb_dump_region(&lmb->reserved, "reserved");
}
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
        lmb_dump_all_force(lmb);
#endif
}
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
                              phys_addr_t base2, phys_size_t size2)
{
        const phys_addr_t base1_end = base1 + size1 - 1;
        const phys_addr_t base2_end = base2 + size2 - 1;

        return ((base1 <= base2_end) && (base2 <= base1_end));
}
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
                               phys_addr_t base2, phys_size_t size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}
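/*
 * Illustrative example (not part of the original file): for ranges
 * [0x1000, 0x1FFF] (base1 = 0x1000, size1 = 0x1000) and
 * [0x2000, 0x2FFF] (base2 = 0x2000, size2 = 0x1000),
 * base2 == base1 + size1 holds, so the call returns 1 (range 2
 * immediately follows range 1). Swapping the arguments returns -1, and
 * any gap or overlap between the two ranges returns 0.
 */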
static long lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1,
                                unsigned long r2)
{
        phys_addr_t base1 = rgn->region[r1].base;
        phys_size_t size1 = rgn->region[r1].size;
        phys_addr_t base2 = rgn->region[r2].base;
        phys_size_t size2 = rgn->region[r2].size;

        return lmb_addrs_overlap(base1, size1, base2, size2);
}
static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
                                 unsigned long r2)
{
        phys_addr_t base1 = rgn->region[r1].base;
        phys_size_t size1 = rgn->region[r1].size;
        phys_addr_t base2 = rgn->region[r2].base;
        phys_size_t size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
                rgn->region[i].flags = rgn->region[i + 1].flags;
        }
        rgn->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
                                 unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct lmb_region *rgn, unsigned long r1,
                                     unsigned long r2)
{
        phys_addr_t base1 = rgn->region[r1].base;
        phys_size_t size1 = rgn->region[r1].size;
        phys_addr_t base2 = rgn->region[r2].base;
        phys_size_t size2 = rgn->region[r2].size;

        if (base1 + size1 > base2 + size2) {
                printf("This case should never happen\n");
                return;
        }
        rgn->region[r1].size = base2 + size2 - base1;
        lmb_remove_region(rgn, r2);
}
void lmb_init(struct lmb *lmb)
{
#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
        lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
        lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
#else
        lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
        lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
        lmb->memory.region = lmb->memory_regions;
        lmb->reserved.region = lmb->reserved_regions;
#endif
        lmb->memory.cnt = 0;
        lmb->reserved.cnt = 0;
}
void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
{
        ulong bank_end;
        int bank;

        /*
         * Reserve memory from aligned address below the bottom of U-Boot stack
         * until end of U-Boot area using LMB to prevent U-Boot from overwriting
         * anything belonging to U-Boot.
         */
        debug("## Current stack ends at 0x%08lx ", sp);

        /* adjust sp by 4K to be safe */
        sp -= align;
        for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
                if (!gd->bd->bi_dram[bank].size ||
                    sp < gd->bd->bi_dram[bank].start)
                        continue;
                /* Watch out for RAM at end of address space! */
                bank_end = gd->bd->bi_dram[bank].start +
                           gd->bd->bi_dram[bank].size - 1;
                if (sp > bank_end)
                        continue;
                if (bank_end > end)
                        bank_end = end - 1;

                lmb_reserve(lmb, sp, bank_end - sp + 1);

                if (gd->flags & GD_FLG_SKIP_RELOC)
                        lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start,
                                    gd->mon_len);

                break;
        }
}
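/*
 * Usage sketch (illustrative only, not part of the original file): an
 * architecture typically implements the arch_lmb_reserve() hook as a
 * thin wrapper around this generic helper, passing its current stack
 * pointer and an alignment margin. The get_sp() helper and the 4 KiB
 * margin below are assumptions for the example, not fixed API:
 *
 *      void arch_lmb_reserve(struct lmb *lmb)
 *      {
 *              arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, SZ_4K);
 *      }
 */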
/**
 * efi_lmb_reserve() - add reservations for EFI memory
 *
 * Add reservations for all EFI memory areas that are not
 * EFI_CONVENTIONAL_MEMORY.
 *
 * @lmb:        lmb environment
 * Return:      0 on success, 1 on failure
 */
static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
{
        struct efi_mem_desc *memmap = NULL, *map;
        efi_uintn_t i, map_size = 0;
        efi_status_t ret;

        ret = efi_get_memory_map_alloc(&map_size, &memmap);
        if (ret != EFI_SUCCESS)
                return 1;

        for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
                if (map->type != EFI_CONVENTIONAL_MEMORY) {
                        lmb_reserve_flags(lmb,
                                          map_to_sysmem((void *)(uintptr_t)
                                                        map->physical_start),
                                          map->num_pages * EFI_PAGE_SIZE,
                                          map->type == EFI_RESERVED_MEMORY_TYPE ?
                                          LMB_NOMAP : LMB_NONE);
                }
        }
        efi_free_pool(memmap);

        return 0;
}
static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
{
        arch_lmb_reserve(lmb);
        board_lmb_reserve(lmb);

        if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
                boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);

        if (CONFIG_IS_ENABLED(EFI_LOADER))
                efi_lmb_reserve(lmb);
}
/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
{
        int i;

        lmb_init(lmb);

        for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
                if (bd->bi_dram[i].size) {
                        lmb_add(lmb, bd->bi_dram[i].start,
                                bd->bi_dram[i].size);
                }
        }

        lmb_reserve_common(lmb, fdt_blob);
}
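/*
 * Usage sketch (illustrative only, not part of the original file):
 * boot code typically builds a fresh LMB view right before loading
 * images:
 *
 *      struct lmb lmb;
 *
 *      lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 *
 * After this call, lmb describes every DRAM bank minus U-Boot's own
 * stack/monitor footprint and any FDT/EFI reservations.
 */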
/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
                                phys_size_t size, void *fdt_blob)
{
        lmb_init(lmb);
        lmb_add(lmb, base, size);
        lmb_reserve_common(lmb, fdt_blob);
}
/* This routine is called with relocation disabled. */
static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
                                 phys_size_t size, enum lmb_flags flags)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        if (rgn->cnt == 0) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
                rgn->region[0].flags = flags;
                rgn->cnt = 1;
                return 0;
        }

        /* First try and coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                phys_addr_t rgnbase = rgn->region[i].base;
                phys_size_t rgnsize = rgn->region[i].size;
                phys_size_t rgnflags = rgn->region[i].flags;
                phys_addr_t end = base + size - 1;
                phys_addr_t rgnend = rgnbase + rgnsize - 1;

                if (rgnbase <= base && end <= rgnend) {
                        if (flags == rgnflags)
                                /* Already have this region, so we're done */
                                return 0;
                        else
                                return -1; /* regions with new flags */
                }

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        if (flags != rgnflags)
                                break;
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        if (flags != rgnflags)
                                break;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
                        /* regions overlap */
                        return -1;
                }
        }

        if (i < rgn->cnt - 1 && rgn->region[i].flags == rgn->region[i + 1].flags) {
                if (lmb_regions_adjacent(rgn, i, i + 1)) {
                        lmb_coalesce_regions(rgn, i, i + 1);
                        coalesced++;
                } else if (lmb_regions_overlap(rgn, i, i + 1)) {
                        /* fix overlapping area */
                        lmb_fix_over_lap_regions(rgn, i, i + 1);
                        coalesced++;
                }
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= rgn->max)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i + 1].base = rgn->region[i].base;
                        rgn->region[i + 1].size = rgn->region[i].size;
                        rgn->region[i + 1].flags = rgn->region[i].flags;
                } else {
                        rgn->region[i + 1].base = base;
                        rgn->region[i + 1].size = size;
                        rgn->region[i + 1].flags = flags;
                        break;
                }
        }

        if (base < rgn->region[0].base) {
                rgn->region[0].base = base;
                rgn->region[0].size = size;
                rgn->region[0].flags = flags;
        }

        rgn->cnt++;

        return 0;
}
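/*
 * Illustrative example (not part of the original file): with an empty
 * region list, adding [0x1000, size 0x1000) creates region 0. Adding
 * [0x2000, size 0x1000) with the same flags is detected as adjacent
 * (lmb_addrs_adjacent() returns -1) and simply grows region 0 to size
 * 0x2000 instead of consuming a new table slot. Adding [0x1800, size
 * 0x100) afterwards returns 0 without change, since the range is
 * already covered; the same range with different flags would return -1.
 */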
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
                           phys_size_t size)
{
        return lmb_add_region_flags(rgn, base, size, LMB_NONE);
}
/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        struct lmb_region *_rgn = &(lmb->memory);

        return lmb_add_region(_rgn, base, size);
}
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        struct lmb_region *rgn = &(lmb->reserved);
        phys_addr_t rgnbegin, rgnend;
        phys_addr_t end = base + size - 1;
        int i;

        rgnbegin = rgnend = 0; /* suppress gcc warnings */

        /* Find the region where (base, size) belongs to */
        for (i = 0; i < rgn->cnt; i++) {
                rgnbegin = rgn->region[i].base;
                rgnend = rgnbegin + rgn->region[i].size - 1;

                if ((rgnbegin <= base) && (end <= rgnend))
                        break;
        }

        /* Didn't find the region */
        if (i == rgn->cnt)
                return -1;

        /* Check to see if we are removing entire region */
        if ((rgnbegin == base) && (rgnend == end)) {
                lmb_remove_region(rgn, i);
                return 0;
        }

        /* Check to see if region is matching at the front */
        if (rgnbegin == base) {
                rgn->region[i].base = end + 1;
                rgn->region[i].size -= size;
                return 0;
        }

        /* Check to see if the region is matching at the end */
        if (rgnend == end) {
                rgn->region[i].size -= size;
                return 0;
        }

        /*
         * We need to split the entry - adjust the current one to the
         * beginning of the hole and add the region after the hole.
         */
        rgn->region[i].size = base - rgn->region[i].base;
        return lmb_add_region_flags(rgn, end + 1, rgnend - end,
                                    rgn->region[i].flags);
}
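/*
 * Illustrative example (not part of the original file): freeing
 * [0x2000, size 0x1000) out of a reservation [0x1000, size 0x3000)
 * takes the split path above: the existing entry is truncated to
 * [0x1000, size 0x1000) and a new entry [0x3000, size 0x1000) is added
 * with the same flags, leaving a hole where the freed range was.
 */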
long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
                       enum lmb_flags flags)
{
        struct lmb_region *_rgn = &(lmb->reserved);

        return lmb_add_region_flags(_rgn, base, size, flags);
}
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
                                phys_size_t size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                phys_addr_t rgnbase = rgn->region[i].base;
                phys_size_t rgnsize = rgn->region[i].size;

                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}
phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
        return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}
phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
                           phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __lmb_alloc_base(lmb, size, align, max_addr);

        if (alloc == 0)
                printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
                       (ulong)size, (ulong)max_addr);

        return alloc;
}
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
        return addr & ~(size - 1);
}
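/*
 * Note (added for clarity, not in the original file): the mask trick
 * above assumes "size" is a power of two; e.g.
 * lmb_align_down(0x1234, 0x1000) yields 0x1000. A non-power-of-two
 * alignment would silently produce a wrong result.
 */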
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
                             phys_addr_t max_addr)
{
        long i, rgn;
        phys_addr_t base = 0;
        phys_addr_t res_base;

        for (i = lmb->memory.cnt - 1; i >= 0; i--) {
                phys_addr_t lmbbase = lmb->memory.region[i].base;
                phys_size_t lmbsize = lmb->memory.region[i].size;

                if (lmbsize < size)
                        continue;
                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = lmb_align_down(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = lmbbase + lmbsize;
                        if (base < lmbbase)
                                base = -1;
                        base = min(base, max_addr);
                        base = lmb_align_down(base - size, align);
                } else
                        continue;

                while (base && lmbbase <= base) {
                        rgn = lmb_overlaps_region(&lmb->reserved, base, size);
                        if (rgn < 0) {
                                /* This area isn't reserved, take it */
                                if (lmb_add_region(&lmb->reserved, base,
                                                   size) < 0)
                                        return 0;
                                return base;
                        }
                        res_base = lmb->reserved.region[rgn].base;
                        if (res_base < size)
                                break;
                        base = lmb_align_down(res_base - size, align);
                }
        }
        return 0;
}
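/*
 * Illustrative example (not part of the original file): with one
 * memory region [0x00000000, size 0x10000000) and a reservation at
 * [0x0ff00000, size 0x00100000), the call
 *
 *      base = __lmb_alloc_base(lmb, 0x100000, 0x1000, LMB_ALLOC_ANYWHERE);
 *
 * first tries the top of RAM (0x0ff00000), finds it reserved, steps
 * down below the reservation and returns 0x0fe00000, which is also
 * recorded in the reserved list.
 */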
/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
        long rgn;

        /* Check if the requested address is in one of the memory regions */
        rgn = lmb_overlaps_region(&lmb->memory, base, size);
        if (rgn >= 0) {
                /*
                 * Check if the requested end address is in the same memory
                 * region we found.
                 */
                if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
                                      lmb->memory.region[rgn].size,
                                      base + size - 1, 1)) {
                        /* ok, reserve the memory */
                        if (lmb_reserve(lmb, base, size) >= 0)
                                return base;
                }
        }
        return 0;
}
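/*
 * Usage sketch (illustrative only, not part of the original file): a
 * loader that wants an image at a fixed address can claim the range up
 * front and fall back to a dynamic allocation on failure. The
 * load_addr/image_size variables are hypothetical:
 *
 *      if (lmb_alloc_addr(&lmb, load_addr, image_size) != load_addr)
 *              load_addr = lmb_alloc(&lmb, image_size, SZ_4K);
 */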
/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
        int i;
        long rgn;

        /* check if the requested address is in the memory regions */
        rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
        if (rgn >= 0) {
                for (i = 0; i < lmb->reserved.cnt; i++) {
                        if (addr < lmb->reserved.region[i].base) {
                                /* first reserved range > requested address */
                                return lmb->reserved.region[i].base - addr;
                        }
                        if (lmb->reserved.region[i].base +
                            lmb->reserved.region[i].size > addr) {
                                /* requested addr is in this reserved range */
                                return 0;
                        }
                }
                /* if we come here: no reserved ranges above requested addr */
                return lmb->memory.region[lmb->memory.cnt - 1].base +
                       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
        }
        return 0;
}
int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
{
        int i;

        for (i = 0; i < lmb->reserved.cnt; i++) {
                phys_addr_t upper = lmb->reserved.region[i].base +
                                    lmb->reserved.region[i].size - 1;

                if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
                        return (lmb->reserved.region[i].flags & flags) == flags;
        }
        return 0;
}
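/*
 * Illustrative example (not part of the original file): a region
 * reserved with LMB_NOMAP can be tested for that property directly:
 *
 *      lmb_reserve_flags(&lmb, base, size, LMB_NOMAP);
 *      if (lmb_is_reserved_flags(&lmb, base, LMB_NOMAP))
 *              skip_mapping(base, size);
 *
 * Passing LMB_NONE (0) makes the flag test trivially true, so
 * lmb_is_reserved() below reduces to a plain containment check.
 */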
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
        return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
}
__weak void board_lmb_reserve(struct lmb *lmb)
{
        /* please define platform specific board_lmb_reserve() */
}
__weak void arch_lmb_reserve(struct lmb *lmb)
{
        /* please define platform specific arch_lmb_reserve() */
}
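/*
 * Usage sketch (illustrative only, not part of the original file): a
 * board overrides the weak hook to protect board-specific carve-outs.
 * The address and size below are made-up placeholder values:
 *
 *      void board_lmb_reserve(struct lmb *lmb)
 *      {
 *              lmb_reserve(lmb, 0x80000000, 0x100000);
 *      }
 */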