// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_RGN_OVERLAP		1
#define LMB_RGN_ADJACENT	2

/*
 * The following low level LMB functions must not access the global LMB memory
 * map since they are also used to manage IOVA memory maps in iommu drivers
 * like apple_dart.
 */

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
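
/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above classify address ranges. End addresses are inclusive, so a
 * 0x1000-byte region at 0x1000 ends at 0x1fff and is adjacent to one
 * starting at 0x2000. The addresses and example function are made up.
 */
#if 0	/* example only */
static void lmb_addrs_example(void)
{
	/* [0x1000, 0x1fff] and [0x1800, 0x27ff] share bytes: overlap */
	assert(lmb_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000));
	/* base2 == base1 + size1: region 2 follows region 1, returns 1 */
	assert(lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) == 1);
	/* base1 == base2 + size2: region 1 follows region 2, returns -1 */
	assert(lmb_addrs_adjacent(0x2000, 0x1000, 0x1000, 0x1000) == -1);
	/* neither touching nor overlapping: returns 0 */
	assert(!lmb_addrs_adjacent(0x4000, 0x1000, 0x1000, 0x1000));
}
#endif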

/**
 * lmb_regions_check() - Check if the regions overlap, or are adjacent
 * @lmb_rgn_lst: List of LMB regions
 * @r1: First region to check
 * @r2: Second region to check
 *
 * Check whether the two regions r1 and r2 are adjacent to each other or
 * overlap. Regions are only compared if their flags match; a flags
 * mismatch reports neither condition.
 *
 * Return:
 * * %LMB_RGN_OVERLAP - Regions overlap
 * * %LMB_RGN_ADJACENT - Regions adjacent to each other
 * * %0 - Neither of the above, or flags mismatch
 */
static long lmb_regions_check(struct alist *lmb_rgn_lst, unsigned long r1,
			      unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (rgn[r1].flags != rgn[r2].flags)
		return 0;

	if (lmb_addrs_overlap(base1, size1, base2, size2))
		return LMB_RGN_OVERLAP;
	else if (lmb_addrs_adjacent(base1, size1, base2, size2))
		return LMB_RGN_ADJACENT;

	return 0;
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}
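
/*
 * Illustrative sketch (not part of the original file): merging overlapping
 * LMB_NONE regions. The list contents and addresses below are hypothetical.
 */
#if 0	/* example only */
static long lmb_resize_example(struct alist *lst)
{
	/* assume lst holds [0x1000, +0x1000] and [0x3000, +0x1000], LMB_NONE */
	/*
	 * The request [0x1800, 0x37ff] overlaps both entries; on success the
	 * list collapses to one region, base 0x1000, size 0x3000. Any
	 * overlapped entry with flags != LMB_NONE makes this return -1.
	 */
	return lmb_resize_regions(lst, 0, 0x1800, 0x2000);
}
#endif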

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return:
 * * %0 - Added successfully, or it's already added (only if LMB_NONE)
 * * %-EEXIST - The region is already added, and flags != LMB_NONE
 * * %-1 - Failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, u32 flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		u32 rgnflags = rgn[i].flags;

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				continue;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
			if (ret < 0)
				return -1;

			coalesced++;
			break;
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		ret = lmb_regions_check(lmb_rgn_lst, i, i + 1);
		if (ret == LMB_RGN_ADJACENT) {
			lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
			coalesced++;
		} else if (ret == LMB_RGN_OVERLAP) {
			/* fix overlapping areas */
			phys_addr_t rgnbase = rgn[i].base;
			phys_size_t rgnsize = rgn[i].size;

			ret = lmb_resize_regions(lmb_rgn_lst, i,
						 rgnbase, rgnsize);
			if (ret < 0)
				return -1;

			coalesced++;
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}
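
/*
 * Illustrative sketch (not part of the original file): two adjacent
 * LMB_NONE additions coalesce into a single list entry. Addresses are
 * hypothetical; the list is assumed freshly initialised via alist_init().
 */
#if 0	/* example only */
static void lmb_add_region_example(struct alist *lst)
{
	lmb_add_region_flags(lst, 0x40000000, 0x1000, LMB_NONE);
	lmb_add_region_flags(lst, 0x40001000, 0x1000, LMB_NONE);
	/* lst->count == 1: one region, base 0x40000000, size 0x2000 */
}
#endif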

static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
		      phys_size_t size)
{
	struct lmb_region *rgn;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	/* Suppress GCC warnings */
	rgnbegin = 0;
	rgnend = 0;

	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if (rgnbegin <= base && end <= rgnend)
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing entire region */
	if (rgnbegin == base && rgnend == end) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}
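
/*
 * Illustrative sketch (not part of the original file): freeing a span from
 * the middle of a region splits it in two. Addresses are hypothetical.
 */
#if 0	/* example only */
static void lmb_free_example(struct alist *used)
{
	/* assume used holds one region covering [0x1000, 0x3fff] */
	_lmb_free(used, 0x2000, 0x1000);
	/* used now holds [0x1000, 0x1fff] and [0x3000, 0x3fff] */
}
#endif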

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory map.
 */

int io_lmb_setup(struct lmb *io_lmb)
{
	int ret;

	ret = alist_init(&io_lmb->available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free IOVA\n");
		return -ENOMEM;
	}

	ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used IOVA\n");
		return -ENOMEM;
	}

	io_lmb->test = false;

	return 0;
}

void io_lmb_teardown(struct lmb *io_lmb)
{
	alist_uninit(&io_lmb->available_mem);
	alist_uninit(&io_lmb->used_mem);
}

long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_add_region_flags(&io_lmb->available_mem, base, size, LMB_NONE);
}

/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = io_lmb->used_mem.data;
	struct lmb_region *lmb_memory = io_lmb->available_mem.data;

	for (i = io_lmb->available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		base = ALIGN_DOWN(lmbbase + lmbsize - size, align);

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&io_lmb->used_mem, base,
							 size, LMB_NONE) < 0)
					return 0;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}
	return 0;
}

long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return _lmb_free(&io_lmb->used_mem, base, size);
}
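
/*
 * Illustrative sketch (not part of the original file): a driver-private
 * IOVA map, as an iommu driver might keep. The window base and sizes are
 * made up; the SZ_* constants come from <linux/sizes.h> included above.
 */
#if 0	/* example only */
static void io_lmb_example(void)
{
	struct lmb iova;
	phys_addr_t dva;

	if (io_lmb_setup(&iova))
		return;
	io_lmb_add(&iova, 0x100000000ULL, SZ_256M);	/* hypothetical window */
	dva = io_lmb_alloc(&iova, SZ_4K, SZ_4K);	/* top-down, page-aligned */
	if (dva)
		io_lmb_free(&iova, dva, SZ_4K);
	io_lmb_teardown(&iova);
}
#endif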

/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map.
 * So keep the global LMB variable declaration unreachable from them.
 */

static struct lmb lmb;

static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size,
				 enum lmb_map_op op, u32 flags)
{
	if (CONFIG_IS_ENABLED(EFI_LOADER) &&
	    !lmb.test && !(flags & LMB_NONOTIFY))
		return efi_map_update_notify(addr, size, op);

	return 0;
}

static void lmb_print_region_flags(u32 flags)
{
	const char * const flag_str[] = { "none", "no-map", "no-overwrite",
					  "no-notify" };
	unsigned int pflags = flags &
			      (LMB_NOMAP | LMB_NOOVERWRITE | LMB_NONOTIFY);

	if (flags != pflags) {
		printf("invalid %#x\n", flags);
		return;
	}

	do {
		int bitpos = pflags ? fls(pflags) - 1 : 0;

		printf("%s", flag_str[bitpos]);
		pflags &= ~(1u << bitpos);
		puts(pflags ? ", " : "\n");
	} while (pflags);
}

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	u32 flags;
	int i;

	printf(" %s.count = %#x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[%#llx-%#llx], %#llx bytes, flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.available_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of RAM area to prevent LMB from overwriting that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			   gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(rsv_start, bank_end - rsv_start + 1, LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start,
				    gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			   (phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_can_reserve_region() - check if the region can be reserved
 * @base: base address of region to be reserved
 * @size: size of region to be reserved
 * @flags: flag of the region to be reserved
 *
 * Go through all the reserved regions and ensure that the requested
 * region does not overlap with any existing regions. An overlap is
 * allowed only when the flags of both the requested region and the
 * existing region are LMB_NONE.
 *
 * Return: true if region can be reserved, false otherwise
 */
static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
				   u32 flags)
{
	uint i;
	struct lmb_region *lmb_reserved = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		u32 rgnflags = lmb_reserved[i].flags;
		phys_addr_t rgnbase = lmb_reserved[i].base;
		phys_size_t rgnsize = lmb_reserved[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags != LMB_NONE || flags != rgnflags)
				return false;
		}
	}

	return true;
}
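
/*
 * Illustrative sketch (not part of the original file): with a region
 * [0x1000, +0x1000, LMB_NONE] assumed already reserved, an overlapping
 * LMB_NONE request is still allowed while any flagged request is refused.
 */
#if 0	/* example only */
static void lmb_can_reserve_example(void)
{
	bool ok;

	ok = lmb_can_reserve_region(0x1800, 0x1000, LMB_NONE);	/* true */
	ok = lmb_can_reserve_region(0x1800, 0x1000, LMB_NOOVERWRITE); /* false */
}
#endif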

void lmb_add_memory(void)
{
	int i;
	phys_addr_t bank_end;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		bank_end = bd->bi_dram[i].start + size;

		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve(bd->bi_dram[i].start, size,
					    LMB_NOOVERWRITE);
			else if (bank_end > ram_top)
				lmb_reserve(ram_top, bank_end - ram_top,
					    LMB_NOOVERWRITE);
		}
	}
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.available_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE);
}

long lmb_free_flags(phys_addr_t base, phys_size_t size, uint flags)
{
	long ret;

	ret = _lmb_free(&lmb.used_mem, base, size);
	if (ret < 0)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	if (!lmb_can_reserve_region(base, size, flags))
		return -EEXIST;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
}
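
/*
 * Illustrative sketch (not part of the original file): reserving a
 * hypothetical carveout. LMB_NOOVERWRITE keeps later allocations and
 * overlapping reservations out of the range; an overlap yields -EEXIST.
 */
#if 0	/* example only */
static void lmb_reserve_example(void)
{
	if (lmb_reserve(0x80000000, SZ_1M, LMB_NOOVERWRITE) == -EEXIST)
		printf("range already reserved\n");
}
#endif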

static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, u32 flags)
{
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	for (i = lmb.available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;

		if (max_addr == LMB_ALLOC_ANYWHERE) {
			base = ALIGN_DOWN(lmbbase + lmbsize - size, align);
		} else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = ALIGN_DOWN(base - size, align);
		} else {
			continue;
		}

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				ret = lmb_map_update_notify(base, size,
							    LMB_MAP_OP_RESERVE,
							    flags);
				if (ret)
					return ret;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}

	log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
		  __func__, (ulong)size, (ulong)max_addr);

	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
			   uint flags)
{
	return _lmb_alloc_base(size, align, max_addr, flags);
}

int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (!lmb_reserve(base, size, flags))
				return 0;
		}
	}

	return -1;
}
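
/*
 * Illustrative sketch (not part of the original file): the two allocation
 * styles above. The limits, sizes, and addresses are hypothetical.
 */
#if 0	/* example only */
static void lmb_alloc_example(void)
{
	phys_addr_t buf;

	/* highest free 1MiB block, 4KiB-aligned, wholly below 4GiB */
	buf = lmb_alloc_base(SZ_1M, SZ_4K, 0x100000000ULL, LMB_NONE);
	if (buf)
		lmb_free(buf, SZ_1M);

	/* or claim a specific range, if it lies inside free memory */
	if (!lmb_alloc_addr(0x42000000, SZ_64K, LMB_NONE))
		lmb_free(0x42000000, SZ_64K);
}
#endif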

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.available_mem.count - 1].base +
		       lmb_memory[lmb.available_mem.count - 1].size - addr;
	}
	return 0;
}
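
/*
 * Illustrative sketch (not part of the original file): assuming memory
 * [0, SZ_1G) with [SZ_512M, +SZ_1M] reserved, the gap below a reservation
 * and a hit inside one look like this.
 */
#if 0	/* example only */
static void lmb_get_free_size_example(void)
{
	assert(lmb_get_free_size(SZ_256M) == SZ_256M);	/* up to SZ_512M */
	assert(lmb_get_free_size(SZ_512M) == 0);	/* inside reservation */
}
#endif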

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;
		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.available_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */
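
/*
 * Illustrative sketch (not part of the original file): how a unit test can
 * swap in a private map with lmb_push()/lmb_pop(). Test RAM is hypothetical.
 */
#if 0	/* example only */
static int lmb_test_example(void)
{
	struct lmb store;
	int ret;

	ret = lmb_push(&store);	/* save the live map, start a fresh one */
	if (ret)
		return ret;
	lmb_add(0x40000000, SZ_128M);	/* hypothetical test RAM */
	lmb_pop(&store);	/* drop the test map, restore the live one */

	return 0;
}
#endif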