// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

#define LMB_RGN_OVERLAP		1
#define LMB_RGN_ADJACENT	2

/*
 * The following low level LMB functions must not access the global LMB
 * memory map since they are also used to manage IOVA memory maps in iommu
 * drivers like apple_dart.
 */

static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
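
/*
 * A quick sketch of the two helpers above, with made-up addresses:
 *
 *	lmb_addrs_overlap(0x1000, 0x100, 0x10ff, 0x10) is true: the two
 *	ranges share the byte at 0x10ff;
 *	lmb_addrs_adjacent(0x1000, 0x100, 0x1100, 0x10) returns 1: region 2
 *	starts exactly where region 1 ends;
 *	lmb_addrs_adjacent(0x1100, 0x10, 0x1000, 0x100) returns -1: the same
 *	pair with the arguments swapped.
 */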

/**
 * lmb_regions_check() - Check if the regions overlap, or are adjacent
 * @lmb_rgn_lst: List of LMB regions
 * @r1: First region to check
 * @r2: Second region to check
 *
 * Check if the two regions with matching flags, r1 and r2, are
 * adjacent to each other, or if they overlap.
 *
 * Return:
 * * %LMB_RGN_OVERLAP	- Regions overlap
 * * %LMB_RGN_ADJACENT	- Regions adjacent to each other
 * * %0			- Neither of the above, or flags mismatch
 */
static long lmb_regions_check(struct alist *lmb_rgn_lst, unsigned long r1,
			      unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (rgn[r1].flags != rgn[r2].flags)
		return 0;

	if (lmb_addrs_overlap(base1, size1, base2, size2))
		return LMB_RGN_OVERLAP;
	else if (lmb_addrs_adjacent(base1, size1, base2, size2))
		return LMB_RGN_ADJACENT;

	return 0;
}

static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which the region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return:
 * * %0	- Added successfully, or it's already added (only if LMB_NONE)
 * * %-EEXIST	- The region is already added, and flags != LMB_NONE
 * * %-1	- Failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, u32 flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		u32 rgnflags = rgn[i].flags;

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				continue;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags != LMB_NONE)
				return -EEXIST;

			ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
			if (ret < 0)
				return -1;

			coalesced++;
			break;
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		ret = lmb_regions_check(lmb_rgn_lst, i, i + 1);
		if (ret == LMB_RGN_ADJACENT) {
			lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
			coalesced++;
		} else if (ret == LMB_RGN_OVERLAP) {
			/* fix overlapping areas */
			phys_addr_t rgnbase = rgn[i].base;
			phys_size_t rgnsize = rgn[i].size;

			ret = lmb_resize_regions(lmb_rgn_lst, i,
						 rgnbase, rgnsize);
			if (ret < 0)
				return -1;

			coalesced++;
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
		      phys_size_t size)
{
	struct lmb_region *rgn;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	/* Suppress GCC warnings */
	rgnbegin = 0;
	rgnend = 0;

	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if (rgnbegin <= base && end <= rgnend)
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing the entire region */
	if (rgnbegin == base && rgnend == end) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}
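
/*
 * Freeing the middle of a region splits it. A made-up sketch: with a
 * single region [0x1000, 0x1fff] in the list, the call
 *
 *	_lmb_free(lmb_rgn_lst, 0x1400, 0x200);
 *
 * shrinks the existing entry to [0x1000, 0x13ff] and adds a new region
 * [0x1600, 0x1fff] carrying the same flags.
 */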

static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory
 * map.
 */

int io_lmb_setup(struct lmb *io_lmb)
{
	int ret;

	ret = alist_init(&io_lmb->available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free IOVA\n");
		return -ENOMEM;
	}

	ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used IOVA\n");
		return -ENOMEM;
	}

	io_lmb->test = false;

	return 0;
}

void io_lmb_teardown(struct lmb *io_lmb)
{
	alist_uninit(&io_lmb->available_mem);
	alist_uninit(&io_lmb->used_mem);
}

long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_add_region_flags(&io_lmb->available_mem, base, size,
				    LMB_NONE);
}

/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = io_lmb->used_mem.data;
	struct lmb_region *lmb_memory = io_lmb->available_mem.data;

	for (i = io_lmb->available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		base = ALIGN_DOWN(lmbbase + lmbsize - size, align);

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&io_lmb->used_mem, base,
						  size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&io_lmb->used_mem,
							 base, size,
							 LMB_NONE) < 0)
					return 0;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}
	return 0;
}

long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return _lmb_free(&io_lmb->used_mem, base, size);
}
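
/*
 * Typical IOVA-map usage from an iommu driver, sketched with made-up
 * addresses: set up the two lists, describe the usable IOVA window, then
 * carve allocations out of it. io_lmb_alloc() returns 0 on failure.
 *
 *	struct lmb iova;
 *	phys_addr_t dva;
 *
 *	if (io_lmb_setup(&iova))
 *		return -ENOMEM;
 *	io_lmb_add(&iova, 0x80000000, SZ_256M);
 *	dva = io_lmb_alloc(&iova, SZ_1M, SZ_4K);
 *	if (!dva)
 *		return -ENOMEM;
 *	...
 *	io_lmb_free(&iova, dva, SZ_1M);
 *	io_lmb_teardown(&iova);
 */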

/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map.
 * So keep the global LMB variable declaration unreachable from them.
 */

static struct lmb lmb;

static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size,
				 enum lmb_map_op op, u32 flags)
{
	if (CONFIG_IS_ENABLED(EFI_LOADER) &&
	    !lmb.test && !(flags & LMB_NONOTIFY))
		return efi_map_update_notify(addr, size, op);

	return 0;
}

static void lmb_print_region_flags(u32 flags)
{
	const char * const flag_str[] = { "none", "no-map", "no-overwrite",
					  "no-notify" };
	unsigned int pflags = flags &
			      (LMB_NOMAP | LMB_NOOVERWRITE | LMB_NONOTIFY);

	if (flags != pflags) {
		printf("invalid %#x\n", flags);
		return;
	}

	do {
		int bitpos = pflags ? fls(pflags) - 1 : 0;

		printf("%s", flag_str[bitpos]);
		pflags &= ~(1u << bitpos);
		puts(pflags ? ", " : "\n");
	} while (pflags);
}

static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	u32 flags;
	int i;

	printf(" %s.count = %#x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[%#llx-%#llx], %#llx bytes, flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.available_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot
	 * stack until end of RAM area to prevent LMB from overwriting that
	 * memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			   gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(rsv_start, bank_end - rsv_start + 1,
			    LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start,
				    gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			   (phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

/**
 * lmb_can_reserve_region() - check if the region can be reserved
 * @base: base address of region to be reserved
 * @size: size of region to be reserved
 * @flags: flag of the region to be reserved
 *
 * Go through all the reserved regions and ensure that the requested
 * region does not overlap with any existing region. An overlap is
 * allowed only when the flags of both the requested region and the
 * existing region are LMB_NONE.
 *
 * Return: true if region can be reserved, false otherwise
 */
static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
				   u32 flags)
{
	uint i;
	struct lmb_region *lmb_reserved = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		u32 rgnflags = lmb_reserved[i].flags;
		phys_addr_t rgnbase = lmb_reserved[i].base;
		phys_size_t rgnsize = lmb_reserved[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* Overlap is allowed only between LMB_NONE regions */
			if (flags != LMB_NONE || flags != rgnflags)
				return false;
		}
	}

	return true;
}

void lmb_add_memory(void)
{
	int i;
	phys_addr_t bank_end;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		bank_end = bd->bi_dram[i].start + size;

		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve(bd->bi_dram[i].start, size,
					    LMB_NOOVERWRITE);
			else if (bank_end > ram_top)
				lmb_reserve(ram_top, bank_end - ram_top,
					    LMB_NOOVERWRITE);
		}
	}
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.available_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE);
}
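
/*
 * Adjacent additions with the same flags coalesce. A made-up sketch:
 * starting from an empty memory list, the two calls
 *
 *	lmb_add(0x40000000, SZ_512M);
 *	lmb_add(0x60000000, SZ_512M);
 *
 * leave a single region [0x40000000, 0x7fffffff] in available_mem rather
 * than two entries, since 0x40000000 + SZ_512M == 0x60000000.
 */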

long lmb_free_flags(phys_addr_t base, phys_size_t size,
		    uint flags)
{
	long ret;

	ret = _lmb_free(&lmb.used_mem, base, size);
	if (ret < 0)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	if (!lmb_can_reserve_region(base, size, flags))
		return -EEXIST;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
}
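
/*
 * A short reserve/free sketch with a made-up address. LMB_NOOVERWRITE
 * marks the range so later allocations cannot hand it out again:
 *
 *	if (lmb_reserve(0x44000000, SZ_2M, LMB_NOOVERWRITE) < 0)
 *		return -ENOSPC;
 *	...
 *	lmb_free(0x44000000, SZ_2M);
 */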

static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, u32 flags)
{
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	for (i = lmb.available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;

		if (max_addr == LMB_ALLOC_ANYWHERE) {
			base = ALIGN_DOWN(lmbbase + lmbsize - size, align);
		} else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = ALIGN_DOWN(base - size, align);
		} else {
			continue;
		}

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				ret = lmb_map_update_notify(base, size,
							    LMB_MAP_OP_RESERVE,
							    flags);
				if (ret)
					return ret;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}

	log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
		  __func__, (ulong)size, (ulong)max_addr);

	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
			   uint flags)
{
	return _lmb_alloc_base(size, align, max_addr, flags);
}
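
/*
 * Allocation walks available_mem from the top down, so lmb_alloc() hands
 * out the highest suitable address. A sketch of a bounded allocation,
 * e.g. for a buffer that must stay below 4 GiB for a 32-bit DMA master:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(SZ_1M, SZ_4K, SZ_4G, LMB_NONE);
 *	if (!buf)
 *		return -ENOMEM;
 */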

int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (!lmb_reserve(base, size, flags))
				return 0;
		}
	}

	return -1;
}
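
/*
 * lmb_alloc_addr() reserves memory at a caller-chosen address, e.g. for
 * loading an image to a fixed load address (values are made up); it
 * returns -1 if the range lies outside RAM or clashes with an existing
 * reservation:
 *
 *	if (lmb_alloc_addr(0x42000000, SZ_16M, LMB_NONE))
 *		return -EBUSY;
 */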

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.available_mem.count - 1].base +
		       lmb_memory[lmb.available_mem.count - 1].size - addr;
	}
	return 0;
}

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;

		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}
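
/*
 * Note that lmb_is_reserved_flags() tests a single address, not a range:
 * it is non-zero only if @addr falls inside a reserved region whose flags
 * contain all the bits in @flags. Passing LMB_NONE (0) turns it into a
 * plain "is this address reserved?" check, e.g.:
 *
 *	if (lmb_is_reserved_flags(addr, LMB_NOMAP))
 *		continue;
 */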

static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.available_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
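
/*
 * Usage sketch for a unit test: lmb_push() parks the global map in @store
 * and installs a fresh one with EFI map notifications disabled (lmb.test);
 * lmb_pop() discards the scratch map and restores the original.
 *
 *	struct lmb store;
 *
 *	if (lmb_push(&store))
 *		return -ENOMEM;
 *	...exercise lmb_add() / lmb_reserve() against the scratch map...
 *	lmb_pop(&store);
 */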
#endif /* UNIT_TEST */