// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

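/* Dump all memory and reserved regions; only active when DEBUG is defined */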
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("            .size = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
	debug("    reserved.size = 0x%llx\n",
	      (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("              .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

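/* Return non-zero if the two ranges overlap (end addresses are inclusive) */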
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

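/*
 * Return 1 if region 2 starts immediately after region 1, -1 if region 1
 * starts immediately after region 2, and 0 if they are not adjacent.
 */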
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

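/* As lmb_addrs_adjacent(), but for two entries of the same region list */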
static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

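/* Delete entry r by shifting all following entries down one slot */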
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
				 unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

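/* Reset both region lists to empty */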
void lmb_init(struct lmb *lmb)
{
	lmb->memory.cnt = 0;
	lmb->memory.size = 0;
	lmb->reserved.cnt = 0;
	lmb->reserved.size = 0;
}

/* Initialize the struct, add memory and call arch/board reserve functions */
void lmb_init_and_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size,
			  void *fdt_blob)
{
	lmb_init(lmb);
	lmb_add(lmb, base, size);
	arch_lmb_reserve(lmb);
	board_lmb_reserve(lmb);

	if (IMAGE_ENABLE_OF_LIBFDT && fdt_blob)
		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
}

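/*
 * Typical boot-time use (sketch; the 256 MiB DRAM bank at 0x80000000 and
 * the fdt_blob pointer are illustrative assumptions):
 *
 *	struct lmb lmb;
 *
 *	lmb_init_and_reserve(&lmb, 0x80000000, 0x10000000, fdt_blob);
 *	addr = lmb_alloc(&lmb, 0x100000, 0x1000);
 */

/*
 * Insert (base, size) into the list @rgn, coalescing it with an existing
 * entry where the ranges are identical or adjacent. The list stays sorted
 * by base address. Returns -1 if the new range overlaps an existing entry
 * or the table is full, otherwise zero or the number of entries coalesced.
 */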
/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if (rgn->cnt == 0) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		rgn->cnt = 1;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			/* regions overlap */
			return -1;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

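/*
 * Remove (base, size) from the reserved list. The range may cover a whole
 * entry, trim an entry at its front or back, or split one entry in two.
 */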
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size - 1;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end + 1;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end + 1, rgnend - end);
}

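/* Mark (base, size) as reserved so the allocators will not hand it out */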
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

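/* Return the index of the first entry in @rgn overlapping (base, size), or -1 */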
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

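/*
 * Allocate @size bytes anywhere in memory; lmb_alloc_base() additionally
 * caps the allocation below @max_addr and logs an error on failure.
 */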
phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

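/* Round @addr down to a multiple of @size; @size must be a power of two */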
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

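/*
 * Core allocator: walk the memory regions from the highest downwards and,
 * within each, search downwards from the top (bounded by @max_addr) for an
 * aligned range that does not overlap the reserved list. The winning range
 * is added to the reserved list before its base is returned; 0 means failure.
 */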
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   size) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[rgn].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

/*
 * Try to allocate a specific address range: must be in defined memory but not
 * reserved
 */
phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	long rgn;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
				      lmb->memory.region[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (lmb_reserve(lmb, base, size) >= 0)
				return base;
		}
	}
	return 0;
}

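/* lmb_get_free_size() relies on lmb->reserved being kept sorted by base */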
/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
{
	int i;
	long rgn;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb->reserved.cnt; i++) {
			if (addr < lmb->reserved.region[i].base) {
				/* first reserved range > requested address */
				return lmb->reserved.region[i].base - addr;
			}
			if (lmb->reserved.region[i].base +
			    lmb->reserved.region[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb->memory.region[lmb->memory.cnt - 1].base +
		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
	}
	return 0;
}

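/* Return 1 if @addr falls inside any reserved region, 0 otherwise */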
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

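/*
 * Weak default hooks: boards and architectures typically override these to
 * reserve regions (e.g. U-Boot itself and its stack) before images load.
 */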
__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}