/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt  = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size = 0x%08lx\n", lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base = 0x%08lx\n", i,
			lmb->memory.region[i].base);
		debug("		      .size = 0x%08lx\n",
			lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt  = 0x%lx\n", lmb->reserved.cnt);
	debug("    reserved.size = 0x%08lx\n", lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%08lx\n", i,
			lmb->reserved.region[i].base);
		debug("		       .size = 0x%08lx\n",
			lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

static unsigned long lmb_addrs_overlap(ulong base1, ulong size1,
				       ulong base2, ulong size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long lmb_addrs_adjacent(ulong base1, ulong size1,
			       ulong base2, ulong size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
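
/*
 * Illustrative example: lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 (the second range starts exactly where the first ends);
 * swapping the two ranges returns -1; any gap or overlap returns 0.
 */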

static long lmb_regions_adjacent(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	ulong base1 = rgn->region[r1].base;
	ulong size1 = rgn->region[r1].size;
	ulong base2 = rgn->region[r2].base;
	ulong size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

/* Remove region r by sliding all following entries down one slot. */
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}
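
/*
 * Illustrative example: coalescing [0x1000, 0x2000) at r1 with the
 * adjacent [0x2000, 0x3000) at r2 grows r1 to [0x1000, 0x3000) and
 * removes r2 from the table.
 */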

void lmb_init(struct lmb *lmb)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb->memory.region[0].base = 0;
	lmb->memory.region[0].size = 0;
	lmb->memory.cnt = 1;
	lmb->memory.size = 0;

	/* Ditto. */
	lmb->reserved.region[0].base = 0;
	lmb->reserved.region[0].size = 0;
	lmb->reserved.cnt = 1;
	lmb->reserved.size = 0;
}
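
/*
 * Typical usage sketch (illustrative only; the addresses below are made
 * up and would normally come from the board's DRAM configuration):
 *
 *	struct lmb lmb;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, 0x00000000, 0x10000000);	    register 256 MiB of RAM
 *	lmb_reserve(&lmb, 0x0ff00000, 0x00100000);  protect the top 1 MiB
 *	addr = lmb_alloc(&lmb, 0x4000, 0x1000);	    16 KiB, 4 KiB aligned
 */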

/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, ulong base, ulong size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		ulong rgnbase = rgn->region[i].base;
		ulong rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}
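
/*
 * Illustrative example: with an existing region [0x2000, 0x3000),
 * adding [0x1000, 0x1000) coalesces from below into [0x1000, 0x3000),
 * adding [0x3000, 0x1000) coalesces from above into [0x2000, 0x4000),
 * and adding a disjoint [0x8000, 0x1000) is inserted as a new entry in
 * base-address order.
 */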

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, ulong base, ulong size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

long lmb_reserve(struct lmb *lmb, ulong base, ulong size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

/* Return the index of the first region overlapping [base, base+size), or -1. */
long lmb_overlaps_region(struct lmb_region *rgn, ulong base, ulong size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		ulong rgnbase = rgn->region[i].base;
		ulong rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

ulong lmb_alloc(struct lmb *lmb, ulong size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

ulong lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
{
	ulong alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       size, max_addr);

	return alloc;
}

static ulong lmb_align_down(ulong addr, ulong size)
{
	return addr & ~(size - 1);
}

static ulong lmb_align_up(ulong addr, ulong size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
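
/*
 * Note: the mask arithmetic above assumes the alignment is a power of
 * two. Example: lmb_align_down(0x12345, 0x1000) = 0x12000 and
 * lmb_align_up(0x12345, 0x1000) = 0x13000.
 */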

/*
 * Walk usable memory from the highest region down, and within each region
 * step below any reserved block that overlaps the candidate range, until
 * an aligned, unreserved window of the requested size is found.
 */
ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
{
	long i, j;
	ulong base = 0;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		ulong lmbbase = lmb->memory.region[i].base;
		ulong lmbsize = lmb->memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&(lmb->reserved), base, size)) >= 0))
			base = lmb_align_down(lmb->reserved.region[j].base - size,
					      align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	if (lmb_add_region(&(lmb->reserved), base, lmb_align_up(size, align)) < 0)
		return 0;

	return base;
}
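
/*
 * Illustrative walk (made-up addresses): with memory [0x00000000,
 * 0x10000000) and an existing reservation at [0x0ff00000, 0x10000000),
 * __lmb_alloc_base(lmb, 0x4000, 0x1000, LMB_ALLOC_ANYWHERE) first tries
 * base = 0x0fffc000, finds it overlaps the reservation, retries below it
 * at base = 0x0fefc000, reserves that range, and returns it.
 */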

int lmb_is_reserved(struct lmb *lmb, ulong addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		ulong upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}