/*
 *	BIRD Resource Manager -- A SLAB-like Memory Allocator
 *
 *	Heavily inspired by the original SLAB paper by Jeff Bonwick.
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Slabs
 *
 * Slabs are collections of memory blocks of a fixed size.
 * They support very fast allocation and freeing of such blocks, prevent memory
 * fragmentation and optimize L2 cache usage. Slabs have been invented by Jeff Bonwick
 * and published in USENIX proceedings as `The Slab Allocator: An Object-Caching Kernel
 * Memory Allocator'. Our implementation follows this article except that we don't use
 * constructors and destructors.
 *
 * When the |DEBUGGING| switch is turned on, we automatically fill all
 * newly allocated and freed blocks with a special pattern to make detection
 * of use of uninitialized or already freed memory easier.
 *
 * Example: Nodes of a FIB are allocated from a per-FIB Slab.
 */
18c8241a | 28 | #include <stdlib.h> |
46eb80d5 | 29 | #include <stdint.h> |
18c8241a MM |
30 | |
31 | #include "nest/bird.h" | |
32 | #include "lib/resource.h" | |
221135d6 | 33 | #include "lib/string.h" |
18c8241a | 34 | |
35496679 MM |
35 | #undef FAKE_SLAB /* Turn on if you want to debug memory allocations */ |
36 | ||
b8e60d35 MM |
37 | #ifdef DEBUGGING |
38 | #define POISON /* Poison all regions after they are freed */ | |
39 | #endif | |
40 | ||
35496679 MM |
41 | static void slab_free(resource *r); |
42 | static void slab_dump(resource *r); | |
c9763428 | 43 | static resource *slab_lookup(resource *r, unsigned long addr); |
acb60628 | 44 | static size_t slab_memsize(resource *r); |
35496679 MM |
45 | |
46 | #ifdef FAKE_SLAB | |
47 | ||
18c8241a | 48 | /* |
35496679 | 49 | * Fake version used for debugging. |
18c8241a MM |
50 | */ |
51 | ||
18c8241a MM |
52 | struct slab { |
53 | resource r; | |
54 | unsigned size; | |
55 | list objs; | |
56 | }; | |
57 | ||
d4bc8dc0 | 58 | static struct resclass sl_class = { |
35496679 | 59 | "FakeSlab", |
18c8241a MM |
60 | sizeof(struct slab), |
61 | slab_free, | |
acb60628 | 62 | slab_dump, |
a03ede64 | 63 | NULL, |
acb60628 | 64 | slab_memsize |
18c8241a MM |
65 | }; |
66 | ||
35496679 MM |
67 | struct sl_obj { |
68 | node n; | |
d1abbeac | 69 | uintptr_t data_align[0]; |
35496679 MM |
70 | byte data[0]; |
71 | }; | |
72 | ||
18c8241a MM |
73 | slab * |
74 | sl_new(pool *p, unsigned size) | |
75 | { | |
76 | slab *s = ralloc(p, &sl_class); | |
77 | s->size = size; | |
78 | init_list(&s->objs); | |
79 | return s; | |
80 | } | |
81 | ||
82 | void * | |
83 | sl_alloc(slab *s) | |
84 | { | |
85 | struct sl_obj *o = xmalloc(sizeof(struct sl_obj) + s->size); | |
86 | ||
87 | add_tail(&s->objs, &o->n); | |
88 | return o->data; | |
89 | } | |
90 | ||
91 | void | |
92 | sl_free(slab *s, void *oo) | |
93 | { | |
94 | struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo); | |
95 | ||
96 | rem_node(&o->n); | |
97 | xfree(o); | |
98 | } | |
99 | ||
d4bc8dc0 | 100 | static void |
18c8241a MM |
101 | slab_free(resource *r) |
102 | { | |
103 | slab *s = (slab *) r; | |
104 | struct sl_obj *o, *p; | |
105 | ||
106 | for(o = HEAD(s->objs); p = (struct sl_obj *) o->n.next; o = p) | |
107 | xfree(o); | |
108 | } | |
109 | ||
d4bc8dc0 | 110 | static void |
18c8241a MM |
111 | slab_dump(resource *r) |
112 | { | |
113 | slab *s = (slab *) r; | |
114 | int cnt = 0; | |
115 | struct sl_obj *o; | |
116 | ||
117 | WALK_LIST(o, s->objs) | |
118 | cnt++; | |
119 | debug("(%d objects per %d bytes)\n", cnt, s->size); | |
120 | } | |
35496679 | 121 | |
acb60628 OZ |
122 | static size_t |
123 | slab_memsize(resource *r) | |
124 | { | |
125 | slab *s = (slab *) r; | |
20e8d040 | 126 | size_t cnt = 0; |
acb60628 OZ |
127 | struct sl_obj *o; |
128 | ||
129 | WALK_LIST(o, s->objs) | |
130 | cnt++; | |
131 | ||
132 | return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size); | |
133 | } | |
134 | ||
135 | ||
35496679 MM |
136 | #else |
137 | ||
138 | /* | |
139 | * Real efficient version. | |
140 | */ | |
141 | ||
142 | #define SLAB_SIZE 4096 | |
143 | #define MAX_EMPTY_HEADS 1 | |
144 | ||
145 | struct slab { | |
146 | resource r; | |
be77b689 | 147 | unsigned obj_size, head_size, objs_per_slab, num_empty_heads, data_size; |
35496679 MM |
148 | list empty_heads, partial_heads, full_heads; |
149 | }; | |
150 | ||
151 | static struct resclass sl_class = { | |
152 | "Slab", | |
153 | sizeof(struct slab), | |
154 | slab_free, | |
c9763428 | 155 | slab_dump, |
acb60628 OZ |
156 | slab_lookup, |
157 | slab_memsize | |
35496679 MM |
158 | }; |
159 | ||
160 | struct sl_head { | |
161 | node n; | |
162 | struct sl_obj *first_free; | |
163 | int num_full; | |
164 | }; | |
165 | ||
166 | struct sl_obj { | |
167 | struct sl_head *slab; | |
168 | union { | |
169 | struct sl_obj *next; | |
170 | byte data[0]; | |
171 | } u; | |
172 | }; | |
173 | ||
174 | struct sl_alignment { /* Magic structure for testing of alignment */ | |
175 | byte data; | |
176 | int x[0]; | |
177 | }; | |
178 | ||
5cc1e1f8 MM |
179 | /** |
180 | * sl_new - create a new Slab | |
181 | * @p: resource pool | |
182 | * @size: block size | |
183 | * | |
184 | * This function creates a new Slab resource from which | |
185 | * objects of size @size can be allocated. | |
186 | */ | |
35496679 MM |
187 | slab * |
188 | sl_new(pool *p, unsigned size) | |
189 | { | |
190 | slab *s = ralloc(p, &sl_class); | |
191 | unsigned int align = sizeof(struct sl_alignment); | |
192 | if (align < sizeof(int)) | |
193 | align = sizeof(int); | |
be77b689 | 194 | s->data_size = size; |
35496679 MM |
195 | size += OFFSETOF(struct sl_obj, u.data); |
196 | if (size < sizeof(struct sl_obj)) | |
197 | size = sizeof(struct sl_obj); | |
198 | size = (size + align - 1) / align * align; | |
199 | s->obj_size = size; | |
200 | s->head_size = (sizeof(struct sl_head) + align - 1) / align * align; | |
201 | s->objs_per_slab = (SLAB_SIZE - s->head_size) / size; | |
202 | if (!s->objs_per_slab) | |
203 | bug("Slab: object too large"); | |
204 | s->num_empty_heads = 0; | |
205 | init_list(&s->empty_heads); | |
206 | init_list(&s->partial_heads); | |
207 | init_list(&s->full_heads); | |
208 | return s; | |
209 | } | |
210 | ||
9831e591 | 211 | static struct sl_head * |
35496679 MM |
212 | sl_new_head(slab *s) |
213 | { | |
214 | struct sl_head *h = xmalloc(SLAB_SIZE); | |
215 | struct sl_obj *o = (struct sl_obj *)((byte *)h+s->head_size); | |
216 | struct sl_obj *no; | |
217 | unsigned int n = s->objs_per_slab; | |
218 | ||
219 | h->first_free = o; | |
220 | h->num_full = 0; | |
221 | while (n--) | |
222 | { | |
223 | o->slab = h; | |
224 | no = (struct sl_obj *)((char *) o+s->obj_size); | |
225 | o->u.next = n ? no : NULL; | |
226 | o = no; | |
227 | } | |
228 | return h; | |
229 | } | |
230 | ||
5cc1e1f8 MM |
231 | /** |
232 | * sl_alloc - allocate an object from Slab | |
233 | * @s: slab | |
234 | * | |
235 | * sl_alloc() allocates space for a single object from the | |
236 | * Slab and returns a pointer to the object. | |
237 | */ | |
35496679 MM |
238 | void * |
239 | sl_alloc(slab *s) | |
240 | { | |
241 | struct sl_head *h; | |
242 | struct sl_obj *o; | |
243 | ||
244 | redo: | |
245 | h = HEAD(s->partial_heads); | |
246 | if (!h->n.next) | |
247 | goto no_partial; | |
248 | okay: | |
249 | o = h->first_free; | |
250 | if (!o) | |
251 | goto full_partial; | |
252 | h->first_free = o->u.next; | |
253 | h->num_full++; | |
be77b689 MM |
254 | #ifdef POISON |
255 | memset(o->u.data, 0xcd, s->data_size); | |
256 | #endif | |
35496679 MM |
257 | return o->u.data; |
258 | ||
259 | full_partial: | |
260 | rem_node(&h->n); | |
261 | add_tail(&s->full_heads, &h->n); | |
262 | goto redo; | |
263 | ||
264 | no_partial: | |
265 | h = HEAD(s->empty_heads); | |
266 | if (h->n.next) | |
267 | { | |
268 | rem_node(&h->n); | |
269 | add_head(&s->partial_heads, &h->n); | |
270 | s->num_empty_heads--; | |
271 | goto okay; | |
272 | } | |
273 | h = sl_new_head(s); | |
274 | add_head(&s->partial_heads, &h->n); | |
275 | goto okay; | |
276 | } | |
277 | ||
5cc1e1f8 MM |
278 | /** |
279 | * sl_free - return a free object back to a Slab | |
280 | * @s: slab | |
281 | * @oo: object returned by sl_alloc() | |
282 | * | |
283 | * This function frees memory associated with the object @oo | |
284 | * and returns it back to the Slab @s. | |
285 | */ | |
35496679 MM |
286 | void |
287 | sl_free(slab *s, void *oo) | |
288 | { | |
289 | struct sl_obj *o = SKIP_BACK(struct sl_obj, u.data, oo); | |
290 | struct sl_head *h = o->slab; | |
291 | ||
b8e60d35 | 292 | #ifdef POISON |
be77b689 | 293 | memset(oo, 0xdb, s->data_size); |
b8e60d35 | 294 | #endif |
35496679 MM |
295 | o->u.next = h->first_free; |
296 | h->first_free = o; | |
297 | if (!--h->num_full) | |
298 | { | |
299 | rem_node(&h->n); | |
300 | if (s->num_empty_heads >= MAX_EMPTY_HEADS) | |
301 | xfree(h); | |
302 | else | |
303 | { | |
304 | add_head(&s->empty_heads, &h->n); | |
305 | s->num_empty_heads++; | |
306 | } | |
307 | } | |
308 | else if (!o->u.next) | |
309 | { | |
310 | rem_node(&h->n); | |
311 | add_head(&s->partial_heads, &h->n); | |
312 | } | |
313 | } | |
314 | ||
315 | static void | |
316 | slab_free(resource *r) | |
317 | { | |
318 | slab *s = (slab *) r; | |
319 | struct sl_head *h, *g; | |
320 | ||
321 | WALK_LIST_DELSAFE(h, g, s->empty_heads) | |
322 | xfree(h); | |
323 | WALK_LIST_DELSAFE(h, g, s->partial_heads) | |
324 | xfree(h); | |
325 | WALK_LIST_DELSAFE(h, g, s->full_heads) | |
326 | xfree(h); | |
327 | } | |
328 | ||
329 | static void | |
330 | slab_dump(resource *r) | |
331 | { | |
332 | slab *s = (slab *) r; | |
333 | int ec=0, pc=0, fc=0; | |
334 | struct sl_head *h; | |
335 | ||
336 | WALK_LIST(h, s->empty_heads) | |
337 | ec++; | |
338 | WALK_LIST(h, s->partial_heads) | |
339 | pc++; | |
340 | WALK_LIST(h, s->full_heads) | |
341 | fc++; | |
342 | debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size); | |
343 | } | |
344 | ||
acb60628 OZ |
345 | static size_t |
346 | slab_memsize(resource *r) | |
347 | { | |
348 | slab *s = (slab *) r; | |
20e8d040 | 349 | size_t heads = 0; |
acb60628 OZ |
350 | struct sl_head *h; |
351 | ||
352 | WALK_LIST(h, s->empty_heads) | |
353 | heads++; | |
354 | WALK_LIST(h, s->partial_heads) | |
355 | heads++; | |
356 | WALK_LIST(h, s->full_heads) | |
357 | heads++; | |
358 | ||
359 | return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + SLAB_SIZE); | |
360 | } | |
361 | ||
c9763428 MM |
362 | static resource * |
363 | slab_lookup(resource *r, unsigned long a) | |
364 | { | |
365 | slab *s = (slab *) r; | |
366 | struct sl_head *h; | |
367 | ||
368 | WALK_LIST(h, s->partial_heads) | |
369 | if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE < a) | |
370 | return r; | |
371 | WALK_LIST(h, s->full_heads) | |
372 | if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE < a) | |
373 | return r; | |
374 | return NULL; | |
375 | } | |
376 | ||
35496679 | 377 | #endif |