/*
 * NOTE: gitweb navigation residue removed — this file is lib/slab.c
 * from the BIRD source tree (mirror: git.ipfire.org thirdparty/bird.git).
 */
1 /*
2 * BIRD Resource Manager -- A SLAB-like Memory Allocator
3 *
4 * Heavily inspired by the original SLAB paper by Jeff Bonwick.
5 *
6 * (c) 1998--2000 Martin Mares <mj@ucw.cz>
7 *
8 * Can be freely distributed and used under the terms of the GNU GPL.
9 */
10
11 /**
12 * DOC: Slabs
13 *
14 * Slabs are collections of memory blocks of a fixed size.
15 * They support very fast allocation and freeing of such blocks, prevent memory
16 * fragmentation and optimize L2 cache usage. Slabs have been invented by Jeff Bonwick
17 * and published in USENIX proceedings as `The Slab Allocator: An Object-Caching Kernel
18 * Memory Allocator'. Our implementation follows this article except that we don't use
19 * constructors and destructors.
20 *
21 * When the |DEBUGGING| switch is turned on, we automatically fill all
22 * newly allocated and freed blocks with a special pattern to make detection
23 * of use of uninitialized or already freed memory easier.
24 *
25 * Example: Nodes of a FIB are allocated from a per-FIB Slab.
26 */
27
28 #include <stdlib.h>
29 #include <stdint.h>
30
31 #include "nest/bird.h"
32 #include "lib/resource.h"
33 #include "lib/string.h"
34
35 #undef FAKE_SLAB /* Turn on if you want to debug memory allocations */
36
37 #ifdef DEBUGGING
38 #define POISON /* Poison all regions after they are freed */
39 #endif
40
41 static void slab_free(resource *r);
42 static void slab_dump(resource *r);
43 static resource *slab_lookup(resource *r, unsigned long addr);
44 static size_t slab_memsize(resource *r);
45
46 #ifdef FAKE_SLAB
47
48 /*
49 * Fake version used for debugging.
50 */
51
/* Fake slab: every object is a separate xmalloc'd block kept on one list. */
struct slab {
  resource r;			/* Standard resource header */
  unsigned size;		/* User-requested object size */
  list objs;			/* All currently allocated objects (for dump/teardown) */
};
57
58 static struct resclass sl_class = {
59 "FakeSlab",
60 sizeof(struct slab),
61 slab_free,
62 slab_dump,
63 slab_memsize
64 };
65
/* Per-object header of the fake slab; user data follows the header. */
struct sl_obj {
  node n;			/* Node in slab->objs */
  uintptr_t data_align[0];	/* Zero-length member forcing alignment of data[] */
  byte data[0];			/* User data starts here */
};
71
72 slab *
73 sl_new(pool *p, unsigned size)
74 {
75 slab *s = ralloc(p, &sl_class);
76 s->size = size;
77 init_list(&s->objs);
78 return s;
79 }
80
81 void *
82 sl_alloc(slab *s)
83 {
84 struct sl_obj *o = xmalloc(sizeof(struct sl_obj) + s->size);
85
86 add_tail(&s->objs, &o->n);
87 return o->data;
88 }
89
90 void
91 sl_free(slab *s, void *oo)
92 {
93 struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
94
95 rem_node(&o->n);
96 xfree(o);
97 }
98
99 static void
100 slab_free(resource *r)
101 {
102 slab *s = (slab *) r;
103 struct sl_obj *o, *p;
104
105 for(o = HEAD(s->objs); p = (struct sl_obj *) o->n.next; o = p)
106 xfree(o);
107 }
108
109 static void
110 slab_dump(resource *r)
111 {
112 slab *s = (slab *) r;
113 int cnt = 0;
114 struct sl_obj *o;
115
116 WALK_LIST(o, s->objs)
117 cnt++;
118 debug("(%d objects per %d bytes)\n", cnt, s->size);
119 }
120
121 static size_t
122 slab_memsize(resource *r)
123 {
124 slab *s = (slab *) r;
125 int cnt = 0;
126 struct sl_obj *o;
127
128 WALK_LIST(o, s->objs)
129 cnt++;
130
131 return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
132 }
133
134
135 #else
136
137 /*
138 * Real efficient version.
139 */
140
141 #define SLAB_SIZE 4096
142 #define MAX_EMPTY_HEADS 1
143
/* Real slab: carves fixed-size objects out of SLAB_SIZE pages ("heads"). */
struct slab {
  resource r;			/* Standard resource header */
  unsigned obj_size, head_size, objs_per_slab, num_empty_heads, data_size;
				/* obj_size: rounded-up object stride;
				 * head_size: aligned size of struct sl_head;
				 * objs_per_slab: objects fitting in one page;
				 * num_empty_heads: cached empty pages;
				 * data_size: user-visible object size */
  list empty_heads, partial_heads, full_heads;	/* Pages by fill state */
};
149
/* Resource class operations of the real slab allocator */
static struct resclass sl_class = {
  "Slab",
  sizeof(struct slab),
  slab_free,			/* free: release all pages */
  slab_dump,			/* dump: per-state page counts */
  slab_lookup,			/* lookup: find resource owning an address */
  slab_memsize			/* memsize: total bytes held */
};
158
/* Header at the start of each SLAB_SIZE page. */
struct sl_head {
  node n;			/* Node in one of the slab's three page lists */
  struct sl_obj *first_free;	/* Head of this page's free-object chain */
  int num_full;			/* Number of objects currently allocated */
};
164
/* Per-object header inside a page; free objects reuse the data area as a link. */
struct sl_obj {
  struct sl_head *slab;		/* Back-pointer to the owning page */
  union {
    struct sl_obj *next;	/* While free: next object on the free chain */
    byte data[0];		/* While allocated: user data starts here */
  } u;
};
172
struct sl_alignment {		/* Magic structure for testing of alignment */
  byte data;			/* sizeof(this struct) measures the padding the */
  int x[0];			/* compiler inserts before an int array member */
};
177
178 /**
179 * sl_new - create a new Slab
180 * @p: resource pool
181 * @size: block size
182 *
183 * This function creates a new Slab resource from which
184 * objects of size @size can be allocated.
185 */
186 slab *
187 sl_new(pool *p, unsigned size)
188 {
189 slab *s = ralloc(p, &sl_class);
190 unsigned int align = sizeof(struct sl_alignment);
191 if (align < sizeof(int))
192 align = sizeof(int);
193 s->data_size = size;
194 size += OFFSETOF(struct sl_obj, u.data);
195 if (size < sizeof(struct sl_obj))
196 size = sizeof(struct sl_obj);
197 size = (size + align - 1) / align * align;
198 s->obj_size = size;
199 s->head_size = (sizeof(struct sl_head) + align - 1) / align * align;
200 s->objs_per_slab = (SLAB_SIZE - s->head_size) / size;
201 if (!s->objs_per_slab)
202 bug("Slab: object too large");
203 s->num_empty_heads = 0;
204 init_list(&s->empty_heads);
205 init_list(&s->partial_heads);
206 init_list(&s->full_heads);
207 return s;
208 }
209
/*
 * Allocate a fresh SLAB_SIZE page and thread all of its objects
 * onto the page's free chain.  Returns the page header.
 */
static struct sl_head *
sl_new_head(slab *s)
{
  struct sl_head *h = xmalloc(SLAB_SIZE);
  /* First object sits right after the aligned page header */
  struct sl_obj *o = (struct sl_obj *)((byte *)h+s->head_size);
  struct sl_obj *no;
  unsigned int n = s->objs_per_slab;

  h->first_free = o;
  h->num_full = 0;
  while (n--)
    {
      o->slab = h;
      /* Next object is obj_size bytes further in the same page */
      no = (struct sl_obj *)((char *) o+s->obj_size);
      /* Last object (n == 0) terminates the free chain */
      o->u.next = n ? no : NULL;
      o = no;
    }
  return h;
}
229
/**
 * sl_alloc - allocate an object from Slab
 * @s: slab
 *
 * sl_alloc() allocates space for a single object from the
 * Slab and returns a pointer to the object.
 *
 * Allocation strategy: take from the first partial page; if that page
 * turns out to be full, move it to full_heads and retry; if there is no
 * partial page, reuse a cached empty page or allocate a new one.
 */
void *
sl_alloc(slab *s)
{
  struct sl_head *h;
  struct sl_obj *o;

redo:
  h = HEAD(s->partial_heads);
  if (!h->n.next)		/* List sentinel reached => no partial page */
    goto no_partial;
okay:
  o = h->first_free;
  if (!o)			/* Page was actually full */
    goto full_partial;
  /* Pop the object off the page's free chain */
  h->first_free = o->u.next;
  h->num_full++;
#ifdef POISON
  /* Fill fresh memory with a pattern to catch reads of uninitialized data */
  memset(o->u.data, 0xcd, s->data_size);
#endif
  return o->u.data;

full_partial:
  /* Page exhausted: reclassify it as full and look for another one */
  rem_node(&h->n);
  add_tail(&s->full_heads, &h->n);
  goto redo;

no_partial:
  /* Prefer a cached empty page over allocating a new one */
  h = HEAD(s->empty_heads);
  if (h->n.next)
    {
      rem_node(&h->n);
      add_head(&s->partial_heads, &h->n);
      s->num_empty_heads--;
      goto okay;
    }
  h = sl_new_head(s);
  add_head(&s->partial_heads, &h->n);
  goto okay;
}
276
/**
 * sl_free - return a free object back to a Slab
 * @s: slab
 * @oo: object returned by sl_alloc()
 *
 * This function frees memory associated with the object @oo
 * and returns it back to the Slab @s.  The owning page may migrate
 * between the full/partial/empty lists as a result; at most
 * MAX_EMPTY_HEADS empty pages are cached, the rest are released.
 */
void
sl_free(slab *s, void *oo)
{
  struct sl_obj *o = SKIP_BACK(struct sl_obj, u.data, oo);
  struct sl_head *h = o->slab;

#ifdef POISON
  /* Overwrite freed memory to catch use-after-free */
  memset(oo, 0xdb, s->data_size);
#endif
  /* Push the object back on its page's free chain */
  o->u.next = h->first_free;
  h->first_free = o;
  if (!--h->num_full)
    {
      /* Page became completely empty: cache it or release it */
      rem_node(&h->n);
      if (s->num_empty_heads >= MAX_EMPTY_HEADS)
	xfree(h);
      else
	{
	  add_head(&s->empty_heads, &h->n);
	  s->num_empty_heads++;
	}
    }
  else if (!o->u.next)
    {
      /* Free chain was empty before, so the page was on full_heads;
       * it now has one free object => move it to partial_heads. */
      rem_node(&h->n);
      add_head(&s->partial_heads, &h->n);
    }
}
313
314 static void
315 slab_free(resource *r)
316 {
317 slab *s = (slab *) r;
318 struct sl_head *h, *g;
319
320 WALK_LIST_DELSAFE(h, g, s->empty_heads)
321 xfree(h);
322 WALK_LIST_DELSAFE(h, g, s->partial_heads)
323 xfree(h);
324 WALK_LIST_DELSAFE(h, g, s->full_heads)
325 xfree(h);
326 }
327
328 static void
329 slab_dump(resource *r)
330 {
331 slab *s = (slab *) r;
332 int ec=0, pc=0, fc=0;
333 struct sl_head *h;
334
335 WALK_LIST(h, s->empty_heads)
336 ec++;
337 WALK_LIST(h, s->partial_heads)
338 pc++;
339 WALK_LIST(h, s->full_heads)
340 fc++;
341 debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
342 }
343
344 static size_t
345 slab_memsize(resource *r)
346 {
347 slab *s = (slab *) r;
348 int heads = 0;
349 struct sl_head *h;
350
351 WALK_LIST(h, s->empty_heads)
352 heads++;
353 WALK_LIST(h, s->partial_heads)
354 heads++;
355 WALK_LIST(h, s->full_heads)
356 heads++;
357
358 return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + SLAB_SIZE);
359 }
360
361 static resource *
362 slab_lookup(resource *r, unsigned long a)
363 {
364 slab *s = (slab *) r;
365 struct sl_head *h;
366
367 WALK_LIST(h, s->partial_heads)
368 if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE < a)
369 return r;
370 WALK_LIST(h, s->full_heads)
371 if ((unsigned long) h < a && (unsigned long) h + SLAB_SIZE < a)
372 return r;
373 return NULL;
374 }
375
376 #endif