/*
 *	BIRD Resource Manager -- A SLAB-like Memory Allocator
 *
 *	Heavily inspired by the original SLAB paper by Jeff Bonwick.
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Slabs
 *
 * Slabs are collections of memory blocks of a fixed size.
 * They support very fast allocation and freeing of such blocks, prevent memory
 * fragmentation and optimize L2 cache usage. Slabs have been invented by Jeff Bonwick
 * and published in USENIX proceedings as `The Slab Allocator: An Object-Caching Kernel
 * Memory Allocator'. Our implementation follows this article except that we don't use
 * constructors and destructors.
 *
 * When the |DEBUGGING| switch is turned on, we automatically fill all
 * newly allocated and freed blocks with a special pattern to make detection
 * of use of uninitialized or already freed memory easier.
 *
 * Example: Nodes of a FIB are allocated from a per-FIB Slab.
 */
31 #include "nest/bird.h"
32 #include "lib/resource.h"
33 #include "lib/string.h"
35 #undef FAKE_SLAB /* Turn on if you want to debug memory allocations */
38 #define POISON /* Poison all regions after they are freed */
41 static void slab_free(resource
*r
);
42 static void slab_dump(resource
*r
);
43 static resource
*slab_lookup(resource
*r
, unsigned long addr
);
44 static size_t slab_memsize(resource
*r
);
49 * Fake version used for debugging.
58 static struct resclass sl_class
= {
68 uintptr_t data_align
[0];
73 sl_new(pool
*p
, unsigned size
)
75 slab
*s
= ralloc(p
, &sl_class
);
84 struct sl_obj
*o
= xmalloc(sizeof(struct sl_obj
) + s
->size
);
86 add_tail(&s
->objs
, &o
->n
);
91 sl_free(slab
*s
, void *oo
)
93 struct sl_obj
*o
= SKIP_BACK(struct sl_obj
, data
, oo
);
100 slab_free(resource
*r
)
102 slab
*s
= (slab
*) r
;
103 struct sl_obj
*o
, *p
;
105 for(o
= HEAD(s
->objs
); p
= (struct sl_obj
*) o
->n
.next
; o
= p
)
110 slab_dump(resource
*r
)
112 slab
*s
= (slab
*) r
;
116 WALK_LIST(o
, s
->objs
)
118 debug("(%d objects per %d bytes)\n", cnt
, s
->size
);
122 slab_memsize(resource
*r
)
124 slab
*s
= (slab
*) r
;
128 WALK_LIST(o
, s
->objs
)
131 return ALLOC_OVERHEAD
+ sizeof(struct slab
) + cnt
* (ALLOC_OVERHEAD
+ s
->size
);
138 * Real efficient version.
141 #define SLAB_SIZE 4096
142 #define MAX_EMPTY_HEADS 1
146 unsigned obj_size
, head_size
, objs_per_slab
, num_empty_heads
, data_size
;
147 list empty_heads
, partial_heads
, full_heads
;
150 static struct resclass sl_class
= {
161 struct sl_obj
*first_free
;
166 struct sl_head
*slab
;
173 struct sl_alignment
{ /* Magic structure for testing of alignment */
/**
 * sl_new - create a new Slab
 * @p: resource pool
 * @size: block size
 *
 * This function creates a new Slab resource from which
 * objects of size @size can be allocated.
 */
187 sl_new(pool
*p
, unsigned size
)
189 slab
*s
= ralloc(p
, &sl_class
);
190 unsigned int align
= sizeof(struct sl_alignment
);
191 if (align
< sizeof(int))
194 size
+= OFFSETOF(struct sl_obj
, u
.data
);
195 if (size
< sizeof(struct sl_obj
))
196 size
= sizeof(struct sl_obj
);
197 size
= (size
+ align
- 1) / align
* align
;
199 s
->head_size
= (sizeof(struct sl_head
) + align
- 1) / align
* align
;
200 s
->objs_per_slab
= (SLAB_SIZE
- s
->head_size
) / size
;
201 if (!s
->objs_per_slab
)
202 bug("Slab: object too large");
203 s
->num_empty_heads
= 0;
204 init_list(&s
->empty_heads
);
205 init_list(&s
->partial_heads
);
206 init_list(&s
->full_heads
);
210 static struct sl_head
*
213 struct sl_head
*h
= xmalloc(SLAB_SIZE
);
214 struct sl_obj
*o
= (struct sl_obj
*)((byte
*)h
+s
->head_size
);
216 unsigned int n
= s
->objs_per_slab
;
223 no
= (struct sl_obj
*)((char *) o
+s
->obj_size
);
224 o
->u
.next
= n
? no
: NULL
;
/**
 * sl_alloc - allocate an object from Slab
 * @s: slab
 *
 * sl_alloc() allocates space for a single object from the
 * Slab and returns a pointer to the object.
 */
244 h
= HEAD(s
->partial_heads
);
251 h
->first_free
= o
->u
.next
;
254 memset(o
->u
.data
, 0xcd, s
->data_size
);
260 add_tail(&s
->full_heads
, &h
->n
);
264 h
= HEAD(s
->empty_heads
);
268 add_head(&s
->partial_heads
, &h
->n
);
269 s
->num_empty_heads
--;
273 add_head(&s
->partial_heads
, &h
->n
);
/**
 * sl_free - return a free object back to a Slab
 * @s: slab
 * @oo: object returned by sl_alloc()
 *
 * This function frees memory associated with the object @oo
 * and returns it back to the Slab @s.
 */
286 sl_free(slab
*s
, void *oo
)
288 struct sl_obj
*o
= SKIP_BACK(struct sl_obj
, u
.data
, oo
);
289 struct sl_head
*h
= o
->slab
;
292 memset(oo
, 0xdb, s
->data_size
);
294 o
->u
.next
= h
->first_free
;
299 if (s
->num_empty_heads
>= MAX_EMPTY_HEADS
)
303 add_head(&s
->empty_heads
, &h
->n
);
304 s
->num_empty_heads
++;
310 add_head(&s
->partial_heads
, &h
->n
);
315 slab_free(resource
*r
)
317 slab
*s
= (slab
*) r
;
318 struct sl_head
*h
, *g
;
320 WALK_LIST_DELSAFE(h
, g
, s
->empty_heads
)
322 WALK_LIST_DELSAFE(h
, g
, s
->partial_heads
)
324 WALK_LIST_DELSAFE(h
, g
, s
->full_heads
)
329 slab_dump(resource
*r
)
331 slab
*s
= (slab
*) r
;
332 int ec
=0, pc
=0, fc
=0;
335 WALK_LIST(h
, s
->empty_heads
)
337 WALK_LIST(h
, s
->partial_heads
)
339 WALK_LIST(h
, s
->full_heads
)
341 debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec
, pc
, fc
, s
->objs_per_slab
, s
->obj_size
);
345 slab_memsize(resource
*r
)
347 slab
*s
= (slab
*) r
;
351 WALK_LIST(h
, s
->empty_heads
)
353 WALK_LIST(h
, s
->partial_heads
)
355 WALK_LIST(h
, s
->full_heads
)
358 return ALLOC_OVERHEAD
+ sizeof(struct slab
) + heads
* (ALLOC_OVERHEAD
+ SLAB_SIZE
);
362 slab_lookup(resource
*r
, unsigned long a
)
364 slab
*s
= (slab
*) r
;
367 WALK_LIST(h
, s
->partial_heads
)
368 if ((unsigned long) h
< a
&& (unsigned long) h
+ SLAB_SIZE
< a
)
370 WALK_LIST(h
, s
->full_heads
)
371 if ((unsigned long) h
< a
&& (unsigned long) h
+ SLAB_SIZE
< a
)