slab *sl_new(pool *, unsigned size);
void *sl_alloc(slab *);
void *sl_allocz(slab *);
-void sl_free(slab *, void *);
+void sl_free(void *);
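+/* sl_free() now derives the owning slab from the object's page head, so no slab argument is needed */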
/*
* Low-level memory allocation functions, please don't use
}
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
};
struct sl_head {
+ struct slab *slab;		/* Back-pointer to the owning slab, set when the page is allocated */
node n;
u32 num_full;
u32 used_bits[0];
struct sl_head *h;
redo:
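+ /* HEAD() yields the embedded node; since the slab back-pointer was added, n is no longer the first member of sl_head, so convert with SKIP_BACK */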
- h = HEAD(s->partial_heads);
+ h = SKIP_BACK(struct sl_head, n, HEAD(s->partial_heads));
if (!h->n.next)
goto no_partial;
okay:
goto redo;
no_partial:
- h = HEAD(s->empty_heads);
+ h = SKIP_BACK(struct sl_head, n, HEAD(s->empty_heads));
if (h->n.next)
{
rem_node(&h->n);
s->num_empty_heads--;
goto okay;
}
+
h = alloc_page();
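+ /* A head must be recoverable from any object address inside the page; verify that SL_GET_HEAD() maps the fresh page to itself */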
+ ASSERT_DIE(SL_GET_HEAD(h) == h);
+
#ifdef POISON
memset(h, 0xba, page_size);
#endif
- ASSERT_DIE(SL_GET_HEAD(h) == h);
+
memset(h, 0, s->head_size);
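+ /* Record the owning slab so sl_free() can find it from the page head alone */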
+ h->slab = s;
add_head(&s->partial_heads, &h->n);
goto okay;
}
- * and returns it back to the Slab @s.
+ * and returns it back to the Slab it was allocated from.
*/
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_head *h = SL_GET_HEAD(oo);
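+ /* Recover the owning slab from the page head; this is what lets sl_free() drop the slab argument */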
+ struct slab *s = h->slab;
#ifdef POISON
memset(oo, 0xdb, s->data_size);
slab_free(resource *r)
{
slab *s = (slab *) r;
- struct sl_head *h, *g;
+ struct sl_head *h;
+ node *nn, *nxt;
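+ /* The node n no longer sits at offset 0 of sl_head, so the offset-aware WALK_LIST2 walkers must be used */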
- WALK_LIST_DELSAFE(h, g, s->empty_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->empty_heads, n)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->partial_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->partial_heads, n)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->full_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->full_heads, n)
free_page(h);
}
slab *s = (slab *) r;
int ec=0, pc=0, fc=0;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST2(h, nn, s->empty_heads, n)
ec++;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
pc++;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
fc++;
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
heads++;
size_t items = heads * s->objs_per_slab;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
{
heads++;
items += h->num_full;
}
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST2(h, nn, s->empty_heads, n)
heads++;
size_t eff = items * s->data_size;
{
slab *s = (slab *) r;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
-if ((unsigned long) h < a && (unsigned long) h + page_size < a)
+if ((unsigned long) h < a && (unsigned long) h + page_size > a)	/* i.e. a lies inside this page */
return r;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
-if ((unsigned long) h < a && (unsigned long) h + page_size < a)
+if ((unsigned long) h < a && (unsigned long) h + page_size > a)
return r;
return NULL;
-#define TEST_SIZE 1024 * 128
-#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
+#define TEST_SIZE (1024 * 128)
+#define ITEMS(sz) (TEST_SIZE / ((sz) >> u32_log2((sz)) / 2))
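+/* ITEMS(sz) is roughly TEST_SIZE / sqrt(sz): larger objects get proportionally fewer test items */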
+struct test_request {
+ int size;
+ enum strategy {
+ TEST_NONE,
+ TEST_FORWARDS,
+ TEST_BACKWARDS,
+ TEST_RANDOM,
+ TEST_MIXED,
+ TEST__MAX,
+ } strategy;
+};
+
+const char * const strategy_name[TEST__MAX] = {
+ [TEST_FORWARDS] = "forwards",
+ [TEST_BACKWARDS] = "backwards",
+ [TEST_RANDOM] = "random",
+ [TEST_MIXED] = "mixed",
+};
+
static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
{
byte *out = sl_alloc(s);
block[p]++;
}
- sl_free(s, block);
+ sl_free(block);
struct resmem ns = rmemsize((resource *) s);
}
static int
-t_slab_forwards(const void *data)
+t_slab(const void *data)
{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
-
- struct resmem sliz = get_memsize(s);
-
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
-
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
-
- for (int i = 0; i < n; i++)
- test_free(s, block[i], sz, &sliz);
+ const struct test_request *tr = data;
+ int sz = tr->size;
- mb_free(block);
-
- return 1;
-}
-
-static int
-t_slab_backwards(const void *data)
-{
- int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
-
struct resmem sliz = get_memsize(s);
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
+ switch (tr->strategy) {
+ case TEST_FORWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
- for (int i = n - 1; i >= 0; i--)
- test_free(s, block[i], sz, &sliz);
+ for (int i = 0; i < n; i++)
+ test_free(s, block[i], sz, &sliz);
- mb_free(block);
+ break;
- return 1;
-}
+ case TEST_BACKWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
-static int
-t_slab_random(const void *data)
-{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
+ for (int i = n - 1; i >= 0; i--)
+ test_free(s, block[i], sz, &sliz);
- struct resmem sliz = get_memsize(s);
+ break;
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+ case TEST_RANDOM:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
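+ /* Free in random order: free a random live block, then move the last live block into the freed slot */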
+ for (int i = 0; i < n; i++)
+ {
+ int pos = bt_random() % (n - i);
+ test_free(s, block[pos], sz, &sliz);
+ if (pos != n - i - 1)
+ block[pos] = block[n - i - 1];
+ }
- for (int i = 0; i < n; i++)
- {
- int pos = bt_random() % (n - i);
- test_free(s, block[pos], sz, &sliz);
- if (pos != n - i - 1)
- block[pos] = block[n - i - 1];
- }
+ break;
- mb_free(block);
+ case TEST_MIXED:
+ {
+ int cur = 0;
+ int pending = n;
- return 1;
-}
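+ /* Invariant: block[0..cur-1] hold live allocations; pending counts allocations still to make */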
+ while (cur + pending > 0) {
+ int action = bt_random() % (cur + pending);
-static int
-t_slab_mixed(const void *data)
-{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
-
- struct resmem sliz = get_memsize(s);
-
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+ if (action < cur) {
+ test_free(s, block[action], sz, &sliz);
+ if (action != --cur)
+ block[action] = block[cur];
+ } else {
+ block[cur++] = test_alloc(s, sz, &sliz);
+ pending--;
+ }
+ }
- int cur = 0;
- int pending = n;
+ break;
+ }
- while (cur + pending > 0) {
- int action = bt_random() % (cur + pending);
-
- if (action < cur) {
- test_free(s, block[action], sz, &sliz);
- if (action != --cur)
- block[action] = block[cur];
- } else {
- block[cur++] = test_alloc(s, sz, &sliz);
- pending--;
- }
+ default: bug("This shouldn't happen");
}
mb_free(block);
-
return 1;
}
int main(int argc, char *argv[])
{
bt_init(argc, argv);
+ struct test_request tr;
+
for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
- {
- bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed order, size=%d", sizes[i]);
- }
+ for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
+ {
+ tr = (struct test_request) {
+ .size = sizes[i],
+ .strategy = strategy,
+ };
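+ /* tr is reused for every registered test; this assumes bt_test_suite_arg() runs the test before returning */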
+ bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, strategy=%s",
+ tr.size, strategy_name[strategy]);
+ }
return bt_exit_value();
}
{
rem_node(&n->n);
rem_node(&n->if_n);
- sl_free(neigh_slab, n);
+ sl_free(n);
}
/**
{
HASH_DO_REMOVE(src_hash, RSH, sp);
idm_free(&src_ids, src->global_id);
- sl_free(rte_src_slab, src);
+ sl_free(src);
}
}
HASH_WALK_FILTER_END;
while (o)
{
n = o->next;
- sl_free(nexthop_slab(o), o);
+ sl_free(o);
o = n;
}
}
nexthop_free(a->nh.next);
ea_free(a->eattrs);
a->cached = 0;
- sl_free(rta_slab(a), a);
+ sl_free(a);
}
rta *
}
if (f->fib_slab)
- sl_free(f->fib_slab, E);
+ sl_free(E);
else
mb_free(E);
rt_unlock_source(e->src);
if (rta_is_cached(e->attrs))
rta_free(e->attrs);
- sl_free(rte_slab, e);
+ sl_free(e);
}
static inline void
{
rt_unlock_source(e->src);
rta_free(e->attrs);
- sl_free(rte_slab, e);
+ sl_free(e);
}
static int /* Actually better or at least as good as */
rem_node(&he->ln);
hc_remove(hc, he);
- sl_free(hc->slab, he);
+ sl_free(he);
hc->hash_items--;
if (hc->hash_items < hc->hash_min)
}
static void
-babel_expire_sources(struct babel_proto *p, struct babel_entry *e)
+babel_expire_sources(struct babel_proto *p UNUSED, struct babel_entry *e)
{
struct babel_source *n, *nx;
btime now_ = current_time();
if (n->expires && n->expires <= now_)
{
rem_node(NODE n);
- sl_free(p->source_slab, n);
+ sl_free(n);
}
}
}
}
static void
-babel_flush_route(struct babel_proto *p, struct babel_route *r)
+babel_flush_route(struct babel_proto *p UNUSED, struct babel_route *r)
{
DBG("Babel: Flush route %N router_id %lR neigh %I\n",
r->e->n.addr, r->router_id, r->neigh->addr);
if (r->e->selected == r)
r->e->selected = NULL;
- sl_free(p->route_slab, r);
+ sl_free(r);
}
static void
}
static void
-babel_remove_seqno_request(struct babel_proto *p, struct babel_seqno_request *sr)
+babel_remove_seqno_request(struct babel_proto *p UNUSED, struct babel_seqno_request *sr)
{
if (sr->nbr)
rem_node(&sr->nbr_node);
rem_node(NODE sr);
- sl_free(p->seqno_slab, sr);
+ sl_free(sr);
}
static int
static uint
babel_write_queue(struct babel_iface *ifa, list *queue)
{
- struct babel_proto *p = ifa->proto;
struct babel_write_state state = { .next_hop_ip6 = ifa->addr };
if (EMPTY_LIST(*queue))
pos += len;
rem_node(NODE msg);
- sl_free(p->msg_slab, msg);
+ sl_free(msg);
}
pos += babel_auth_add_tlvs(ifa, (struct babel_tlv *) pos, end - pos);
else if (res == PARSE_IGNORE)
{
DBG("Babel: Ignoring TLV of type %d\n", tlv->type);
- sl_free(p->msg_slab, msg);
+ sl_free(msg);
}
else /* PARSE_ERROR */
{
LOG_PKT("Bad TLV from %I via %s type %d pos %d - parse error",
saddr, ifa->iface->name, tlv->type, (int) ((byte *)tlv - (byte *)pkt));
- sl_free(p->msg_slab, msg);
+ sl_free(msg);
break;
}
}
if (tlv_data[msg->msg.type].handle_tlv)
tlv_data[msg->msg.type].handle_tlv(&msg->msg, ifa);
rem_node(NODE msg);
- sl_free(p->msg_slab, msg);
+ sl_free(msg);
}
}
HASH_REMOVE(p->session_hash_id, HASH_ID, s);
HASH_REMOVE(p->session_hash_ip, HASH_IP, s);
- sl_free(p->session_slab, s);
+ sl_free(s);
TRACE(D_EVENTS, "Session to %I removed", ip);
HASH_REMOVE2(c->prefix_hash, PXH, c->pool, px);
if (c->prefix_slab)
- sl_free(c->prefix_slab, px);
+ sl_free(px);
else
mb_free(px);
}
if (*ee == e)
{
*ee = e->next;
- sl_free(f->hash_slab, e);
+ sl_free(e);
if (f->hash_entries-- < f->hash_entries_min)
ospf_top_rehash(f, -HASH_LO_STEP);
return;
}
static inline void
-rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
+rip_remove_rte(struct rip_proto *p UNUSED, struct rip_rte **rp)
{
struct rip_rte *rt = *rp;
rip_unlock_neighbor(rt->from);
*rp = rt->next;
- sl_free(p->rte_slab, rt);
+ sl_free(rt);
}
static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)