uint cur_order, new_order; \
struct { type *data; rw_spinlock lock; } *cur, *new; \
pool *pool; \
- event rehash; \
- event_list *target; \
+ callback rehash; \
}
#define SPINHASH_INIT(v,id,_pool,_target) \
(v).cur = mb_allocz(_pool, (1U << id##_ORDER) * sizeof *(v).cur); \
(v).new = NULL; \
(v).pool = _pool; \
- (v).rehash = (event) { .hook = id##_REHASH, .data = &(v), }; \
- (v).target = _target; \
+ (v).rehash = (callback) {}; \
+ if (_target) callback_init(&(v).rehash, id##_REHASH, _target); \
})
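
A note for SPINHASH_INIT callers: the argument list keeps its shape, but _target now names the loop the rehash callback is bound to instead of an event list. A minimal caller sketch, not taken from this patch (hypothetical MYHASH / struct my_item names; assumes a birdloop target and MYHASH_ORDER / MYHASH_REHASH defined as usual for SPINHASH users):

/* Hypothetical: the last argument used to be an event_list, e.g.
 * birdloop_event_list(loop); now the loop itself is passed and
 * SPINHASH_INIT binds the MYHASH_REHASH hook to it. */
SPINHASH(struct my_item) my_hash;

static void my_hash_setup(pool *p, struct birdloop *loop)
{
  SPINHASH_INIT(my_hash, MYHASH, p, loop);
}
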
#define SPINHASH_FREE(v) \
({ \
- ev_postpone(&(v).rehash); \
+ callback_cancel(&(v).rehash); \
mb_free((v).cur); \
ASSERT_DIE((v).new == NULL); \
(v).cur = NULL; \
(v).cur_order = 0; \
(v).pool = NULL; \
- (v).target = NULL; \
+ (v).rehash = (callback) {}; \
})
#define SPINHASH_BEGIN_CHAIN(v,id,rw,n,key...) \
})
#define SPINHASH_REQUEST_REHASH(v,id,count) \
- if (SPINHASH_CHECK_REHASH(v,id,count) && (v).target) \
- ev_send((v).target, &(v).rehash);
+ if ((v).rehash.target && SPINHASH_CHECK_REHASH(v,id,count)) \
+ callback_activate(&(v).rehash);
#define SPINHASH_DEFINE_REHASH_FN(id,type) \
-static void id##_REHASH(void *_v) { \
- SPINHASH(type) *v = _v; \
+static void id##_REHASH(callback *cb) { \
+ SKIP_BACK_DECLARE(SPINHASH(type), v, rehash, cb); \
SPINHASH_REHASH_FN_BODY(v,id,type); \
}
void lfuc_unlock_deferred(struct deferred_call *dc)
{
SKIP_BACK_DECLARE(struct lfuc_unlock_queue_item, luqi, dc, dc);
- lfuc_unlock_immediately(luqi->c, luqi->el, luqi->ev);
+ lfuc_unlock_immediately(luqi->c, luqi->cb);
}
#if 0
* If the usecount reaches zero, a prune event is run to possibly free the object.
* The prune event MUST use lfuc_finished() to check the object state.
*/
-static inline void lfuc_unlock_immediately(struct lfuc *c, event_list *el, event *ev)
+static inline void lfuc_unlock_immediately(struct lfuc *c, struct callback *cb)
{
/* Unlocking is tricky. We do it lockless so at the same time, the prune
* event may be running, therefore if the unlock gets us to zero, it must be
if (uc == pending)
/* If we're the last unlocker (every owner is already unlocking), schedule
* the owner's prune event */
- ev_send(el, ev);
+ callback_activate(cb);
else
ASSERT_DIE(uc > pending);
struct lfuc_unlock_queue_item {
struct deferred_call dc;
struct lfuc *c;
- event_list *el;
- event *ev;
+ struct callback *cb;
};
void lfuc_unlock_deferred(struct deferred_call *dc);
-static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
+static inline void lfuc_unlock(struct lfuc *c, struct callback *cb)
{
struct lfuc_unlock_queue_item luqi = {
.dc.hook = lfuc_unlock_deferred,
.c = c,
- .el = el,
- .ev = ev,
+ .cb = cb,
};
defer_call(&luqi.dc, sizeof luqi);
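
Callers thus shrink from an (event_list, event) pair to a single callback pointer, typically embedded next to the usecount in the owning structure. A minimal sketch of the pattern, not taken from this patch (hypothetical foo_owner / foo_prune names; assumes the callback_init() binding used elsewhere in this series):

struct foo_owner {
  struct lfuc uc;           /* lockfree usecount */
  struct callback prune;    /* activated when the last lock may be gone */
};

static void foo_prune(struct callback *cb)
{
  SKIP_BACK_DECLARE(struct foo_owner, o, prune, cb);
  /* The prune hook MUST use lfuc_finished() before freeing the object */
  if (lfuc_finished(&o->uc))
    { /* safe to free o here */ }
}

static void foo_drop(struct foo_owner *o)
{
  /* Before: lfuc_unlock(&o->uc, owner_list, &owner_prune_event);
   * Now the embedded callback, set up earlier via
   * callback_init(&o->prune, foo_prune, loop), is enough: */
  lfuc_unlock(&o->uc, &o->prune);
}
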
#define NETINDEX_REHASH netindex_rehash
#define NETINDEX_PARAMS /8, *2, 2, 2, 12, 28
-static void NETINDEX_REHASH(void *_v) {
+static void NETINDEX_REHASH(callback *cb) {
log(L_TRACE "Netindex rehash: begin");
- netindex_spinhash *v = _v;
+ SKIP_BACK_DECLARE(netindex_spinhash, v, rehash, cb);
int step;
{
NH_LOCK(SKIP_BACK(netindex_hash, hash, v), _);
log(L_TRACE "Netindex rehash: done");
}
-static void netindex_hash_cleanup(void *netindex_hash);
+static void netindex_hash_cleanup(callback *cb);
static struct netindex *
net_lock_revive_unlock(netindex_hash *h, struct netindex *i)
return NULL;
lfuc_lock_revive(&i->uc);
- lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
+ lfuc_unlock(&i->uc, &h->cleanup);
return i;
}
* Index initialization
*/
netindex_hash *
-netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
+netindex_hash_new(pool *sp, struct birdloop *cleanup_target, u8 type)
{
DOMAIN(attrs) dom = DOMAIN_NEW_RCU_SYNC(attrs);
LOCK_DOMAIN(attrs, dom);
hmap_init(&nh->id_map, nh->pool, 128);
- nh->cleanup_list = cleanup_target;
- nh->cleanup_event = (event) { .hook = netindex_hash_cleanup, nh };
+ callback_init(&nh->cleanup, netindex_hash_cleanup, cleanup_target);
UNLOCK_DOMAIN(attrs, dom);
return SKIP_BACK(netindex_hash, priv, nh);
}
static void
-netindex_hash_cleanup(void *_nh)
+netindex_hash_cleanup(callback *cb)
{
- struct netindex_hash_private *nh = _nh;
+ SKIP_BACK_DECLARE(struct netindex_hash_private, nh, cleanup, cb);
DOMAIN(attrs) dom = nh->lock;
LOCK_DOMAIN(attrs, dom);
kept += netindex_hash_cleanup_removed(nh, block, removed, removed_cnt);
/* Return now unless we're deleted */
- if (kept || !nh->deleted_event)
+ if (kept || !nh->deleted)
{
UNLOCK_DOMAIN(attrs, dom);
return;
}
- ev_postpone(&nh->cleanup_event);
+ callback_cancel(&nh->cleanup);
- event *e = nh->deleted_event;
- event_list *t = nh->deleted_target;
+  /* Save the deletion callback before the hash is torn down */
+  callback *cd = nh->deleted;
/* Check cleanliness */
SPINHASH_WALK(nh->hash, NETINDEX, i)
DOMAIN_FREE(attrs, dom);
/* Notify the requestor */
- ev_send(t, e);
+ callback_activate(cd);
}
void
-netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
+netindex_hash_delete(netindex_hash *h, callback *cb)
{
NH_LOCK(h, hp);
- hp->deleted_event = e;
- hp->deleted_target = t;
-
- ev_send(hp->cleanup_list, &hp->cleanup_event);
+ hp->deleted = cb;
+ callback_activate(&hp->cleanup);
}
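
The deletion path follows the same convention: the requestor hands in a callback that is activated once the cleanup hook has emptied and freed the hash. A hypothetical caller sketch (names and the surrounding shutdown logic are illustrative only; assumes the callback_init(cb, hook, loop) signature used above):

static struct callback nh_gone;

static void nh_gone_hook(struct callback *cb UNUSED)
{
  /* Runs in the requestor's loop after netindex_hash_cleanup()
   * has dropped the last index and torn the hash down. */
}

static void my_shutdown(netindex_hash *h, struct birdloop *loop)
{
  callback_init(&nh_gone, nh_gone_hook, loop);
  netindex_hash_delete(h, &nh_gone);
}
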
/*
static struct netindex *
net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
{
- ASSERT_DIE(!hp->deleted_event);
+ ASSERT_DIE(!hp->deleted);
u32 i = hmap_first_zero(&hp->id_map);
hmap_set(&hp->id_map, i);
void net_unlock_index(netindex_hash *h, struct netindex *i)
{
// log(L_TRACE "Unlock index %p", i);
- lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
+ lfuc_unlock(&i->uc, &h->cleanup);
}
struct netindex *
typedef union netindex_hash netindex_hash;
/* Initialization and teardown */
-netindex_hash *netindex_hash_new(pool *, event_list *, u8);
-void netindex_hash_delete(netindex_hash *, event *, event_list *);
+netindex_hash *netindex_hash_new(pool *, struct birdloop *, u8);
+void netindex_hash_delete(netindex_hash *, callback *);
/* Find/get/resolve index; pointer valid until end of task */
struct netindex *net_find_index(netindex_hash *, const net_addr *);
#define NETINDEX_HASH_PUBLIC \
DOMAIN(attrs) lock; /* Assigned lock */ \
- event_list *cleanup_list; /* Cleanup event list */ \
- event cleanup_event; /* Cleanup event */ \
+ callback cleanup; /* Usecount cleanup */ \
u8 net_type; /* Which NET_* is stored */ \
uint _Atomic block_size; /* How big block is */ \
struct netindex * _Atomic * _Atomic block; /* u32 to netindex */ \
slab *slab;
struct hmap id_map;
u32 block_epoch;
- event *deleted_event;
- event_list *deleted_target;
+ callback *deleted;
};
typedef union netindex_hash {
u32 hash_key;
u32 uc;
u32 debug;
- event_list *list;
- event *prune;
+ struct callback *prune_callback;
event *stop;
};
static inline void rt_unlock_source(struct rte_src *src)
{
- lfuc_unlock(&src->uc, src->owner->list, src->owner->prune);
+ lfuc_unlock(&src->uc, src->owner->prune_callback);
}
#ifdef RT_SOURCE_DEBUG
static void mpls_cleanup_ranges(void *_domain);
static void mpls_free_fec(struct mpls_fec_map *m, struct mpls_fec *fec);
-static void mpls_fec_map_cleanup(void *_m);
+static void mpls_fec_map_cleanup(callback *cb);
/*
* MPLS domain
if (!c->rts)
return;
- ev_send_loop(c->mpls_map->loop, c->mpls_map->cleanup_event);
+ callback_activate(&c->mpls_map->cleanup);
}
static void
DBGL("New FEC Map %p", m);
m->pool = p;
- m->loop = loop;
- m->cleanup_event = ev_new_init(p, mpls_fec_map_cleanup, m);
+ callback_init(&m->cleanup, mpls_fec_map_cleanup, loop);
m->channel = C;
channel_add_obstacle(C);
}
static void
-mpls_fec_map_cleanup(void *_m)
+mpls_fec_map_cleanup(callback *cb)
{
- struct mpls_fec_map *m = _m;
+ SKIP_BACK_DECLARE(struct mpls_fec_map, m, cleanup, cb);
_Bool finished = (m->channel->channel_state == CS_STOP);
HASH_WALK_DELSAFE(m->label_hash, next_l, fec)
if (lfuc_finished(&fec->uc))
if (finished)
{
- ev_postpone(m->cleanup_event);
+ callback_cancel(&m->cleanup);
channel_del_obstacle(m->channel);
}
}
inline void mpls_unlock_fec(struct mpls_fec *fec)
{
- lfuc_unlock(&fec->uc, birdloop_event_list(fec->map->loop), fec->map->cleanup_event);
+ lfuc_unlock(&fec->uc, &fec->map->cleanup);
DBGL("Unlocked FEC %p %u (deferred)", fec, fec->label);
}
struct mpls_fec_map {
pool *pool; /* Pool for FEC map */
- struct birdloop *loop; /* Owner's loop for sending events */
- event *cleanup_event; /* Event for unlocked FEC cleanup */
+ callback cleanup; /* Callback for unlocked FEC cleanup */
slab *slabs[4]; /* Slabs for FEC allocation */
HASH(struct mpls_fec) net_hash; /* Hash table for MPLS_POLICY_PREFIX FECs */
HASH(struct mpls_fec) attrs_hash; /* Hash table for MPLS_POLICY_AGGREGATE FECs */