u32 hash_key;
u32 uc;
u32 debug;
- struct callback *prune_callback;
- event *stop;
+ callback prune;
+ callback *stop;
};
extern DOMAIN(attrs) attrs_domain;
static inline void rt_unlock_source(struct rte_src *src)
{
- lfuc_unlock(&src->uc, src->owner->prune_callback);
+ lfuc_unlock(&src->uc, &src->owner->prune);
}
#ifdef RT_SOURCE_DEBUG
#define rt_unlock_source(x) ( log(L_INFO "Unlock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_unlock_source_internal(x) )
#endif
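For orientation: SKIP_BACK and SKIP_BACK_DECLARE, used throughout this patch,
are BIRD's container-of idiom, recovering the enclosing structure from a
pointer to one of its embedded members. A minimal sketch of the same idea in
plain C (illustrative only; the real macros in BIRD's lib headers add type
checking):

#include <stddef.h>

/* Hypothetical stand-in for BIRD's SKIP_BACK; not the actual macro */
#define CONTAINER_OF_SKETCH(type, member, ptr) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))

/* Recovering a rte_owner from its embedded prune callback, i.e. what
 * SKIP_BACK_DECLARE(struct rte_owner, o, prune, cb) expands to below:
 *   struct rte_owner *o = CONTAINER_OF_SKETCH(struct rte_owner, prune, cb);
 */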
-void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
-void rt_destroy_sources(struct rte_owner *, event *);
+void rt_init_sources(struct rte_owner *, const char *name, struct birdloop *loop);
+void rt_destroy_sources(struct rte_owner *, callback *);
void rt_dump_sources(struct rte_owner *);
/* The same hostentry, but different dependent table */
SKIP_BACK_DECLARE(struct hostentry_adata, head, ad, heea->u.ad);
struct hostentry *he = head->he;
- ea_set_hostentry(&e.attrs, m->channel->table, he->owner, he->addr, he->link,
+ ea_set_hostentry(&e.attrs, m->channel->table, he->owner->tab, he->addr, he->link,
HOSTENTRY_LABEL_COUNT(head), head->labels);
}
else
proto_neigh_rem_node(&p->neighbors, n);
if ((p->proto_state == PS_DOWN) && EMPTY_TLIST(proto_neigh, &p->neighbors))
- proto_send_event(p, p->event);
+ callback_activate(&p->done);
n->proto = NULL;
ASSERT_DIE(!rt_export_feed_active(&c->reimporter));
channel_set_state(c, CS_DOWN);
- proto_send_event(c->proto, c->proto->event);
+ callback_activate(&c->proto->done);
break;
case CS_PAUSE:
/* Schedule protocol shutdown */
if (proto_is_done(c->proto))
- proto_send_event(c->proto, c->proto->event);
+ callback_activate(&c->proto->done);
}
void
static void
-proto_event(void *ptr)
+proto_event(callback *cb)
{
- struct proto *p = ptr;
+ SKIP_BACK_DECLARE(struct proto, p, done, cb);
if (p->do_stop)
{
p->vrf = c->vrf;
proto_add_after(&global_proto_list, p, after);
- p->event = ev_new_init(proto_pool, proto_event, p);
+ callback_init(&p->done, proto_event, p->loop);
PD(p, "Initializing%s", p->disabled ? " [disabled]" : "");
{
p->loop = birdloop_new(proto_pool, p->cf->loop_order, p->cf->loop_max_latency, "Protocol %s", p->cf->name);
p->pool = birdloop_pool(p->loop);
+ ASSERT_DIE(!callback_is_active(&p->done));
+ p->done.target = p->loop;
}
else
p->pool = rp_newf(proto_pool, the_bird_domain.the_bird, "Protocol %s", p->cf->name);
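The retargeting above is legal only while the callback is inactive, hence the
assertion. The calls used throughout this patch (callback_init,
callback_activate, callback_cancel, callback_is_active) suggest roughly the
following shape; a hypothetical sketch, not the actual definition:

#include <stdatomic.h>
#include <stdbool.h>

struct birdloop;  /* opaque here */

/* Hypothetical sketch of the callback object implied by this patch;
 * the real definition and its synchronization live elsewhere. */
typedef struct callback {
  void (*hook)(struct callback *);  /* Run in the target loop's context */
  struct birdloop *target;          /* Loop the callback is delivered to */
  _Atomic bool active;              /* Set by activate, cleared on run/cancel */
} callback;

/* callback_init(cb, hook, loop): bind hook and target loop, mark inactive.
 * callback_activate(cb): queue onto the target loop unless already pending.
 * callback_cancel(cb): withdraw a pending activation.
 * callback_is_active(cb): true while an activation is pending. */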
OBSREF_CLEAR(p->global_config);
proto_remove_channels(p);
proto_rem_node(&global_proto_list, p);
- rfree(p->event);
+ callback_cancel(&p->done);
mb_free(p->message);
mb_free(p);
if (!nc)
{
p->active = 1;
- rt_init_sources(&p->sources, p->name, proto_event_list(p));
+ rt_init_sources(&p->sources, p->name, p->loop);
if (!p->sources.class)
p->sources.class = &default_rte_owner_class;
p->pool_up = NULL;
proto_stop_channels(p);
- rt_destroy_sources(&p->sources, p->event);
+ rt_destroy_sources(&p->sources, &p->done);
p->do_stop = 1;
- proto_send_event(p, p->event);
+ callback_activate(&p->done);
}
static void
/* Shutdown is finished in the protocol event */
if (proto_is_done(p))
- proto_send_event(p, p->event);
+ callback_activate(&p->done);
}
as the protocol enters the STOP / DOWN state */
pool *pool_inloop; /* Pool containing local objects which need to be freed
before the protocol's birdloop actually stops, like olocks */
- event *event; /* Protocol event */
+ callback done; /* Protocol shutdown checker */
timer *restart_timer; /* Timer to restart the protocol from limits */
event *restart_event; /* Event to restart/shutdown the protocol from limits */
struct birdloop *loop; /* BIRDloop running this protocol */
_Atomic u32 routes_block_size; /* Size of the route object pointer block */ \
struct f_trie * _Atomic trie; /* Trie of prefixes defined in fib */ \
event *nhu_event; /* Nexthop updater */ \
- event *hcu_event; /* Hostcache updater */ \
+ callback shutdown_finished; /* Shutdown finisher */ \
struct rt_exporter export_all; /* Route export journal for all routes */ \
struct rt_exporter export_best; /* Route export journal for best routes */ \
extern struct rt_cork {
_Atomic uint active;
- event_list queue;
- event run;
+ struct rt_cork_callbacks {
+ struct rt_cork_callbacks *_Atomic next;
+ callback *uncork_block[0];
+ } *_Atomic callbacks;
} rt_cork;
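The cork thus keeps a lock-free list of blocked callbacks instead of an event
queue. Registration from rt_cork_check() can then be a Treiber-style push; a
hedged sketch, assuming one callback per node (the flexible uncork_block[]
array suggests the real code batches several per allocation):

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical enqueue of a single blocked callback; the release side
 * would pop the whole list, activate everything, and free the nodes. */
static void cork_enqueue_sketch(callback *cb)
{
  struct rt_cork_callbacks *node = malloc(sizeof *node + sizeof(callback *));
  node->uncork_block[0] = cb;

  /* Classic CAS push onto the atomic list head */
  struct rt_cork_callbacks *head =
    atomic_load_explicit(&rt_cork.callbacks, memory_order_relaxed);
  do
    atomic_store_explicit(&node->next, head, memory_order_relaxed);
  while (!atomic_compare_exchange_weak_explicit(&rt_cork.callbacks,
        &head, node, memory_order_release, memory_order_relaxed));
}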
static inline void rt_cork_acquire(void)
-  ev_send(&global_work_list, &rt_cork.run);
+  /* rt_cork.run is gone; releasing the cork now activates the queued
+     uncork callbacks directly */
}
-static inline _Bool rt_cork_check(event *e)
+static inline _Bool rt_cork_check(callback *cb)
{
int corked = (atomic_load_explicit(&rt_cork.active, memory_order_acquire) > 0);
if (corked)
ip_addr link; /* (link-local) IP address of host, used as gw
if host is directly attached */
rtable *tab; /* Dependent table, part of key */
- rtable *owner; /* Nexthop owner table */
+ struct hostcache *owner; /* Nexthop owner hostcache (use with care) */
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
u32 igp_metric; /* Chosen route IGP metric */
unsigned hash_order, hash_shift;
unsigned hash_max, hash_min;
unsigned hash_items;
+  u8 corked;            /* Blocked by the cork, waiting for uncork */
linpool *lp; /* Linpool for trie */
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
list hostentries; /* List of all hostentries */
struct rt_export_request req; /* Notifier */
event source_event;
+ callback update; /* Hostcache updater */
+ callback uncork; /* Hostcache uncorker */
};
struct rt_digestor {
ea_gen_hostentry_freed(const eattr *ea)
{
struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
- lfuc_unlock(&had->he->uc, birdloop_event_list(had->he->owner->loop), had->he->owner->hcu_event);
+ lfuc_unlock(&had->he->uc, &had->he->owner->update);
}
struct ea_class ea_gen_hostentry = {
if (p->stop)
bug("Stopping route owner asked for another source.");
- ASSERT_DIE(birdloop_inside(p->list->loop));
+ ASSERT_DIE(birdloop_inside(p->prune.target));
struct rte_src *src = rt_find_source(p, id);
return src;
}
-static inline void
-rt_done_sources(struct rte_owner *o)
-{
- ev_send(o->list, o->stop);
-}
-
void
-rt_prune_sources(void *data)
+rt_prune_sources(callback *cb)
{
- struct rte_owner *o = data;
+ SKIP_BACK_DECLARE(struct rte_owner, o, prune, cb);
HASH_WALK_FILTER(o->hash, next, src, sp)
{
if (o->stop && !o->uc)
{
- rfree(o->prune);
+ callback_cancel(&o->prune);
RTA_UNLOCK;
if (o->debug & D_EVENTS)
log(L_TRACE "%s: all rte_src's pruned, scheduling stop event", o->name);
- rt_done_sources(o);
+ callback_activate(o->stop);
}
else
RTA_UNLOCK;
}
void
-rt_init_sources(struct rte_owner *o, const char *name, event_list *list)
+rt_init_sources(struct rte_owner *o, const char *name, struct birdloop *loop)
{
RTA_LOCK;
HASH_INIT(o->hash, rta_pool, RSH_INIT_ORDER);
o->hash_key = random_u32();
o->uc = 0;
o->name = name;
- o->prune = ev_new_init(rta_pool, rt_prune_sources, o);
+ callback_init(&o->prune, rt_prune_sources, loop);
o->stop = NULL;
- o->list = list;
RTA_UNLOCK;
if (o->debug & D_EVENTS)
log(L_TRACE "%s: initialized rte_src owner", o->name);
}
void
-rt_destroy_sources(struct rte_owner *o, event *done)
+rt_destroy_sources(struct rte_owner *o, callback *done)
{
o->stop = done;
if (o->debug & D_EVENTS)
log(L_TRACE "%s: rte_src owner destroy requested, already clean, scheduling stop event", o->name);
- RTA_LOCK;
- rfree(o->prune);
- RTA_UNLOCK;
-
- rt_done_sources(o);
+ callback_activate(o->stop);
}
else
if (o->debug & D_EVENTS)
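Taken together, a protocol's source bookkeeping under the new API reduces to
the following shape (a sketch using only calls shown in this patch):

/* Sketch: rte_src owner lifecycle, callback edition */
static void proto_sources_sketch(struct proto *p)
{
  /* Startup: pruning now runs as a callback in the protocol's own loop */
  rt_init_sources(&p->sources, p->name, p->loop);

  /* ... sources get created, locked and unlocked during operation;
   * the last rt_unlock_source() activates the owner's prune callback ... */

  /* Shutdown: p->done is activated once every rte_src has been pruned */
  rt_destroy_sources(&p->sources, &p->done);
}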
uint s = 0;
if (ipa_nonzero(had->he->link) && !ipa_equal(had->he->link, had->he->addr))
- s = bsnprintf(buf, size, "via %I %I table %s", had->he->addr, had->he->link, had->he->owner->name);
+ s = bsnprintf(buf, size, "via %I %I table %s", had->he->addr, had->he->link, had->he->owner->tab->name);
else
- s = bsnprintf(buf, size, "via %I table %s", had->he->addr, had->he->owner->name);
+ s = bsnprintf(buf, size, "via %I table %s", had->he->addr, had->he->owner->tab->name);
uint lc = HOSTENTRY_LABEL_COUNT(had);
if (!lc)
#define RTAH_REHASH rta_rehash
#define RTAH_PARAMS /8, *2, 2, 2, 12, 28
-static void RTAH_REHASH(void *_ UNUSED) {
+static void RTAH_REHASH(callback *_ UNUSED) {
int step;
RTA_LOCK;
for (uint i=0; i<ARRAY_SIZE(ea_slab_sizes); i++)
ea_slab[i] = sl_new(rta_pool, ea_slab_sizes[i]);
- SPINHASH_INIT(rta_hash_table, RTAH, rta_pool, &global_work_list);
+ SPINHASH_INIT(rta_hash_table, RTAH, rta_pool, &main_birdloop);
rte_src_init();
ea_class_init();
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
-static void rt_hcu_uncork(void *_tab);
-static void rt_update_hostcache(void *tab);
+static void rt_hcu_update(callback *);
+static void rt_hcu_uncork(callback *);
static void rt_next_hop_update(void *_tab);
static void rt_nhu_uncork(void *_tab);
static inline void rt_next_hop_resolve_rte(rte *r);
if (t->id >= rtable_max_id)
rtable_max_id = t->id + 1;
- t->netindex = netindex_hash_new(birdloop_pool(t->loop), birdloop_event_list(t->loop), cf->addr_type);
+ t->netindex = netindex_hash_new(birdloop_pool(t->loop), t->loop, cf->addr_type);
atomic_store_explicit(&t->routes, mb_allocz(p, RT_INITIAL_ROUTES_BLOCK_SIZE * sizeof(net)), memory_order_relaxed);
atomic_store_explicit(&t->routes_block_size, RT_INITIAL_ROUTES_BLOCK_SIZE, memory_order_relaxed);
}
static void
-rt_shutdown_finished(void *tab_)
+rt_shutdown_finished(struct callback *cb)
{
- rtable *t = tab_;
- RT_LOCK(t, tab);
- birdloop_stop_self(t->loop, rt_delete, t);
+ RT_LOCK(SKIP_BACK(rtable, shutdown_finished, cb), tab);
+ birdloop_stop_self(tab->loop, rt_delete, tab);
}
static void
rtable *t = tab_;
RT_LOCK(t, tab);
+ callback_init(&tab->shutdown_finished, rt_shutdown_finished, tab->loop);
+
if (tab->export_digest)
{
rtex_export_unsubscribe(&tab->export_digest->req);
rt_exporter_shutdown(&tab->export_best, NULL);
rt_exporter_shutdown(&tab->export_all, NULL);
- netindex_hash_delete(tab->netindex,
- ev_new_init(tab->rp, rt_shutdown_finished, tab),
- birdloop_event_list(tab->loop));
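+  /* Once the netindex hash is fully freed, it activates tab->shutdown_finished */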
+ netindex_hash_delete(tab->netindex, &tab->shutdown_finished);
}
static void
rt_check_cork_low(tab);
- if (tab->hcu_event)
- {
- if (ev_get_list(tab->hcu_event) == &rt_cork.queue)
- ev_postpone(tab->hcu_event);
-
+ /* Stop the hostcache updater */
+ if (rt_export_get_state(&tab->hostcache->req) != TES_DOWN)
rtex_export_unsubscribe(&tab->hostcache->req);
- }
rt_unlock_table(tab);
}
RT_EXPORT_WALK(&hc->req, u)
{
+ if (callback_is_active(&hc->update))
+ continue;
+
const net_addr *n = NULL;
switch (u->kind)
{
continue;
RT_LOCK(hc->tab, tab);
- if (ev_active(tab->hcu_event))
- continue;
-
if (!trie_match_net(hc->trie, n))
{
/* No interest in this update, mark seen only */
hc->req.name, n, NET_TO_INDEX(n)->index);
if ((rt_export_get_state(&hc->req) == TES_READY)
- && !ev_active(tab->hcu_event))
+ && !callback_is_active(&hc->update))
{
if (hc->req.trace_routes & D_EVENTS)
log(L_TRACE "%s requesting HCU", hc->req.name);
- ev_send_loop(tab->loop, tab->hcu_event);
+ callback_activate(&hc->update);
}
}
hc->trie = f_new_trie(hc->lp, 0);
hc->tab = RT_PUB(tab);
-
- tab->hcu_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
- tab->hcu_uncork_event = ev_new_init(tab->rp, rt_hcu_uncork, tab);
tab->hostcache = hc;
- ev_send_loop(tab->loop, tab->hcu_event);
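+  /* Hold the table locked until the updater observes its export request down */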
+ rt_lock_table(tab);
+  callback_init(&hc->update, rt_hcu_update, tab->loop);
+  callback_init(&hc->uncork, rt_hcu_uncork, tab->loop);
+ callback_activate(&hc->update);
}
static void
}
static void
-rt_hcu_uncork(void *_tab)
+rt_update_hostcache(struct hostcache *hc, rtable **nhu_pending)
{
- RT_LOCKED((rtable *) _tab, tab)
- ev_send_loop(tab->loop, tab->hcu_event);
-}
-
-static void
-rt_update_hostcache(void *data)
-{
- rtable **nhu_pending;
-
- RT_LOCKED((rtable *) data, tab)
- {
- struct hostcache *hc = tab->hostcache;
+ RT_LOCK(hc->tab, tab);
/* Finish initialization */
if (!hc->req.name)
rtex_export_subscribe(&tab->export_best, &hc->req);
}
- /* Shutdown shortcut */
- if (rt_export_get_state(&hc->req) == TES_DOWN)
- return;
-
- if (rt_cork_check(tab->hcu_uncork_event))
- {
- rt_trace(tab, D_STATES, "Hostcache update corked");
- return;
- }
-
- /* Destination schedule map */
- nhu_pending = tmp_allocz(sizeof(rtable *) * rtable_max_id);
-
struct hostentry *he;
node *n, *x;
}
}
+static void
+rt_hcu_update(struct callback *cb)
+{
+ SKIP_BACK_DECLARE(struct hostcache, hc, update, cb);
+
+ /* Still corked, do nothing */
+ if (hc->corked)
+ return;
+
+ /* Shutdown shortcut */
+ if (hc->req.name && (rt_export_get_state(&hc->req) == TES_DOWN))
+ {
+ RT_LOCK(hc->tab, tab);
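+    /* Return the table lock taken when the hostcache was set up */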
+ rt_unlock_table(tab);
+ return;
+ }
+
+ /* Cork check */
+ if (rt_cork_check(&hc->uncork))
+ {
+ hc->corked = 1;
+    RT_LOCKED(hc->tab, tab)
+      rt_trace(tab, D_STATES, "Hostcache update corked");
+ return;
+ }
+
+ /* Destination schedule map */
+ rtable **nhu_pending = tmp_allocz(sizeof(rtable *) * rtable_max_id);
+
+ /* Find which destinations we have to ping */
+  rt_update_hostcache(hc, nhu_pending);
+
+ /* And do the pinging */
for (uint i=0; i<rtable_max_id; i++)
if (nhu_pending[i])
RT_LOCKED(nhu_pending[i], dst)
rt_schedule_nhu(dst);
}
+static void
+rt_hcu_uncork(struct callback *cb)
+{
+ SKIP_BACK_DECLARE(struct hostcache, hc, uncork, cb);
+ hc->corked = 0;
+ callback_activate(&hc->update);
+}
+
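The corked flag plus the dedicated uncork callback form a reusable gate: a
worker that hits the cork parks itself, and the uncork callback clears the
flag and reschedules the work. A generic sketch of the same pattern for some
other hypothetical consumer:

/* Hypothetical cork consumer following the hostcache pattern above */
struct worker_sketch {
  callback work, uncork;
  u8 corked;
};

static void worker_do(callback *cb)
{
  SKIP_BACK_DECLARE(struct worker_sketch, w, work, cb);

  if (w->corked)
    return;             /* parked; worker_uncork() reactivates us */

  if (rt_cork_check(&w->uncork))
  {
    w->corked = 1;      /* the cork keeps &w->uncork for later */
    return;
  }

  /* ... the actual work goes here ... */
}

static void worker_uncork(callback *cb)
{
  SKIP_BACK_DECLARE(struct worker_sketch, w, uncork, cb);
  w->corked = 0;
  callback_activate(&w->work);
}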
static struct hostentry *
rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep)
{