*/
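+/* Cork-wait slot: while the table is corked, the event sits on
+ * rt_cork.queue; its hook forwards to the callback, which then runs
+ * in the loop it was initialized with. */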
+struct rt_uncork_callback {
+ event ev;
+ callback cb;
+};
+
struct rt_export_hook;
extern uint rtable_max_id;
* obstacle from this routing table.
*/
struct rt_export_request best_req; /* Internal request from best route announcement cleanup */
- struct event *nhu_uncork_event; /* Helper event to schedule NHU on uncork */
- struct event *hcu_uncork_event; /* Helper event to schedule HCU on uncork */
+ struct rt_uncork_callback nhu_uncork; /* Helper callback to schedule NHU on uncork */
+ struct rt_uncork_callback hcu_uncork; /* Helper callback to schedule HCU on uncork */
struct timer *prune_timer; /* Timer for periodic pruning / GC */
struct event *prune_event; /* Event for prune execution */
btime last_rt_change; /* Last time when route changed */
#define RT_PUB(tab) SKIP_BACK(rtable, priv, tab)
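+/* The cork counter packs two numbers into one u64: bits 0..43 count
+ * held corks, bits 44 and up count rt_cork_release() calls in progress. */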
+#define RT_UNCORKING (1ULL << 44)
+
extern struct rt_cork {
- _Atomic uint active;
+ _Atomic u64 active; /* Bits 0..43: held corks; bits 44+: uncorks in progress */
+ DOMAIN(resource) dom; /* Serializes flushing of the uncork queue */
event_list queue;
- event run;
} rt_cork;
static inline void rt_cork_acquire(void)
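+/* Example: two corks held, two releasers racing.
+ *   active = 2
+ *   A: fetch_add(RT_UNCORKING) -> upd = (1<<44)+2; high 1 != low 2, no flush
+ *   B: fetch_add(RT_UNCORKING) -> upd = (2<<44)+2; high 2 == low 2, B flushes
+ *   A, B: fetch_sub(RT_UNCORKING + 1) -> active = 0
+ * Only the last releaser to announce itself sees the counts match,
+ * so the queue is flushed exactly once per full release. */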
static inline void rt_cork_release(void)
{
- if (atomic_fetch_sub_explicit(&rt_cork.active, 1, memory_order_acq_rel) == 1)
- ev_send(&global_work_list, &rt_cork.run);
+ u64 upd = atomic_fetch_add_explicit(&rt_cork.active, RT_UNCORKING, memory_order_acq_rel) + RT_UNCORKING;
+
+ /* Actually released? Equal counts mean every remaining cork is being released right now. */
+ if ((upd >> 44) == (upd & (RT_UNCORKING - 1)))
+ {
+ LOCK_DOMAIN(resource, rt_cork.dom);
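+ /* Wait for every rt_cork_check() reader that saw the cork to finish
+ * enqueueing its event before the queue is flushed below */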
+ synchronize_rcu();
+ ev_run_list(&rt_cork.queue);
+ UNLOCK_DOMAIN(resource, rt_cork.dom);
+ }
+
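+ /* Drop both our uncork-in-progress mark and the cork itself */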
+ atomic_fetch_sub_explicit(&rt_cork.active, RT_UNCORKING + 1, memory_order_acq_rel);
}
-static inline _Bool rt_cork_check(event *e)
-{
- int corked = (atomic_load_explicit(&rt_cork.active, memory_order_acquire) > 0);
- if (corked)
- ev_send(&rt_cork.queue, e);
+void rt_cork_send_callback(void *_data);
- if (atomic_load_explicit(&rt_cork.active, memory_order_acquire) == 0)
- ev_send(&global_work_list, &rt_cork.run);
+static inline _Bool rt_cork_check(struct rt_uncork_callback *rcc)
+{
+ /* Wait until all uncorks have finished */
+ while (1)
+ {
+ rcu_read_lock();
+
+ /* Not corked */
+ u64 corked = atomic_load_explicit(&rt_cork.active, memory_order_acquire);
+ if (!corked)
+ {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ /* Yes, corked */
+ if (corked < RT_UNCORKING)
+ {
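+ /* First use of this slot: point the event at the forwarding hook */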
+ if (!rcc->ev.hook)
+ {
+ rcc->ev.hook = rt_cork_send_callback;
+ rcc->ev.data = rcc;
+ }
+
+ ev_send(&rt_cork.queue, &rcc->ev);
+ rcu_read_unlock();
+ return 1;
+ }
- return corked;
+ /* An uncork is flushing the queue right now; let it finish, then retry */
+ rcu_read_unlock();
+ birdloop_yield();
+ }
}
struct rt_pending_export {
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
-static void rt_hcu_uncork(void *_tab);
+static void rt_hcu_uncork(callback *);
static void rt_update_hostcache(void *tab);
static void rt_next_hop_update(void *_tab);
-static void rt_nhu_uncork(void *_tab);
+static void rt_nhu_uncork(callback *);
static inline void rt_next_hop_resolve_rte(rte *r);
static inline void rt_flowspec_resolve_rte(rte *r, struct channel *c);
static void rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg);
static void rt_prune_table(void *_tab);
static void rt_check_cork_low(struct rtable_private *tab);
static void rt_check_cork_high(struct rtable_private *tab);
-static void rt_cork_release_hook(void *);
static void rt_shutdown(void *);
static void rt_delete(void *);
hmap_set(&t->id_map, 0);
t->nhu_event = ev_new_init(p, rt_next_hop_update, t);
- t->nhu_uncork_event = ev_new_init(p, rt_nhu_uncork, t);
+ callback_init(&t->nhu_uncork.cb, rt_nhu_uncork, t->loop);
t->prune_timer = tm_new_init(p, rt_prune_timer, t, 0, 0);
t->prune_event = ev_new_init(p, rt_prune_table, t);
t->last_rt_change = t->gc_time = current_time();
init_list(&routing_tables);
init_list(&deleted_routing_tables);
ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
- rt_cork.run = (event) { .hook = rt_cork_release_hook };
+ rt_cork.dom = DOMAIN_NEW_RCU_SYNC(resource);
idm_init(&rtable_idm, rt_table_pool, 256);
ea_register_init(&ea_roa_aggregated);
}
}
-static void
-rt_cork_release_hook(void *data UNUSED)
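+/* Run from rt_cork.queue in the main loop; just hand off to the callback,
+ * which callback_activate() schedules in the loop it was initialized with. */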
+void
+rt_cork_send_callback(void *_rcc)
{
- do birdloop_yield();
- while (
- !atomic_load_explicit(&rt_cork.active, memory_order_acquire) &&
- ev_run_list(&rt_cork.queue)
- );
+ struct rt_uncork_callback *rcc = _rcc;
+ callback_activate(&rcc->cb);
}
/**
}
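+/* NHU uncork: initialized with the table's loop, so it runs there; the
+ * table itself is recovered from the embedded callback via SKIP_BACK. */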
static void
-rt_nhu_uncork(void *_tab)
+rt_nhu_uncork(callback *cb)
{
- RT_LOCKED((rtable *) _tab, tab)
+ RT_LOCKED(SKIP_BACK(rtable, priv.nhu_uncork.cb, cb), tab)
{
ASSERT_DIE(tab->nhu_corked);
ASSERT_DIE(tab->nhu_state == 0);
return;
/* Check corkedness */
- if (rt_cork_check(tab->nhu_uncork_event))
+ if (rt_cork_check(&tab->nhu_uncork))
{
rt_trace(tab, D_STATES, "Next hop updater corked");
hc->tab = RT_PUB(tab);
tab->hcu_event = ev_new_init(tab->rp, rt_update_hostcache, tab);
- tab->hcu_uncork_event = ev_new_init(tab->rp, rt_hcu_uncork, tab);
+ callback_init(&tab->hcu_uncork.cb, rt_hcu_uncork, tab->loop);
tab->hostcache = hc;
ev_send_loop(tab->loop, tab->hcu_event);
}
static void
-rt_hcu_uncork(void *_tab)
+rt_hcu_uncork(callback *cb)
{
- RT_LOCKED((rtable *) _tab, tab)
- ev_send_loop(tab->loop, tab->hcu_event);
+ SKIP_BACK_DECLARE(rtable, tab, priv.hcu_uncork.cb, cb);
+ ev_send_loop(tab->loop, tab->hcu_event);
}
static void
if (rt_export_get_state(&hc->req) == TES_DOWN)
return;
- if (rt_cork_check(tab->hcu_uncork_event))
+ if (rt_cork_check(&tab->hcu_uncork))
{
rt_trace(tab, D_STATES, "Hostcache update corked");
return;
bgp_close(p);
}
- rfree(p->uncork_do_ev);
-
p->neigh = NULL;
BGP_TRACE(D_EVENTS, "Down");
p->last_rx_update = 0;
p->event = ev_new_init(p->p.pool, bgp_decision, p);
- p->uncork_main_ev = ev_new_init(p->p.pool, bgp_uncork_main, p);
- p->uncork_do_ev = ev_new_init(p->p.pool, bgp_do_uncork, p);
+ callback_init(&p->uncork.cb, bgp_do_uncork, p->p.loop);
p->startup_timer = tm_new_init(p->p.pool, bgp_startup_timeout, p, 0, 0);
p->gr_timer = tm_new_init(p->p.pool, bgp_graceful_restart_timeout, p, 0, 0);
struct bgp_listen_request listen; /* Shared listening socket */
struct bfd_request *bfd_req; /* BFD request, if BFD is used */
struct birdsock *postponed_sk; /* Postponed incoming socket for dynamic BGP */
- event *uncork_main_ev; /* Uncork event for mainloop */
- event *uncork_do_ev; /* Uncork event to actually uncork */
+ struct rt_uncork_callback uncork; /* Uncork hook */
struct bgp_stats stats; /* BGP statistics */
btime last_established; /* Last time of enter/leave of established state */
btime last_rx_update; /* Last time of RX update */
void bgp_kick_tx(void *vconn);
void bgp_tx(struct birdsock *sk);
int bgp_rx(struct birdsock *sk, uint size);
-void bgp_uncork_main(void *vp);
-void bgp_do_uncork(void *vp);
+void bgp_do_uncork(callback *);
const char * bgp_error_dsc(unsigned code, unsigned subcode);
void bgp_log_error(struct bgp_proto *p, u8 class, char *msg, unsigned code, unsigned subcode, byte *data, unsigned len);
}
void
-bgp_do_uncork(void *vp)
+bgp_do_uncork(callback *cb)
{
- struct bgp_proto *p = vp;
+ SKIP_BACK_DECLARE(struct bgp_proto, p, uncork.cb, cb);
+
ASSERT_DIE(birdloop_inside(p->p.loop));
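  /* Drop the loop reference taken when the cork paused RX */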
ASSERT_DIE(p->p.active_loops--);
}
}
-void
-bgp_uncork_main(void *vp)
-{
- /* The uncork event is run from &main_birdloop and there is no useful way how
- * to assign the target loop to it, thus we have to lock it ourselves. */
-
- struct bgp_proto *p = vp;
- ev_send_loop(p->p.loop, p->uncork_do_ev);
-}
-
/**
* bgp_rx - handle received data
* @sk: socket
{
if ((conn->state == BS_CLOSE) || (conn->sk != sk))
return 0;
- if ((conn->state == BS_ESTABLISHED) && rt_cork_check(conn->bgp->uncork_main_ev))
+ if ((conn->state == BS_ESTABLISHED) && rt_cork_check(&conn->bgp->uncork))
{
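+    /* Corked: stop reading from the socket until the table cork is released */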
sk_pause_rx(p->p.loop, sk);
p->p.active_loops++;