c->in_req = (struct rt_import_request) {
.name = rn,
+ .list = proto_work_list(c->proto),
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_dump_import_req,
.log_state_change = channel_import_log_state_change,
c->in_table = &cat->cat;
c->in_table->push = (struct rt_import_request) {
.name = cat->name,
+ .list = proto_work_list(c->proto),
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_in_push_dump_req,
.log_state_change = channel_push_log_state_change,
c->out_table = &cat->cat;
c->out_table->push = (struct rt_import_request) {
.name = cat->name,
+ .list = proto_work_list(c->proto),
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_out_push_dump_req,
.log_state_change = channel_push_log_state_change,
struct hmap id_map;
struct hostcache *hostcache;
struct event *prune_event; /* Event to prune abandoned routes */
+ struct event *announce_event; /* Event to announce pending exports */
struct event *ec_event; /* Event to prune finished exports */
struct event *hcu_event; /* Event to update host cache */
struct event *delete_event; /* Event to delete the table */
struct timer *settle_timer; /* Settle time for notifications */
list pending_exports; /* List of packed struct rt_pending_export */
- btime base_export_time; /* When first pending export was announced */
- struct timer *export_timer;
struct rt_pending_export *first_export; /* First export to announce */
u64 next_export_seq; /* The next export will have this ID */
byte sorted; /* Routes of network are sorted according to rte_better() */
btime min_settle_time; /* Minimum settle time for notifications */
btime max_settle_time; /* Maximum settle time for notifications */
- btime export_settle_time; /* Delay before exports are announced */
uint cork_limit; /* Number of routes pending on export before imports are corked */
};
char *name;
u8 trace_routes;
+ event_list *list; /* Where to schedule import events */
+
void (*dump_req)(struct rt_import_request *req);
void (*log_state_change)(struct rt_import_request *req, u8 state);
/* Preimport is called when the @new route is about to be inserted, replacing @old.
u8 stale_pruned; /* Value of stale_valid when the last prune finished */
u8 stale_pruning; /* Value of stale_valid when the last prune started */
+ struct event *export_announce_event; /* Event to announce new exports of this import */
struct event *stopped; /* Event to run when import is stopped */
};
rt_notify_hostcache(tab, net);
}
- rt_schedule_notify(tab);
-
if (EMPTY_LIST(tab->exports) && EMPTY_LIST(tab->pending_exports))
{
/* No export hook and no pending exports to clean up. We may free the route immediately. */
{
ev_cork(&rt_cork);
tab->cork_active = 1;
- tm_start_in(tab->export_timer, 0, tab->loop);
}
- else if (!tm_active(tab->export_timer))
- tm_start_in(tab->export_timer, tab->config->export_settle_time, tab->loop);
}
static struct rt_pending_export *
}
static void
-rt_announce_exports(timer *tm)
+rt_announce_exports(void *data)
{
- rtable_private *tab = tm->data;
+ rtable_private *tab = data;
ASSERT_DIE(birdloop_inside(tab->loop));
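+ /* Settled notifications are now scheduled from the announce event */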
+ rt_schedule_notify(tab);
+
struct rt_export_hook *c; node *n;
WALK_LIST2(c, n, tab->exports, n)
{
}
}
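+/* Per-import announce kick, run from the import's event list: either finish
+ * the cleanup of a stopped import or ask the table loop to announce exports. */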
+static void
+rt_import_announce_exports(void *data)
+{
+ struct rt_import_hook *hook = data;
+ RT_LOCKED(hook->table, tab)
+ {
+ if (hook->import_state == TIS_CLEARED)
+ {
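+ /* The import has been stopped and flushed; finish the cleanup here. */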
+ rfree(hook->export_announce_event);
+
+ ev_send(hook->stopped->list, hook->stopped);
+ rem_node(&hook->n);
+ mb_free(hook);
+ rt_unlock_table(tab);
+ }
+ else
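+ /* Still active; let the table loop announce the pending exports. */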
+ ev_send_loop(tab->loop, tab->announce_event);
+ }
+}
+
static struct rt_pending_export *
rt_last_export(rtable_private *tab)
{
rte_announce(table, net, new_stored, old_stored,
net->routes, old_best_stored);
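+ /* Schedule the import's announce event to get the new export announced */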
+ ev_send(req->list, c->export_announce_event);
+
if (!net->routes &&
(table->gc_counter++ >= table->config->gc_max_ops) &&
(table->gc_time + table->config->gc_min_time <= current_time()))
hook->req = req;
hook->table = t;
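+ /* Announcements of this import's exports are driven by its own event */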
+ hook->export_announce_event = ev_new_init(tab->rp, rt_import_announce_exports, hook);
+
if (!hook->stale_set)
hook->stale_set = hook->stale_valid = hook->stale_pruning = hook->stale_pruned = 1;
struct rt_import_hook *hook = req->hook;
RT_LOCK(hook->table);
+
rt_schedule_prune(RT_PRIV(hook->table));
rt_set_import_state(hook, TIS_STOP);
-
hook->stopped = stopped;
+
RT_UNLOCK(hook->table);
}
t->loop = birdloop_new(p, DOMAIN_ORDER(rtable), nb);
+ t->announce_event = ev_new_init(p, rt_announce_exports, t);
t->ec_event = ev_new_init(p, rt_export_cleanup, t);
t->prune_event = ev_new_init(p, rt_prune_table, t);
t->hcu_event = ev_new_init(p, rt_update_hostcache, t);
t->nhu_event->cork = &rt_cork;
t->prune_event->cork = &rt_cork;
- t->export_timer = tm_new_init(p, rt_announce_exports, t, 0, 0);
t->last_rt_change = t->gc_time = current_time();
t->next_export_seq = 1;
if (!first_export || (first_export->seq >= ih->flush_seq))
{
ih->import_state = TIS_CLEARED;
- ev_send(ih->stopped->list, ih->stopped);
- rem_node(&ih->n);
- mb_free(ih);
- rt_unlock_table(tab);
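+ /* The hook teardown is deferred to rt_import_announce_exports() */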
+ ev_send(ih->req->list, ih->export_announce_event);
}
-
- if (EMPTY_LIST(tab->pending_exports) && tm_active(tab->export_timer))
- tm_stop(tab->export_timer);
+ if (EMPTY_LIST(tab->pending_exports) && ev_active(tab->announce_event))
+ ev_postpone(tab->announce_event);
/* If reduced to at most one export block pending */
if (tab->cork_active &&
if (atomic_fetch_and_explicit(&tab->nhu_state, NHU_SCHEDULED, memory_order_acq_rel) != NHU_RUNNING)
ev_send_loop(tab->loop, tab->nhu_event);
+ ev_send_loop(tab->loop, tab->announce_event);
+
rt_unlock_table(tab);
}
r->loop = NULL;
r->prune_event->list = r->ec_event->list = NULL;
r->nhu_event->list = r->hcu_event->list = NULL;
+ r->announce_event->list = NULL;
ev_send(r->delete_event->list, r->delete_event);
}