pool *rt_table_pool;
-static linpool *rte_update_pool;
-
list routing_tables;
+list deleted_routing_tables;
+
+struct rt_cork rt_cork;
/* Data structures for export journal */
#define RT_PENDING_EXPORT_ITEMS ((page_size - sizeof(struct rt_export_block)) / sizeof(struct rt_pending_export))
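
This macro sizes the export journal blocks: one memory page holds a block header plus as many pending-export slots as fit in the remainder. A standalone check of the arithmetic, with placeholder sizes (the real page_size and struct sizes come from BIRD, not from this sketch):

#include <stdio.h>

/* Placeholder sizes, illustration only: a 4096-byte page, a 32-byte
 * block header and 48-byte pending-export entries give
 * (4096 - 32) / 48 = 84 slots per block. */
int main(void)
{
  size_t page_size = 4096, header = 32, item = 48;
  printf("%zu pending exports per block\n", (page_size - header) / item);
  return 0;
}
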
static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
-static void rt_update_hostcache(void *tab);
-static void rt_next_hop_update(void *tab);
-static inline void rt_prune_table(void *tab);
+static void rt_update_hostcache(rtable *tab);
+static void rt_next_hop_update(rtable *tab);
+static inline void rt_next_hop_resolve_rte(rte *r);
+static inline void rt_flowspec_resolve_rte(rte *r, struct channel *c);
+static inline void rt_prune_table(rtable *tab);
static inline void rt_schedule_notify(rtable *tab);
-static void rt_feed_channel(void *);
-
-static inline void rt_export_used(rtable *tab);
-static void rt_export_cleanup(void *tab);
+static void rt_flowspec_notify(rtable *tab, net *net);
+static void rt_kick_prune_timer(rtable *tab);
+static void rt_feed_by_fib(void *);
+static void rt_feed_by_trie(void *);
+static void rt_feed_equal(void *);
+static void rt_feed_for(void *);
+static uint rt_feed_net(struct rt_export_hook *c, net *n);
+static void rt_check_cork_low(rtable *tab);
+static void rt_check_cork_high(rtable *tab);
+static void rt_cork_release_hook(void *);
+
+static inline void rt_export_used(struct rt_exporter *);
+static void rt_export_cleanup(rtable *tab);
+
-static inline void rte_update_lock(void);
-static inline void rte_update_unlock(void);
-
+static int rte_same(rte *x, rte *y);
const char *rt_import_state_name_array[TIS_MAX] = {
[TIS_DOWN] = "DOWN",
}
/* Nothing to export */
-  if (!new_best && !old_best)
-  {
+  if (new_best || old_best)
+    do_rt_notify(c, n, new_best, old_best);
+  else
    DBG("rt_notify_accepted: nothing to export\n");
-    return;
-  }
-
-  do_rt_notify(c, n, new_best, old_best);
-  rte_update_unlock(c);
-}
-
-
-static struct nexthop *
-nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
-{
- return nexthop_merge(nhs, &(a->nh), 1, 0, max, pool);
}
rte *
}
/* Prepare new merged route */
-  rte *new_merged = count ? rt_export_merged(c, feed, count, rte_update_pool, 0) : NULL;
-  rte *new_merged = count ? rt_export_merged(c, feed, count, c->rte_update_pool, 0) : NULL;
+  rte *new_merged = count ? rt_export_merged(c, feed, count, tmp_linpool, 0) : NULL;
if (new_merged || old_best)
do_rt_notify(c, n, new_merged, old_best);
rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
-
-  rte_update_lock(c);
-  rte *old = RTES_OR_NULL(rpe->old_best);
+  rte *o = RTE_VALID_OR_NULL(rpe->old_best);
struct rte_storage *new_best = rpe->new_best;
while (rpe)
}
-static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */
-
-static inline void
-rte_update_lock(void)
-{
-  rte_update_nest_cnt++;
-}
-
-static inline void
-rte_update_unlock(void)
-{
-  if (!--rte_update_nest_cnt)
-    lp_flush(rte_update_pool);
-}
-
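
The block removed above was a classic nesting-counter guard: recursive rte_update calls shared a single temporary linpool, and only the outermost unlock flushed it. A generic sketch of that idiom, for reference (plain C; scratch_flush stands in for lp_flush on the shared pool):

/* Sketch of the removed idiom: a recursion counter guarding a shared
 * scratch arena; only the outermost leave() releases the memory. */
static int nest_cnt;

static void scratch_flush(void) { /* stand-in for lp_flush(rte_update_pool) */ }

static void scratch_enter(void) { nest_cnt++; }

static void scratch_leave(void)
{
  if (!--nest_cnt)		/* the outermost caller has left */
    scratch_flush();
}

The merge drops this scheme in favour of tmp_linpool, which is why the rt_export_merged call above and the f_run call below lose their explicit pool argument together with the lock/unlock pairs.
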
-rte *
+int
channel_preimport(struct rt_import_request *req, rte *new, rte *old)
{
struct channel *c = SKIP_BACK(struct channel, in_req, req);
const struct filter *filter = c->in_filter;
struct channel_import_stats *stats = &c->import_stats;
-  rte_update_lock();
-  rte_update_lock(c);
if (new)
{
new->net = n;
rte_import(&c->in_req, n, new, src);
-  rte_update_unlock(c);
+  /* Now the route attributes are kept by the in-table cached version,
+   * so we may drop the local handle */
+ if (new && (c->in_keep & RIK_PREFILTER))
+ {
+ /* There may be some updates on top of the original attribute block */
+ ea_list *a = new->attrs;
+ while (a->next)
+ a = a->next;
+
+ ea_free(a);
+ }
+
-  rte_update_unlock();
}
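
This hunk, like channel_reload_export_bulk at the end of the section, treats a route's attributes as a stack of ea_list layers chained through ->next, with the originally stored block at the bottom. A minimal sketch of that walk, assuming nothing beyond the chaining (simplified stand-in types, not BIRD's):

/* Simplified stand-in for ea_list layering: each temporary layer points
 * at the layer beneath it; the bottom one is the cached original block. */
struct layer { struct layer *next; };

static struct layer *
bottom_layer(struct layer *a)
{
  while (a->next)	/* skip update layers stacked on top */
    a = a->next;
  return a;		/* the original attribute block */
}
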
void
rte rt = n->routes->rte;
-  rte_update_lock();
-  if (!rte_is_valid(&rt))
-    return 0;
-
-  rte_update_lock(c);
-
/* Rest is stripped down export_filter() */
int v = c->proto->preexport ? c->proto->preexport(c, &rt) : 0;
if (v == RIC_PROCESS)
-    v = (f_run(filter, &rt, c->rte_update_pool, FF_SILENT) <= F_ACCEPT);
+    v = (f_run(filter, &rt, FF_SILENT) <= F_ACCEPT);
-
-  rte_update_unlock(c);
-  rte_update_unlock();
-
return v > 0;
}
{
rta_init();
rt_table_pool = rp_new(&root_pool, "Routing tables");
-  rte_update_pool = lp_new_default(rt_table_pool);
  init_list(&routing_tables);
-  ev_init_cork(&rt_cork, "Route Table Cork");
+ init_list(&deleted_routing_tables);
+ ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
+ rt_cork.run = (event) { .hook = rt_cork_release_hook };
}
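
rt_init now also wires up rt_cork; together with the rt_check_cork_low / rt_check_cork_high / rt_cork_release_hook declarations above, this suggests a two-watermark backpressure scheme: once a table's backlog crosses a high watermark, further work is parked on rt_cork.queue, and when it drains below a low watermark, rt_cork.run fires to release it. A hypothetical sketch of such watermark checks; the pending counter, corked flag and both thresholds are invented for illustration:

/* Hypothetical two-watermark cork, illustration only: 'pending',
 * 'corked' and both thresholds are invented, not BIRD's fields. */
#define CORK_HIGH 4096
#define CORK_LOW  1024

struct toy_table { unsigned pending; int corked; };

static void toy_check_cork_high(struct toy_table *t)
{
  if (!t->corked && (t->pending >= CORK_HIGH))
    t->corked = 1;	/* park new work on the cork queue */
}

static void toy_check_cork_low(struct toy_table *t)
{
  if (t->corked && (t->pending <= CORK_LOW))
    t->corked = 0;	/* backlog drained; fire the release event */
}
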
+
/**
* rt_prune_table - prune a routing table
*
(e->rte.stale_cycle < s->stale_valid) ||
(e->rte.stale_cycle > s->stale_set))
{
-      rte_discard(n, &e->rte);
-      if (limit <= 0)
-      {
-        FIB_ITERATE_PUT(fit);
-        ev_schedule(tab->prune_event);
-        return;
-      }
-
+      rte_recalculate(e->rte.sender, n, NULL, e->rte.src);
limit--;
goto rescan;
}
FIB_ITERATE_END;
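
The rescan above implements the stale sweep: a route survives only while its stale_cycle stamp lies inside the currently valid window, and anything outside it is withdrawn by calling rte_recalculate with a NULL replacement route. The keep/prune predicate, restated standalone as the negation of the hunk's condition (names as in the hunk):

/* A route survives the prune iff its stamp is inside the valid window;
 * this is the negation of the prune condition above. */
static int
stale_cycle_is_valid(uint stale_cycle, uint stale_valid, uint stale_set)
{
  return (stale_cycle >= stale_valid) && (stale_cycle <= stale_set);
}
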
-  c->event->hook = rt_export_hook;
-  rt_send_export_event(c);
-  rt_set_export_state(c, TES_READY);
+  rt_feed_done(c);
+}
+static void
+rt_feed_by_trie(void *data)
+{
+ struct rt_export_hook *c = data;
+ rtable *tab = SKIP_BACK(rtable, exporter, c->table);
+
+ ASSERT_DIE(c->walk_state);
+ struct f_trie_walk_state *ws = c->walk_state;
+
+ int max_feed = 256;
+
+  ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
+
+ net_addr addr;
+ while (trie_walk_next(ws, &addr))
+ {
+ net *n = net_find(tab, &addr);
+ if (!n)
+ continue;
+
+ if ((max_feed -= rt_feed_net(c, n)) <= 0)
+ return;
+
+ if (atomic_load_explicit(&c->export_state, memory_order_acquire) != TES_FEEDING)
+ return;
+ }
+
+ rt_unlock_trie(tab, c->walk_lock);
+ c->walk_lock = NULL;
+
+ mb_free(c->walk_state);
+ c->walk_state = NULL;
+
+ rt_feed_done(c);
+}
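
rt_feed_by_trie caps each invocation at 256 nets so a long feed cannot monopolize the loop: the walk position persists in c->walk_state and the trie stays locked through c->walk_lock, so a later invocation resumes exactly where this one returned. The same bounded-batch shape in miniature (generic C; the static cursor plays the role of the persisted walk state):

/* Generic bounded-batch worker: spend at most MAX_BATCH units of work,
 * then return and let the scheduler run us again. */
#define MAX_BATCH 256

static int cursor;		/* persisted position, like c->walk_state */

static int item_count(void);
static int process_item(int i);	/* returns the cost of item i */

static void
worker(void)
{
  int budget = MAX_BATCH;

  while (cursor < item_count())
    if ((budget -= process_item(cursor++)) <= 0)
      return;			/* budget spent; resume on the next run */

  /* all items done: tear down, as rt_unlock_trie + rt_feed_done do above */
}
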
+
+static void
+rt_feed_equal(void *data)
+{
+ struct rt_export_hook *c = data;
+ rtable *tab = SKIP_BACK(rtable, exporter, c->table);
+
+ ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
+ ASSERT_DIE(c->req->addr_mode == TE_ADDR_EQUAL);
+
+ net *n = net_find(tab, c->req->addr);
+ if (n)
+ rt_feed_net(c, n);
+
+ rt_feed_done(c);
+}
+
+static void
+rt_feed_for(void *data)
+{
+ struct rt_export_hook *c = data;
+ rtable *tab = SKIP_BACK(rtable, exporter, c->table);
+
+ ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
+ ASSERT_DIE(c->req->addr_mode == TE_ADDR_FOR);
+
+ net *n = net_route(tab, c->req->addr);
+ if (n)
+ rt_feed_net(c, n);
+
+ rt_feed_done(c);
+}
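
The two single-shot feeders differ only in lookup semantics: rt_feed_equal uses net_find, an exact-prefix lookup (TE_ADDR_EQUAL), while rt_feed_for uses net_route, a longest-covering-prefix lookup (TE_ADDR_FOR). A toy model of the contrast, assuming a table holding only 10.0.0.0/8 (types, addresses and results are illustrative; prefix lengths of 0 are not handled):

#include <stdio.h>

/* Toy table with one entry, 10.0.0.0/8, kept as (prefix, length). */
struct toy_net { unsigned addr, len; };
static struct toy_net table[] = { { 0x0A000000, 8 } };
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

static struct toy_net *
toy_find(unsigned a, unsigned l)	/* exact match, like net_find */
{
  for (unsigned i = 0; i < TABLE_LEN; i++)
    if ((table[i].addr == a) && (table[i].len == l))
      return &table[i];
  return NULL;
}

static struct toy_net *
toy_route(unsigned a, unsigned l)	/* longest covering prefix, like net_route */
{
  struct toy_net *best = NULL;
  for (unsigned i = 0; i < TABLE_LEN; i++)
    if ((table[i].len <= l) && !((table[i].addr ^ a) >> (32 - table[i].len)))
      if (!best || (table[i].len > best->len))
        best = &table[i];
  return best;
}

int main(void)
{
  /* Query 10.1.0.0/16: no exact entry exists, but the /8 covers it. */
  printf("equal: %s\n", toy_find(0x0A010000, 16) ? "found" : "none");
  printf("for:   %s\n", toy_route(0x0A010000, 16) ? "found" : "none");
  return 0;
}
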
+
+static uint
+rt_feed_net(struct rt_export_hook *c, net *n)
+{
+ uint count = 0;
+
+ if (c->req->export_bulk)
+ {
+ count = rte_feed_count(n);
+ if (count)
+ {
-      rte_update_lock();
+      rte **feed = alloca(count * sizeof(rte *));
+      rte_feed_obtain(n, feed, count);
+      c->req->export_bulk(c->req, n->n.addr, NULL, feed, count);
-      rte_update_unlock();
+ }
+ }
+
+ else if (n->routes)
+ {
-    rte_update_lock();
+    struct rt_pending_export rpe = { .new = n->routes, .new_best = n->routes };
+    c->req->export_one(c->req, n->n.addr, &rpe);
-    rte_update_unlock();
+ count = 1;
+ }
+
+ for (struct rt_pending_export *rpe = n->first; rpe; rpe = rpe_next(rpe, NULL))
+ rpe_mark_seen(c, rpe);
+
+ return count;
+}
+
+/*
+ * Import table
+ */
+
+void
+channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+{
+ struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+
+ for (uint i=0; i<count; i++)
+ if (feed[i]->sender == c->in_req.hook)
+ {
+ /* Strip the later attribute layers */
+ rte new = *feed[i];
+ while (new.attrs->next)
+ new.attrs = new.attrs->next;
+
+ /* And reload the route */
+ rte_update(c, net, &new, new.src);
+ }
+}
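
channel_reload_export_bulk doubles as a template for the export_bulk callback shape invoked by rt_feed_net above: one call per net, receiving the whole feed array. A minimal variant under the same signature that merely counts what it is fed (only the signature comes from the diff; the body is illustrative):

/* Minimal export_bulk-shaped callback: counts routes fed per net.
 * Illustrative only; just the signature mirrors the function above. */
static uint routes_seen;

void
count_export_bulk(struct rt_export_request *req UNUSED, const net_addr *net UNUSED, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
  for (uint i = 0; i < count; i++)
    if (feed[i])		/* every rte here belongs to one net */
      routes_seen++;
}
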