u32 proto_default_mrtdump; /* Default protocol mrtdump mask */
u32 channel_default_debug; /* Default channel debug mask */
u32 table_default_debug; /* Default table debug mask */
+ u32 show_route_debug; /* Exports to CLI debug mask */
u16 filter_vstk, filter_estk; /* Filter stack depth */
struct timeformat tf_route; /* Time format for 'show route' */
struct timeformat tf_proto; /* Time format for 'show protocol' */
return lfjour_schedule_cleanup(j);
WALK_TLIST(lfjour_recipient, r, &j->recipients)
- ev_send(r->target, r->event);
+ if (r->event)
+ ev_send(r->target, r->event);
}
static void
lfjour_register(struct lfjour *j, struct lfjour_recipient *r)
{
ASSERT_DIE(!j->domain || DG_IS_LOCKED(j->domain));
- ASSERT_DIE(r->event);
- ASSERT_DIE(r->target);
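+  /* Either both the event and its target are set, or neither is */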
+ ASSERT_DIE(!r->event == !r->target);
atomic_store_explicit(&r->last, NULL, memory_order_relaxed);
ASSERT_DIE(!r->cur);
struct lfjour *j = lfjour_of_recipient(r);
ASSERT_DIE(!j->domain || DG_IS_LOCKED(j->domain));
+ if (r->cur)
+ lfjour_release(r);
+
lfjour_recipient_rem_node(&j->recipients, r);
lfjour_schedule_cleanup(j);
}
-src := cli.c cmds.c iface.c locks.c mpls.c neighbor.c password.c proto.c proto-build.c rt-attr.c rt-dev.c rt-fib.c rt-show.c rt-table.c
+src := cli.c cmds.c iface.c locks.c mpls.c neighbor.c password.c proto.c proto-build.c rt-attr.c rt-dev.c rt-export.c rt-fib.c rt-show.c rt-table.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
void
cli_free(cli *c)
{
- int defer = 0;
- if (c->cleanup)
- defer = c->cleanup(c);
+ CALL(c->cleanup, c);
+
if (c == cmd_reconfig_stored_cli)
cmd_reconfig_stored_cli = NULL;
- if (defer)
- {
- sk_close(c->sock);
- c->sock = NULL;
- }
- else
- rp_free(c->pool);
+ rp_free(c->pool);
}
/**
struct cli_out *tx_buf, *tx_pos, *tx_write;
event *event;
void (*cont)(struct cli *c);
- int (*cleanup)(struct cli *c); /* Return 0 if finished and cli may be freed immediately.
- Otherwise return 1 and call rfree(c->pool) when appropriate. */
+ void (*cleanup)(struct cli *c); /* The CLI has closed prematurely */
void *rover; /* Private to continuation routine */
int last_reply;
int restricted; /* CLI is restricted to read-only commands */
this_channel->out_filter = $4;
}
| EXPORT imexport { this_channel->out_filter = $2; }
- | EXPORT BLOCK expr { this_channel->feed_block_size = $3; }
| RECEIVE LIMIT limit_spec { this_channel->rx_limit = $3; }
| IMPORT LIMIT limit_spec { this_channel->in_limit = $3; }
| EXPORT LIMIT limit_spec { this_channel->out_limit = $3; }
| DEBUG CHANNELS debug_mask { new_config->channel_default_debug = $3; }
| DEBUG TABLES debug_mask { new_config->table_default_debug = $3; }
| DEBUG COMMANDS expr { new_config->cli_debug = $3; }
+ | DEBUG SHOW ROUTE debug_mask { new_config->show_route_debug = $4; }
;
/* MRTDUMP PROTOCOLS is in sysdep/unix/config.Y */
static void channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
-static int channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n);
-static int channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n);
-static void channel_feed_end(struct channel *c);
static void channel_stop_export(struct channel *c);
-static void channel_export_stopped(struct rt_export_request *req);
-static void channel_refeed_stopped(struct rt_export_request *req);
static void channel_check_stopped(struct channel *c);
-static void channel_reload_in_done(struct channel_import_request *cir);
-static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);
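+/* Ask for a reload of routes from the table back into the channel (import side),
+ * or for a refeed of routes towards the protocol (export side) */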
+static inline void channel_reimport(struct channel *c, struct rt_feeding_request *rfr)
+{
+ rt_export_refeed(&c->reimporter, rfr);
+ ev_send(proto_event_list(c->proto), &c->reimport_event);
+}
+
+static inline void channel_refeed(struct channel *c, struct rt_feeding_request *rfr)
+{
+ rt_export_refeed(&c->out_req, rfr);
+}
static inline int proto_is_done(struct proto *p)
{ return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }
CD(c, "State changed to %s", c_states[c->channel_state]);
}
-void
+static void
channel_import_log_state_change(struct rt_import_request *req, u8 state)
{
SKIP_BACK_DECLARE(struct channel, c, in_req, req);
CD(c, "Channel import state changed to %s", rt_import_state_name(state));
}
-void
-channel_export_log_state_change(struct rt_export_request *req, u8 state)
+static void
+channel_export_fed(struct rt_export_request *req)
{
SKIP_BACK_DECLARE(struct channel, c, out_req, req);
- CD(c, "Channel export state changed to %s", rt_export_state_name(state));
- switch (state)
+ struct limit *l = &c->out_limit;
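+  /* Reset the export limit if the feed ended with an acceptable number of exported routes */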
+ if ((c->limit_active & (1 << PLD_OUT)) && (l->count <= l->max))
{
- case TES_FEEDING:
- if (c->proto->feed_begin)
- c->proto->feed_begin(c);
- break;
- case TES_READY:
- channel_feed_end(c);
- break;
+ c->limit_active &= ~(1 << PLD_OUT);
+ channel_request_full_refeed(c);
}
+ else
+ CALL(c->proto->export_fed, c);
}
void
-channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
+channel_request_full_refeed(struct channel *c)
{
- SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
- CD(c, "Channel export state changed to %s", rt_export_state_name(state));
-
- switch (state)
- {
- case TES_FEEDING:
- if (c->proto->feed_begin)
- c->proto->feed_begin(c);
- break;
- case TES_READY:
- rt_stop_export(req, channel_refeed_stopped);
- break;
- }
+ rt_export_refeed(&c->out_req, NULL);
+ bmap_reset(&c->export_accepted_map, 16);
+ bmap_reset(&c->export_rejected_map, 16);
}
-
static void
channel_dump_import_req(struct rt_import_request *req)
{
debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}
-static void
-channel_dump_refeed_req(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
- debug(" Channel %s.%s refeed request %p\n", c->proto->name, c->name, req);
-}
-
-
-static void
-channel_rpe_mark_seen_export(struct rt_export_request *req, struct rt_pending_export *rpe)
-{
- channel_rpe_mark_seen(SKIP_BACK(struct channel, out_req, req), rpe);
-}
-
-static void
-channel_rpe_mark_seen_refeed(struct rt_export_request *req, struct rt_pending_export *rpe)
-{
- channel_rpe_mark_seen(SKIP_BACK(struct channel, refeed_req, req), rpe);
-}
-
-
-struct channel *
-channel_from_export_request(struct rt_export_request *req)
-{
- if (req->dump_req == channel_dump_export_req)
- return SKIP_BACK(struct channel, out_req, req);
-
- if (req->dump_req == channel_dump_refeed_req)
- return SKIP_BACK(struct channel, refeed_req, req);
-
- bug("Garbled channel export request");
-}
-
static void
proto_log_state_change(struct proto *p)
c->out_filter = cf->out_filter;
c->out_subprefix = cf->out_subprefix;
- c->feed_block_size = cf->feed_block_size;
-
channel_init_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
channel_init_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
channel_init_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);
struct settle settle;
struct channel *c;
rtable *tab;
- struct rt_export_request req;
+ struct rt_export_request reader;
+ event update_event;
struct f_trie *trie;
+ void (*refeed_hook)(struct channel *, struct rt_feeding_request *);
};
static void
-channel_roa_in_reload_done(struct channel_import_request *req)
+channel_roa_reload_done(struct rt_feeding_request *req)
{
- rfree(req->trie->lp);
+ rfree(req->prefilter.trie->lp);
+ /* FIXME: this should reset import/export filters if ACTION BLOCK */
}
static void
-channel_roa_in_changed(struct settle *se)
-{
- SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
- struct channel *c = s->c;
-
- CD(c, "Reload triggered by RPKI change");
- struct channel_import_request *cir = lp_alloc(s->trie->lp, sizeof *cir);
- *cir = (struct channel_import_request) {
- .trie = s->trie,
- .done = channel_roa_in_reload_done,
- };
-
- s->trie = f_new_trie(lp_new(c->proto->pool), 0);
-
- channel_request_partial_reload(c, cir);
-}
-
-static void
-channel_roa_out_reload_done(struct channel_feeding_request *req)
-{
- rfree(req->trie->lp);
-}
-
-static void
-channel_roa_out_changed(struct settle *se)
+channel_roa_changed(struct settle *se)
{
SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
struct channel *c = s->c;
CD(c, "Feeding triggered by RPKI change");
/* Setup feeding request */
- struct channel_feeding_request *cfr = lp_alloc(s->trie->lp, sizeof *cfr);
- *cfr = (struct channel_feeding_request) {
- .type = CFRT_AUXILIARY,
- .trie = s->trie,
- .done = channel_roa_out_reload_done,
+ struct rt_feeding_request *rfr = lp_alloc(s->trie->lp, sizeof *rfr);
+ *rfr = (struct rt_feeding_request) {
+ .prefilter = {
+ .mode = TE_ADDR_TRIE,
+ .trie = s->trie,
+ },
+ .done = channel_roa_reload_done,
};
/* Prepare new trie */
s->trie = f_new_trie(lp_new(c->proto->pool), 0);
/* Actually request the feed */
- channel_request_feeding(c, cfr);
+ s->refeed_hook(c, rfr);
}
static void
-channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
+channel_roa_update_net(struct roa_subscription *s, const net_addr *net)
{
- SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
-
switch (net->type)
{
case NET_ROA4:
}
settle_kick(&s->settle, s->c->proto->loop);
+}
- rpe_mark_seen_all(req->hook, first, NULL, NULL);
+static void
+channel_roa_update(void *_s)
+{
+ struct roa_subscription *s = _s;
+
+ RT_EXPORT_WALK(&s->reader, u)
+ switch (u->kind)
+ {
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
+ break;
+
+ case RT_EXPORT_FEED:
+ if (u->feed->count_routes)
+ channel_roa_update_net(s, u->feed->block[0].net);
+ break;
+
+ case RT_EXPORT_UPDATE:
+      /* React only when a ROA appears or disappears; if both new and old exist, it merely switched from one source to another */
+ if (!u->update->new || !u->update->old)
+ channel_roa_update_net(s, u->update->new ? u->update->new->net : u->update->old->net);
+ break;
+
+ }
+}
+
+static inline void (*channel_roa_reload_hook(int dir))(struct channel *, struct rt_feeding_request *)
+{
+ return dir ? channel_reimport : channel_refeed;
}
static void
channel_dump_roa_req(struct rt_export_request *req)
{
- SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, reader, req);
struct channel *c = s->c;
debug(" Channel %s.%s ROA %s change notifier request %p\n",
c->proto->name, c->name,
- (s->settle.hook == channel_roa_in_changed) ? "import" : "export",
+ (s->refeed_hook == channel_roa_reload_hook(1)) ? "import" : "export",
req);
}
static int
channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
{
- void (*hook)(struct settle *) =
- dir ? channel_roa_in_changed : channel_roa_out_changed;
-
struct roa_subscription *s;
node *n;
WALK_LIST2(s, n, c->roa_subscriptions, roa_node)
- if ((tab == s->tab) && (s->settle.hook == hook))
+ if ((tab == s->tab) && (s->refeed_hook == channel_roa_reload_hook(dir)))
return 1;
return 0;
struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));
*s = (struct roa_subscription) {
- .settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
+ .settle = SETTLE_INIT(&c->roa_settle, channel_roa_changed, NULL),
+ .refeed_hook = channel_roa_reload_hook(dir),
.c = c,
.trie = f_new_trie(lp_new(c->proto->pool), 0),
.tab = tab,
- .req = {
+ .update_event = {
+ .hook = channel_roa_update,
+ .data = s,
+ },
+ .reader = {
.name = mb_sprintf(c->proto->pool, "%s.%s.roa-%s.%s",
c->proto->name, c->name, dir ? "in" : "out", tab->name),
- .list = proto_work_list(c->proto),
+ .r = {
+ .target = proto_work_list(c->proto),
+ .event = &s->update_event,
+ },
.pool = c->proto->pool,
.trace_routes = c->debug | c->proto->debug,
- .dump_req = channel_dump_roa_req,
- .export_one = channel_export_one_roa,
+ .dump = channel_dump_roa_req,
},
};
add_tail(&c->roa_subscriptions, &s->roa_node);
- rt_request_export(tab, &s->req);
+ rt_export_subscribe(tab, best, &s->reader);
}
static void
-channel_roa_unsubscribed(struct rt_export_request *req)
+channel_roa_unsubscribe(struct roa_subscription *s)
{
- SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
struct channel *c = s->c;
+ rt_export_unsubscribe(best, &s->reader);
+ settle_cancel(&s->settle);
+ s->settle.hook = NULL;
+ ev_postpone(&s->update_event);
+
+ ASSERT_DIE(rt_export_get_state(&s->reader) == TES_DOWN);
+
+ rfree(s->trie->lp);
+
rem_node(&s->roa_node);
mb_free(s);
channel_check_stopped(c);
}
-static void
-channel_roa_unsubscribe(struct roa_subscription *s)
-{
- rfree(s->trie->lp);
- rt_stop_export(&s->req, channel_roa_unsubscribed);
- settle_cancel(&s->settle);
- s->settle.hook = NULL;
-}
-
static void
channel_roa_subscribe_filter(struct channel *c, int dir)
{
rt_request_import(c->table, &c->in_req);
}
+void channel_notify_basic(void *);
+void channel_notify_accepted(void *);
+void channel_notify_merged(void *);
+
static void
channel_start_export(struct channel *c)
{
- if (c->out_req.hook)
- {
- log(L_WARN "%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
- return;
- }
+ if (rt_export_get_state(&c->out_req) != TES_DOWN)
+ bug("%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
ASSERT(c->channel_state == CS_UP);
c->out_req = (struct rt_export_request) {
.name = mb_sprintf(p, "%s.%s", c->proto->name, c->name),
- .list = proto_work_list(c->proto),
+ .r = {
+ .target = proto_work_list(c->proto),
+ .event = &c->out_event,
+ },
.pool = p,
- .feed_block_size = c->feed_block_size,
- .prefilter = {
+ .feeder.prefilter = {
.mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
.addr = c->out_subprefix,
},
.trace_routes = c->debug | c->proto->debug,
- .dump_req = channel_dump_export_req,
- .log_state_change = channel_export_log_state_change,
- .mark_seen = channel_rpe_mark_seen_export,
+ .dump = channel_dump_export_req,
+ .fed = channel_export_fed,
+ };
+
+ c->out_event = (event) {
+ .data = c,
};
- bmap_init(&c->export_map, p, 16);
- bmap_init(&c->export_reject_map, p, 16);
- bmap_init(&c->refeed_map, p, 16);
+ bmap_init(&c->export_accepted_map, p, 16);
+ bmap_init(&c->export_rejected_map, p, 16);
channel_reset_limit(c, &c->out_limit, PLD_OUT);
memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
+ DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
+
switch (c->ra_mode) {
case RA_OPTIMAL:
- c->out_req.export_one = rt_notify_optimal;
+ c->out_event.hook = channel_notify_basic;
+ rt_export_subscribe(c->table, best, &c->out_req);
break;
case RA_ANY:
- c->out_req.export_one = rt_notify_any;
- c->out_req.export_bulk = rt_feed_any;
+ c->out_event.hook = channel_notify_basic;
+ rt_export_subscribe(c->table, all, &c->out_req);
break;
case RA_ACCEPTED:
- c->out_req.export_bulk = rt_notify_accepted;
+ c->out_event.hook = channel_notify_accepted;
+ rt_export_subscribe(c->table, all, &c->out_req);
break;
case RA_MERGED:
- c->out_req.export_bulk = rt_notify_merged;
+ c->out_event.hook = channel_notify_merged;
+ rt_export_subscribe(c->table, all, &c->out_req);
break;
default:
bug("Unknown route announcement mode");
}
-
- c->refeed_req = c->out_req;
- c->refeed_req.pool = rp_newf(c->proto->pool, c->proto->pool->domain, "Channel %s.%s export refeed", c->proto->name, c->name);
- c->refeed_req.name = mb_sprintf(c->refeed_req.pool, "%s.%s.refeed", c->proto->name, c->name);
- c->refeed_req.dump_req = channel_dump_refeed_req;
- c->refeed_req.log_state_change = channel_refeed_log_state_change;
- c->refeed_req.mark_seen = channel_rpe_mark_seen_refeed;
-
- DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
- rt_request_export(c->table, &c->out_req);
}
static void
switch (c->channel_state)
{
case CS_STOP:
- if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->out_req.hook || c->refeed_req.hook || c->in_req.hook || c->reload_req.hook)
+ if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->in_req.hook)
return;
+ ASSERT_DIE(rt_export_get_state(&c->out_req) == TES_DOWN);
+ ASSERT_DIE(!rt_export_feed_active(&c->reimporter));
+
channel_set_state(c, CS_DOWN);
proto_send_event(c->proto, c->proto->event);
break;
case CS_PAUSE:
- if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions) || c->out_req.hook || c->refeed_req.hook || c->reload_req.hook)
+ if (c->obstacles || !EMPTY_LIST(c->roa_subscriptions))
return;
+ ASSERT_DIE(rt_export_get_state(&c->out_req) == TES_DOWN);
+ ASSERT_DIE(!rt_export_feed_active(&c->reimporter));
+
channel_set_state(c, CS_START);
break;
}
}
static void
-channel_export_stopped(struct rt_export_request *req)
+channel_do_reload(void *_c)
{
- SKIP_BACK_DECLARE(struct channel, c, out_req, req);
-
- /* The hook has already stopped */
- req->hook = NULL;
+ struct channel *c = _c;
- if (c->refeed_pending)
- {
- ASSERT_DIE(!c->refeeding);
- c->refeeding = c->refeed_pending;
- c->refeed_pending = NULL;
-
- channel_reset_limit(c, &c->out_limit, PLD_OUT);
-
- bmap_reset(&c->export_map, 16);
- bmap_reset(&c->export_reject_map, 16);
- bmap_reset(&c->refeed_map, 16);
-
- rt_request_export(c->table, req);
- return;
- }
-
- bmap_free(&c->export_map);
- bmap_free(&c->export_reject_map);
-
- c->out_req.name = NULL;
- rfree(c->out_req.pool);
-
- channel_check_stopped(c);
-}
+ RT_FEED_WALK(&c->reimporter, f)
+ if (task_still_in_limit())
+ {
+ for (uint i = 0; i < f->count_routes; i++)
+ {
+ rte *r = &f->block[i];
-static void
-channel_refeed_stopped(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
+ if (r->flags & REF_OBSOLETE)
+ break;
- req->hook = NULL;
+ if (r->sender == c->in_req.hook)
+ {
+ /* Strip the table-specific information */
+ rte new = rte_init_from(r);
- channel_feed_end(c);
- channel_check_stopped(c);
-}
+ /* Strip the later attribute layers */
+ new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
-static void
-channel_init_feeding(struct channel *c)
-{
- int no_trie = 0;
+ /* And reload the route */
+ rte_update(c, r->net, &new, new.src);
+ }
+ }
- for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
- if (cfrp->type == CFRT_DIRECT)
+ /* Local data needed no more */
+ tmp_flush();
+ }
+ else
{
- /* Direct feeding requested? Restart the export by force. */
- channel_stop_export(c);
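+      /* Out of the task time budget; reschedule and continue the reload later */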
+ ev_send(proto_work_list(c->proto), &c->reimport_event);
return;
}
- else if (!cfrp->trie)
- no_trie = 1;
-
- /* No direct feeding, running auxiliary refeed. */
- c->refeeding = c->refeed_pending;
- c->refeed_pending = NULL;
- bmap_reset(&c->refeed_map, 16);
-
- if (no_trie)
- {
- c->refeed_req.prefilter.mode = TE_ADDR_NONE;
- c->refeed_req.prefilter.hook = NULL;
- }
- else
- {
- c->refeed_req.prefilter.mode = TE_ADDR_HOOK;
- c->refeed_req.prefilter.hook = channel_refeed_prefilter;
- }
-
- rt_request_export(c->table, &c->refeed_req);
-}
-
-static int
-channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n)
-{
- const struct channel *c =
- SKIP_BACK(struct channel, refeed_req,
- SKIP_BACK(struct rt_export_request, prefilter, p)
- );
-
- ASSERT_DIE(c->refeeding);
- for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
- if (!cfr->trie || trie_match_net(cfr->trie, n))
- return 1;
- return 0;
-}
-
-int
-channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
-{
- for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
- {
- if (!cir->trie || trie_match_net(cir->trie, n))
- return 1;
- }
- return 0;
-}
-
-static int
-channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
-{
- const struct channel *c =
- SKIP_BACK(struct channel, reload_req,
- SKIP_BACK(struct rt_export_request, prefilter, p)
- );
- ASSERT_DIE(c->importing);
-
- return channel_import_request_prefilter(c->importing, n);
-}
-
-static void
-channel_feed_end(struct channel *c)
-{
- /* Reset export limit if the feed ended with acceptable number of exported routes */
- struct limit *l = &c->out_limit;
- if (c->refeeding &&
- (c->limit_active & (1 << PLD_OUT)) &&
- (l->count <= l->max))
- {
- log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, l->max);
- c->limit_active &= ~(1 << PLD_OUT);
-
- /* Queue the same refeed batch back into pending */
- struct channel_feeding_request **ptr = &c->refeed_pending;
- while (*ptr)
- ptr = &((*ptr)->next);
-
- *ptr = c->refeeding;
-
- /* Mark the requests to be redone */
- for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
- cfr->state = CFRS_PENDING;
-
- c->refeeding = NULL;
- }
-
- /* Inform the protocol about the feed ending */
- CALL(c->proto->feed_end, c);
-
- /* Free the dynamic feeding requests */
- for (struct channel_feeding_request *cfr = c->refeeding, *next = cfr ? cfr->next : NULL;
- cfr;
- (cfr = next), (next = next ? next->next : NULL))
- CALL(cfr->done, cfr);
-
- /* Drop the refeed batch */
- c->refeeding = NULL;
-
- /* Run the pending batch */
- if (c->refeed_pending)
- channel_init_feeding(c);
-}
-
-/* Called by protocol for reload from in_table */
-void
-channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
-{
- ASSERT(c->in_req.hook);
- int no_trie = 0;
- if (cir)
- {
- cir->next = c->import_pending;
- c->import_pending = cir;
- }
-
- if (c->reload_req.hook)
- {
- CD(c, "Reload triggered before the previous one has finished");
- c->reload_pending = 1;
- return;
- }
-
- /* If there is any full-reload request, we can disregard all partials */
- for (struct channel_import_request *last = cir; last && no_trie==0;)
- {
- if (!last->trie)
- no_trie = 1;
- last = last->next;
- }
-
- /* activating pending imports */
- c->importing = c->import_pending;
- c->import_pending = NULL;
-
- if (no_trie)
- {
- c->reload_req.prefilter.mode = TE_ADDR_NONE;
- c->reload_req.prefilter.hook = NULL;
- }
- else
- {
- c->reload_req.prefilter.mode = TE_ADDR_HOOK;
- c->reload_req.prefilter.hook = channel_import_prefilter;
- }
-
- rt_request_export(c->table, &c->reload_req);
-}
-
-static void
-channel_reload_stopped(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
-
- req->hook = NULL;
-
- /* Restart reload */
- if (c->reload_pending)
- {
- c->reload_pending = 0;
- channel_request_reload(c);
- }
-
- if (c->channel_state != CS_UP)
- channel_check_stopped(c);
-}
-
-static void
-channel_reload_log_state_change(struct rt_export_request *req, u8 state)
-{
- SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
-
- if (state == TES_READY)
- {
- if (c->channel_state == CS_UP)
- rt_refresh_end(&c->in_req);
-
- rt_stop_export(req, channel_reload_stopped);
- }
-}
-
-static void
-channel_reload_dump_req(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
- debug(" Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
}
/* Called by protocol to activate in_table */
static void
channel_setup_in_table(struct channel *c)
{
- c->reload_req = (struct rt_export_request) {
- .name = mb_sprintf(c->proto->pool, "%s.%s.import", c->proto->name, c->name),
- .list = proto_work_list(c->proto),
- .pool = c->proto->pool,
- .feed_block_size = c->feed_block_size,
- .trace_routes = c->debug | c->proto->debug,
- .export_bulk = channel_reload_export_bulk,
- .dump_req = channel_reload_dump_req,
- .log_state_change = channel_reload_log_state_change,
+ c->reimporter = (struct rt_export_feeder) {
+ .name = mb_sprintf(c->proto->pool, "%s.%s.reimport", c->proto->name, c->name),
+ .trace_routes = c->debug,
};
+ c->reimport_event = (event) {
+ .hook = channel_do_reload,
+ .data = c,
+ };
+ rt_feeder_subscribe(&c->table->export_all, &c->reimporter);
}
/* Drop ROA subscriptions */
channel_roa_unsubscribe_all(c);
- /* Need to abort feeding */
- c->reload_pending = 0;
-
- if (c->reload_req.hook && atomic_load_explicit(&c->reload_req.hook->export_state, memory_order_acquire) != TES_STOP)
- rt_stop_export(&c->reload_req, channel_reload_stopped);
-
/* Stop export */
- c->refeed_pending = 0;
channel_stop_export(c);
}
if (c->in_req.hook)
rt_stop_import(&c->in_req, channel_import_stopped);
+ /* Need to abort reimports as well */
+ rt_feeder_unsubscribe(&c->reimporter);
+
c->gr_wait = 0;
if (c->gr_lock)
channel_graceful_restart_unlock(c);
static void
channel_do_down(struct channel *c)
{
- ASSERT(!c->reload_req.hook);
+ ASSERT_DIE(!rt_export_feed_active(&c->reimporter));
c->proto->active_channels--;
channel_log_state_change(c);
}
-/**
- * channel_request_feeding - request feeding routes to the channel
- * @c: given channel
- *
- * Sometimes it is needed to send again all routes to the channel. This is
- * called feeding and can be requested by this function. This would cause
- * channel export state transition to ES_FEEDING (during feeding) and when
- * completed, it will switch back to ES_READY. This function can be called
- * even when feeding is already running, in that case it is restarted.
- */
-void
-channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
-{
- CD(c, "Feeding requested (%s)",
- cfr->type == CFRT_DIRECT ? "direct" :
- (cfr->trie ? "partial" : "auxiliary"));
-
- /* Enqueue the request */
- cfr->next = c->refeed_pending;
- c->refeed_pending = cfr;
-
- /* Initialize refeeds unless already refeeding */
- if (!c->refeeding)
- channel_init_feeding(c);
-}
-
-static void
-channel_feeding_request_done_dynamic(struct channel_feeding_request *req)
-{
- mb_free(req);
-}
-
-void
-channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type type)
-{
- struct channel_feeding_request *req = mb_alloc(c->proto->pool, sizeof *req);
- *req = (struct channel_feeding_request) {
- .type = type,
- .done = channel_feeding_request_done_dynamic,
- };
-
- channel_request_feeding(c, req);
-}
-
static void
channel_stop_export(struct channel *c)
{
- if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) != TES_STOP))
- rt_stop_export(&c->refeed_req, channel_refeed_stopped);
+ switch (rt_export_get_state(&c->out_req))
+ {
+ case TES_FEEDING:
+ case TES_PARTIAL:
+ case TES_READY:
+ if (c->ra_mode == RA_OPTIMAL)
+ rt_export_unsubscribe(best, &c->out_req);
+ else
+ rt_export_unsubscribe(all, &c->out_req);
- if (c->out_req.hook && (atomic_load_explicit(&c->out_req.hook->export_state, memory_order_acquire) != TES_STOP))
- rt_stop_export(&c->out_req, channel_export_stopped);
-}
+ bmap_free(&c->export_accepted_map);
+ bmap_free(&c->export_rejected_map);
-static void
-channel_import_request_done_dynamic(struct channel_import_request *req)
-{
- mb_free(req);
-}
+ c->out_req.name = NULL;
+ rfree(c->out_req.pool);
-void
-channel_request_reload(struct channel *c)
-{
- ASSERT(c->in_req.hook);
- ASSERT(channel_reloadable(c));
+ channel_check_stopped(c);
+ break;
- CD(c, "Reload requested");
- struct channel_import_request* cir = mb_alloc(c->proto->pool, sizeof *cir);
- cir->trie = NULL;
- cir->done = channel_import_request_done_dynamic;
+ case TES_DOWN:
+ break;
- if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
- channel_schedule_reload(c, cir);
- else if (! c->proto->reload_routes(c, cir))
- bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
+ case TES_STOP:
+ case TES_MAX:
+ bug("Impossible export state");
+ }
}
-static void
-channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
+void
+channel_request_reload(struct channel *c, struct rt_feeding_request *cir)
{
ASSERT(c->in_req.hook);
ASSERT(channel_reloadable(c));
- CD(c, "Partial import reload requested");
+ if (cir)
+ CD(c, "Partial import reload requested");
+ else
+ CD(c, "Full import reload requested");
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
- channel_schedule_reload(c, cir);
+ channel_reimport(c, cir);
else if (! c->proto->reload_routes(c, cir))
cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
}
cf->table = tab;
cf->out_filter = FILTER_REJECT;
- cf->feed_block_size = 16384;
-
cf->net_type = net_type;
cf->ra_mode = RA_OPTIMAL;
cf->preference = proto->protocol->preference;
// c->ra_mode = cf->ra_mode;
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
- c->out_req.prefilter.addr = c->out_subprefix = cf->out_subprefix;
+ c->out_req.feeder.prefilter.addr = c->out_subprefix = cf->out_subprefix;
c->debug = cf->debug;
c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
c->rpki_reload = cf->rpki_reload;
log(L_INFO "Reloading channel %s.%s", c->proto->name, c->name);
if (import_changed)
- channel_request_reload(c);
+ channel_request_reload(c, NULL);
if (export_changed)
- channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
+ channel_request_full_refeed(c);
done:
CD(c, "Reconfigured");
debug("\tOutput filter: %s\n", filter_name(c->out_filter));
debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
- c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
+ rt_export_state_name(rt_export_get_state(&c->out_req)));
}
debug("\tSOURCES\n");
struct channel_import_stats *ch_is = &c->import_stats;
struct channel_export_stats *ch_es = &c->export_stats;
struct rt_import_stats *rt_is = c->in_req.hook ? &c->in_req.hook->stats : NULL;
- struct rt_export_stats *rt_es = c->out_req.hook ? &c->out_req.hook->stats : NULL;
+ struct rt_export_stats *rt_es = &c->out_req.stats;
#define SON(ie, item) ((ie) ? (ie)->item : 0)
#define SCI(item) SON(ch_is, item)
cli_msg(-1006, " Channel %s", c->name);
cli_msg(-1006, " State: %s", c_states[c->channel_state]);
cli_msg(-1006, " Import state: %s", rt_import_state_name(rt_import_get_state(c->in_req.hook)));
- cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(c->out_req.hook)));
+ cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(&c->out_req)));
cli_msg(-1006, " Table: %s", c->table->name);
cli_msg(-1006, " Preference: %d", c->preference);
cli_msg(-1006, " Input filter: %s", filter_name(c->in_filter));
cli_msg(-12, "%s: restarted", p->name);
}
-struct channel_cmd_reload_feeding_request {
- struct channel_feeding_request cfr;
- struct proto_reload_request *prr;
-};
-
-struct channel_cmd_reload_import_request {
- struct channel_import_request cir;
+struct channel_cmd_reload_request {
+ struct rt_feeding_request cfr;
struct proto_reload_request *prr;
};
static void
-channel_reload_out_done(struct channel_feeding_request *cfr)
+channel_reload_done(struct rt_feeding_request *cfr)
{
- SKIP_BACK_DECLARE(struct channel_cmd_reload_feeding_request, ccrfr, cfr, cfr);
+ SKIP_BACK_DECLARE(struct channel_cmd_reload_request, ccrfr, cfr, cfr);
if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
}
-static void
-channel_reload_in_done(struct channel_import_request *cir)
+static struct rt_feeding_request *
+channel_create_reload_request(struct proto_reload_request *prr)
{
- SKIP_BACK_DECLARE(struct channel_cmd_reload_import_request, ccrir, cir, cir);
- if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
- ev_send_loop(&main_birdloop, &ccrir->prr->ev);
+ if (!prr->trie)
+ return NULL;
+
+ /* Increase the refeed counter */
+ atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+ ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+ struct channel_cmd_reload_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+ *req = (struct channel_cmd_reload_request) {
+ .cfr = {
+ .done = channel_reload_done,
+ .prefilter = {
+ .mode = TE_ADDR_TRIE,
+ .trie = prr->trie,
+ },
+ },
+ .prr = prr,
+ };
+
+ return &req->cfr;
}
void
log(L_INFO "Reloading protocol %s", p->name);
/* re-importing routes */
- if (prr->dir != CMD_RELOAD_OUT)
- WALK_LIST(c, p->channels)
- if (c->channel_state == CS_UP)
- {
- if (prr->trie)
- {
- /* Increase the refeed counter */
- atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
- ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
-
- struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
- *req = (struct channel_cmd_reload_import_request) {
- .cir = {
- .done = channel_reload_in_done,
- .trie = prr->trie,
- },
- .prr = prr,
- };
- channel_request_partial_reload(c, &req->cir);
- }
- else
- channel_request_reload(c);
- }
+ WALK_LIST(c, p->channels)
+ if (c->channel_state == CS_UP)
+ {
+ if (prr->dir & CMD_RELOAD_IN)
+ channel_request_reload(c, channel_create_reload_request(prr));
- /* re-exporting routes */
- if (prr->dir != CMD_RELOAD_IN)
- WALK_LIST(c, p->channels)
- if ((c->channel_state == CS_UP) && (c->out_req.hook))
- if (prr->trie)
- {
- /* Increase the refeed counter */
- atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
- ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
-
- /* Request actually the feeding */
-
- struct channel_cmd_reload_feeding_request *req = lp_alloc(prr->trie->lp, sizeof *req);
- *req = (struct channel_cmd_reload_feeding_request) {
- .cfr = {
- .type = CFRT_AUXILIARY,
- .done = channel_reload_out_done,
- .trie = prr->trie,
- },
- .prr = prr,
- };
-
- channel_request_feeding(c, &req->cfr);
- }
- else
- channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
+ if (prr->dir & CMD_RELOAD_OUT)
+ rt_export_refeed(&c->out_req, channel_create_reload_request(prr));
+ }
cli_msg(-15, "%s: reloading", p->name);
}
/* Protocol-specific data follow... */
};
-struct channel_import_request {
- struct channel_import_request *next; /* Next in request chain */
- void (*done)(struct channel_import_request *); /* Called when import finishes */
- const struct f_trie *trie; /* Reload only matching nets */
-};
-
#define TLIST_PREFIX proto
#define TLIST_TYPE struct proto
#define TLIST_ITEM n
* reload_routes Request channel to reload all its routes to the core
* (using rte_update()). Returns: 0=reload cannot be done,
* 1= reload is scheduled and will happen (asynchronously).
- * feed_begin Notify channel about beginning of route feeding.
- * feed_end Notify channel about finish of route feeding.
*/
void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
int (*preexport)(struct channel *, struct rte *rt);
- int (*reload_routes)(struct channel *, struct channel_import_request *cir);
- void (*feed_begin)(struct channel *);
- void (*feed_end)(struct channel *);
+ void (*export_fed)(struct channel *);
+ int (*reload_routes)(struct channel *, struct rt_feeding_request *cir);
/*
* Routing entry hooks (called only for routes belonging to this protocol):
struct settle_config roa_settle; /* Settle times for ROA-induced reload */
- uint feed_block_size; /* How many routes to feed at once */
-
u8 net_type; /* Routing table network type (NET_*), 0 for undefined */
u8 ra_mode; /* Mode of received route advertisements (RA_*) */
u16 preference; /* Default route preference */
const struct filter *in_filter; /* Input filter */
const struct filter *out_filter; /* Output filter */
const net_addr *out_subprefix; /* Export only subprefixes of this net */
- struct bmap export_map; /* Keeps track which routes were really exported */
- struct bmap export_reject_map; /* Keeps track which routes were rejected by export filter */
+ struct bmap export_accepted_map; /* Keeps track which routes were really exported */
+ struct bmap export_rejected_map; /* Keeps track which routes were rejected by export filter */
struct limit rx_limit; /* Receive limit (for in_keep & RIK_REJECTED) */
struct limit in_limit; /* Input limit */
struct rt_import_request in_req; /* Table import connection */
struct rt_export_request out_req; /* Table export connection */
-
- struct rt_export_request refeed_req; /* Auxiliary refeed request */
- struct bmap refeed_map; /* Auxiliary refeed netindex bitmap */
- struct channel_feeding_request *refeeding; /* Refeeding the channel */
- struct channel_feeding_request *refeed_pending; /* Scheduled refeeds */
- struct channel_import_request *importing; /* Importing the channel */
- struct channel_import_request *import_pending; /* Scheduled imports */
-
- uint feed_block_size; /* How many routes to feed at once */
+ event out_event; /* Table export event */
u8 net_type; /* Routing table network type (NET_*), 0 for undefined */
u8 ra_mode; /* Mode of received route advertisements (RA_*) */
btime last_state_change; /* Time of last state transition */
- struct rt_export_request reload_req; /* Feeder for import reload */
+ struct rt_export_feeder reimporter; /* Feeder for import reload */
+ event reimport_event; /* Event doing that import reload */
- u8 reload_pending; /* Reloading and another reload is scheduled */
u8 rpki_reload; /* RPKI changes trigger channel reload */
struct rt_exporter *out_table; /* Internal table for exported routes */
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
void channel_set_state(struct channel *c, uint state);
-void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
-int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);
void channel_add_obstacle(struct channel *c);
void channel_del_obstacle(struct channel *c);
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
-struct channel_feeding_request {
- struct channel_feeding_request *next; /* Next in request chain */
- void (*done)(struct channel_feeding_request *); /* Called when refeed finishes */
- const struct f_trie *trie; /* Reload only matching nets */
- PACKED enum channel_feeding_request_type {
- CFRT_DIRECT = 1, /* Refeed by export restart */
- CFRT_AUXILIARY, /* Refeed by auxiliary request */
- } type;
- PACKED enum {
- CFRS_INACTIVE = 0, /* Inactive request */
- CFRS_PENDING, /* Request enqueued, do not touch */
- CFRS_RUNNING, /* Request active, do not touch */
- } state;
-};
-
-struct channel *channel_from_export_request(struct rt_export_request *req);
-void channel_request_feeding(struct channel *c, struct channel_feeding_request *);
-void channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type);
-
-static inline int channel_net_is_refeeding(struct channel *c, const net_addr *n)
-{
- /* Not refeeding if not refeeding at all */
- if (!c->refeeding)
- return 0;
-
- /* Not refeeding if already refed */
- struct netindex *ni = NET_TO_INDEX(n);
- if (bmap_test(&c->refeed_map, ni->index))
- return 0;
-
- /* Refeeding if matching any request */
- for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
- if (!cfr->trie || trie_match_net(cfr->trie, n))
- return 1;
-
- /* Not matching any request */
- return 0;
-}
-static inline void channel_net_mark_refed(struct channel *c, const net_addr *n)
-{
- ASSERT_DIE(c->refeeding);
-
- struct netindex *ni = NET_TO_INDEX(n);
- bmap_set(&c->refeed_map, ni->index);
-}
-
-void channel_request_reload(struct channel *c);
+void channel_request_reload(struct channel *c, struct rt_feeding_request *cir);
+void channel_request_full_refeed(struct channel *c);
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
* BIRD Internet Routing Daemon -- Routing Table
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
- * (c) 2019--2021 Maria Matejka <mq@jmq.cz>
+ * (c) 2019--2024 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
struct f_trie_walk_state;
struct cli;
-struct rt_cork_threshold {
- u64 low, high;
-};
-
/*
* Master Routing Tables. Generally speaking, each of them contains a FIB
* with each entry pointing to a list of route entries representing routes
u32 debug; /* Debugging flags (D_*) */
byte sorted; /* Routes of network are sorted according to rte_better() */
byte trie_used; /* Rtable has attached trie */
- struct rt_cork_threshold cork_threshold; /* Cork threshold values */
+ struct rt_cork_threshold {
+ u64 low, high;
+ } cork_threshold; /* Cork threshold values */
struct settle_config export_settle; /* Export announcement settler */
struct settle_config export_rr_settle;/* Export announcement settler config valid when any
route refresh is running */
};
+/*
+ * Route export journal
+ *
+ * The journal itself is held in struct rt_exporter.
+ * Workflow:
+ * (1) Initialize by rt_exporter_init()
+ * (2) Push data by rt_exporter_push() (the export item is copied)
+ * (3) Shutdown by rt_exporter_shutdown(); the stopped callback is called after cleanup
+ *
+ * Subscribers:
+ * (1) Initialize by rt_export_subscribe()
+ * (2a) Get data by rt_export_get();
+ * (2b) Release data after processing by rt_export_release()
+ * (3) Request refeed by rt_export_refeed()
+ * (4) Unsubscribe by rt_export_unsubscribe()
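+ *
+ * A minimal subscriber sketch (illustrative only; my_notify and my_process are
+ * hypothetical names, the pattern mirrors channel_roa_update()). The hook is
+ * meant to run from the event registered in the request's lfjour_recipient;
+ * RT_EXPORT_WALK below wraps rt_export_get() / rt_export_release():
+ *
+ *   static void my_notify(void *_req)
+ *   {
+ *     struct rt_export_request *req = _req;
+ *     RT_EXPORT_WALK(req, u)
+ *       switch (u->kind)
+ *       {
+ *         case RT_EXPORT_STOP:
+ *           break;
+ *         case RT_EXPORT_FEED:
+ *           for (uint i = 0; i < u->feed->count_routes; i++)
+ *             my_process(&u->feed->block[i], NULL);
+ *           break;
+ *         case RT_EXPORT_UPDATE:
+ *           my_process(u->update->new, u->update->old);
+ *           break;
+ *       }
+ *   }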
+ */
+
+struct rt_export_request {
+ /* Formal name */
+ char *name;
+
+ /* Memory */
+ pool *pool;
+
+ /* State information */
+ enum rt_export_state {
+#define RT_EXPORT_STATES \
+ DOWN, \
+ FEEDING, \
+ PARTIAL, \
+ READY, \
+ STOP, \
+
+#define RT_EXPORT_STATES_ENUM_HELPER(p) TES_##p,
+ MACRO_FOREACH(RT_EXPORT_STATES_ENUM_HELPER, RT_EXPORT_STATES)
+ TES_MAX
+#undef RT_EXPORT_STATES_ENUM_HELPER
+ } _Atomic export_state;
+ btime last_state_change;
+
+ /* Table feeding contraption */
+ struct rt_export_feeder {
+ /* Formal name */
+ char *name;
+
+ /* Enlisting */
+ struct rt_exporter * _Atomic exporter;
+ struct rt_export_feeder * _Atomic next;
+
+ /* Prefiltering, useful for more scenarios */
+ struct rt_prefilter {
+ /* Network prefilter mode (TE_ADDR_*) */
+ enum {
+ TE_ADDR_NONE = 0, /* No address matching */
+ TE_ADDR_EQUAL, /* Exact query - show route <addr> */
+ TE_ADDR_FOR, /* Longest prefix match - show route for <addr> */
+ TE_ADDR_IN, /* Interval query - show route in <addr> */
+ TE_ADDR_TRIE, /* Query defined by trie */
+ TE_ADDR_HOOK, /* Query processed by supplied custom hook */
+ } mode;
+
+ union {
+ const struct f_trie *trie;
+ const net_addr *addr;
+ int (*hook)(const struct rt_prefilter *, const net_addr *);
+ };
+ } prefilter;
+
+#define TLIST_PREFIX rt_export_feeder
+#define TLIST_TYPE struct rt_export_feeder
+#define TLIST_ITEM n
+#define TLIST_WANT_WALK
+#define TLIST_WANT_ADD_TAIL
+
+ /* Feeding itself */
+ union {
+ u64 feed_index; /* Index of the feed in progress */
+ struct rt_feeding_index *feed_index_ptr; /* Use this when u64 is not enough */
+ };
+ struct rt_feeding_request {
+ struct rt_feeding_request *next; /* Next in request chain */
+ void (*done)(struct rt_feeding_request *);/* Called when this refeed finishes */
+ struct rt_prefilter prefilter; /* Reload only matching nets */
+ PACKED enum {
+ RFRS_INACTIVE = 0, /* Inactive request */
+ RFRS_PENDING, /* Request enqueued, do not touch */
+ RFRS_RUNNING, /* Request active, do not touch */
+ } state;
+ } *feeding, *feed_pending;
+ TLIST_DEFAULT_NODE;
+ u8 trace_routes;
+ } feeder;
+
+ /* Regular updates */
+ struct bmap seq_map; /* Which lfjour items are already processed */
+ struct bmap feed_map; /* Which nets were already fed (for initial feeding) */
+ struct lfjour_recipient r;
+ struct rt_export_union *cur;
+
+ /* Statistics */
+ struct rt_export_stats {
+ u32 updates_received; /* Number of route updates received */
+ u32 withdraws_received; /* Number of route withdraws received */
+ } stats;
+
+ /* Tracing */
+ u8 trace_routes;
+ void (*dump)(struct rt_export_request *req);
+ void (*fed)(struct rt_export_request *req);
+};
+
+#include "lib/tlists.h"
+
+struct rt_export_union {
+ enum rt_export_kind {
+ RT_EXPORT_STOP = 1,
+ RT_EXPORT_FEED,
+ RT_EXPORT_UPDATE,
+ } kind;
+ const struct rt_export_item {
+ LFJOUR_ITEM_INHERIT(li); /* Member of lockfree journal */
+ char data[0]; /* Memcpy helper */
+ const rte *new, *old; /* Route update */
+ } *update;
+ const struct rt_export_feed {
+ uint count_routes, count_exports;
+ const struct netindex *ni;
+ rte *block;
+ u64 *exports;
+ char data[0];
+ } *feed;
+ struct rt_export_request *req;
+};
+
+struct rt_exporter {
+ struct lfjour journal; /* Journal for update keeping */
+ TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
+ _Bool _Atomic feeders_lock; /* Spinlock for the above list */
+ u8 trace_routes; /* Debugging flags (D_*) */
+ const char *name; /* Name for logging */
+ void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
+ void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
+ struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const net_addr *, const struct rt_export_item *first);
+ const net_addr *(*feed_next)(struct rt_exporter *, struct rcu_unwinder *, struct rt_export_feeder *);
+ void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
+};
+
+/* Exporter API */
+void rt_exporter_init(struct rt_exporter *, struct settle_config *);
+struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
+void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));
+
+/* Standalone feeds */
+void rt_feeder_subscribe(struct rt_exporter *, struct rt_export_feeder *);
+void rt_feeder_unsubscribe(struct rt_export_feeder *);
+void rt_export_refeed_feeder(struct rt_export_feeder *, struct rt_feeding_request *);
+
+struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *);
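+/* Walk the feeder, obtaining one struct rt_export_feed per net
+ * (see channel_do_reload() for a typical reload loop built on this) */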
+#define RT_FEED_WALK(_feeder, _f) \
+ for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder); ) \
+
+static inline _Bool rt_export_feed_active(struct rt_export_feeder *f)
+{ return !!atomic_load_explicit(&f->exporter, memory_order_acquire); }
+
+/* Full blown exports */
+void rtex_export_subscribe(struct rt_exporter *, struct rt_export_request *);
+void rtex_export_unsubscribe(struct rt_export_request *);
+
+const struct rt_export_union * rt_export_get(struct rt_export_request *);
+void rt_export_release(const struct rt_export_union *);
+void rt_export_retry_later(const struct rt_export_union *);
+void rt_export_processed(struct rt_export_request *, u64);
+void rt_export_refeed_request(struct rt_export_request *rer, struct rt_feeding_request *rfr);
+
+static inline enum rt_export_state rt_export_get_state(struct rt_export_request *r)
+{ return atomic_load_explicit(&r->export_state, memory_order_acquire); }
+const char *rt_export_state_name(enum rt_export_state state);
+
+static inline void rt_export_walk_cleanup(const struct rt_export_union **up)
+{
+ if (*up)
+ rt_export_release(*up);
+}
+
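+/* Walk all updates and feeds pending on the request; wraps rt_export_get() and
+ * rt_export_release(), and the CLEANUP hook releases the current item if the
+ * loop body exits early (break / return) */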
+#define RT_EXPORT_WALK(_reader, _u) \
+ for (CLEANUP(rt_export_walk_cleanup) const struct rt_export_union *_u;\
+ _u = rt_export_get(_reader); \
+ rt_export_release(_u)) \
+
+/* Convenience common call to request refeed */
+#define rt_export_refeed(h, r) _Generic((h), \
+ struct rt_export_feeder *: rt_export_refeed_feeder, \
+ struct rt_export_request *: rt_export_refeed_request, \
+ void *: bug)(h, r)
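+/* E.g. rt_export_refeed(&c->out_req, NULL) requests a full refeed of a channel's export */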
+
+/* Subscription to regular table exports needs locking */
+#define rt_export_subscribe(_t, _kind, f) do { \
+ RT_LOCKED(_t, tp) { \
+ rt_lock_table(tp); \
+ rtex_export_subscribe(&tp->export_##_kind, f); \
+ }} while (0) \
+
+#define rt_export_unsubscribe(_kind, _fx) do { \
+ struct rt_export_request *_f = _fx; \
+ struct rt_exporter *e = atomic_load_explicit(&_f->feeder.exporter, memory_order_acquire); \
+ RT_LOCKED(SKIP_BACK(rtable, export_##_kind, e), _tp) { \
+ rtex_export_unsubscribe(_f); \
+ rt_unlock_table(_tp); \
+ }} while (0) \
+
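+/* Typical pairing, as in channel_start_export() / channel_stop_export():
+ *   rt_export_subscribe(c->table, best, &c->out_req);
+ *   ...
+ *   rt_export_unsubscribe(best, &c->out_req);
+ */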
+static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
+{
+ switch (p->mode)
+ {
+ case TE_ADDR_NONE: return 1;
+ case TE_ADDR_IN: return net_in_netX(n, p->addr);
+ case TE_ADDR_EQUAL: return net_equal(n, p->addr);
+ case TE_ADDR_FOR: return net_in_netX(p->addr, n);
+ case TE_ADDR_TRIE: return trie_match_net(p->trie, n);
+ case TE_ADDR_HOOK: return p->hook(p, n);
+ }
+
+ bug("Crazy prefilter application attempt failed wildly.");
+}
+
+static inline _Bool
+rt_net_is_feeding_feeder(struct rt_export_feeder *ref, const net_addr *n)
+{
+ for (struct rt_feeding_request *rfr = ref->feeding; rfr; rfr = rfr->next)
+ if (rt_prefilter_net(&rfr->prefilter, n))
+ return 1;
+
+ return 0;
+}
+
+static inline _Bool
+rt_net_is_feeding_request(struct rt_export_request *req, const net_addr *n)
+{
+ struct netindex *ni = NET_TO_INDEX(n);
+ return
+ !bmap_test(&req->feed_map, ni->index)
+ && rt_net_is_feeding_feeder(&req->feeder, n);
+}
+
+#define rt_net_is_feeding(h, n) _Generic((h), \
+ struct rt_export_feeder *: rt_net_is_feeding_feeder, \
+ struct rt_export_request *: rt_net_is_feeding_request, \
+ void *: bug)(h, n)
+
+
+/*
+ * The original rtable
+ *
+ * To be kept as is for now until we refactor the new structures out of BGP Attrs.
+ */
+
+
struct rt_export_hook;
-struct rt_export_request;
-struct rt_exporter;
extern uint rtable_max_id;
struct f_trie * _Atomic trie; /* Trie of prefixes defined in fib */ \
event *nhu_event; /* Nexthop updater */ \
event *hcu_event; /* Hostcache updater */ \
+ struct rt_exporter export_all; /* Route export journal for all routes */ \
+ struct rt_exporter export_best; /* Route export journal for best routes */ \
/* The complete rtable structure */
struct rtable_private {
u32 debug; /* Debugging flags (D_*) */
list imports; /* Registered route importers */
- struct lfjour journal; /* Exporter API structure */
+
TLIST_STRUCT_DEF(rt_flowspec_link, struct rt_flowspec_link) flowspec_links; /* Links serving flowspec reload */
struct hmap id_map;
* delete as soon as use_count becomes 0 and remove
* obstacle from this routing table.
*/
+ struct rt_export_request best_req; /* Internal request from best route announcement cleanup */
struct event *nhu_uncork_event; /* Helper event to schedule NHU on uncork */
struct event *hcu_uncork_event; /* Helper event to schedule HCU on uncork */
struct timer *prune_timer; /* Timer for periodic pruning / GC */
return corked;
}
+struct rt_pending_export {
+ struct rt_export_item it;
+ struct rt_pending_export *_Atomic next; /* Next export for the same net */
+ u64 seq_all; /* Interlink from BEST to ALL */
+};
+
+struct rt_net_pending_export {
+ struct rt_pending_export * _Atomic first, * _Atomic last;
+};
typedef struct network {
- struct rte_storage * _Atomic routes; /* Available routes for this network */
- struct rt_pending_export * _Atomic first, * _Atomic last; /* Uncleaned pending exports */
+ struct rte_storage * _Atomic routes; /* Available routes for this network */
+
+ /* Uncleaned pending exports */
+ struct rt_net_pending_export all;
+ struct rt_net_pending_export best;
} net;
struct rte_storage {
#define RTE_GET_NETINDEX(e) NET_TO_INDEX((e)->net)
-/* Table-channel connections */
-
-struct rt_prefilter {
- union {
- const struct f_trie *trie;
- const net_addr *addr; /* Network prefilter address */
- int (*hook)(const struct rt_prefilter *, const net_addr *);
- };
- /* Network prefilter mode (TE_ADDR_*) */
- enum {
- TE_ADDR_NONE = 0, /* No address matching */
- TE_ADDR_EQUAL, /* Exact query - show route <addr> */
- TE_ADDR_FOR, /* Longest prefix match - show route for <addr> */
- TE_ADDR_IN, /* Interval query - show route in <addr> */
- TE_ADDR_TRIE, /* Query defined by trie */
- TE_ADDR_HOOK, /* Query processed by supplied custom hook */
- } mode;
-} PACKED;
+/* Table import */
struct rt_import_request {
struct rt_import_hook *hook; /* The table part of importer */
event cleanup_event; /* Used to finally unhook the import from the table */
};
-struct rt_pending_export {
- LFJOUR_ITEM_INHERIT(li);
- struct rt_pending_export * _Atomic next; /* Next export for the same destination */
- const rte *new, *new_best, *old, *old_best;
-};
-
-struct rt_export_feed {
- uint count_routes, count_exports;
- struct netindex *ni;
- rte *block;
- u64 *exports;
- char data[0];
-};
-
-struct rt_export_request {
- struct rt_export_hook *hook; /* Table part of the export */
- char *name;
- u8 trace_routes;
- uint feed_block_size; /* How many routes to feed at once */
- struct rt_prefilter prefilter;
-
- event_list *list; /* Where to schedule export events */
- pool *pool; /* Pool to use for allocations */
-
- /* There are two methods of export. You can either request feeding every single change
- * or feeding the whole route feed. In case of regular export, &export_one is preferred.
- * Anyway, when feeding, &export_bulk is preferred, falling back to &export_one.
- * Thus, for RA_OPTIMAL, &export_one is only set,
- * for RA_MERGED and RA_ACCEPTED, &export_bulk is only set
- * and for RA_ANY, both are set to accomodate for feeding all routes but receiving single changes
- */
- void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
- void (*export_bulk)(struct rt_export_request *req, const net_addr *net,
- struct rt_pending_export *rpe, struct rt_pending_export *last,
- const rte **feed, uint count);
-
- void (*mark_seen)(struct rt_export_request *req, struct rt_pending_export *rpe);
-
- void (*dump_req)(struct rt_export_request *req);
- void (*log_state_change)(struct rt_export_request *req, u8);
-};
-
-static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
-{
- switch (p->mode)
- {
- case TE_ADDR_NONE: return 1;
- case TE_ADDR_IN: return net_in_netX(n, p->addr);
- case TE_ADDR_EQUAL: return net_equal(n, p->addr);
- case TE_ADDR_FOR: return net_in_netX(p->addr, n);
- case TE_ADDR_TRIE: return trie_match_net(p->trie, n);
- case TE_ADDR_HOOK: return p->hook(p, n);
- }
-
- bug("Crazy prefilter application attempt failed wildly.");
-}
-
-struct rt_export_hook {
- struct lfjour_recipient recipient; /* Journal recipient structure */
-
- pool *pool;
-
- struct rt_export_request *req; /* The requestor */
-
- struct rt_export_stats {
- /* Export - from core to protocol */
- u32 updates_received; /* Number of route updates received */
- u32 withdraws_received; /* Number of route withdraws received */
- } stats;
-
- btime last_state_change; /* Time of last state transition */
-
- _Atomic u8 export_state; /* Route export state (TES_*, see below) */
- struct event *event; /* Event running all the export operations */
-
- struct bmap seq_map; /* Keep track which exports were already procesed */
-
- void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
-
- /* Table-specific items */
-
- rtable *tab; /* The table pointer to use in corner cases */
- u32 feed_index; /* Routing table iterator used during feeding */
-
- u8 refeed_pending; /* Refeeding and another refeed is scheduled */
- u8 feed_type; /* Which feeding method is used (TFT_*, see below) */
-};
-
#define TIS_DOWN 0
#define TIS_UP 1
#define TIS_CLEARED 5
#define TIS_MAX 6
-#define TES_DOWN 0
-#define TES_HUNGRY 1
-#define TES_FEEDING 2
-#define TES_READY 3
-#define TES_STOP 4
-#define TES_MAX 5
-
-
-#define TFT_FIB 1
-#define TFT_TRIE 2
-#define TFT_HASH 3
void rt_request_import(rtable *tab, struct rt_import_request *req);
-void rt_request_export(rtable *tab, struct rt_export_request *req);
-void rt_request_export_other(struct rt_exporter *tab, struct rt_export_request *req);
-
void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
-void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
-
const char *rt_import_state_name(u8 state);
-const char *rt_export_state_name(u8 state);
-
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
-static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? atomic_load_explicit(&eh->export_state, memory_order_acquire) : TES_DOWN; }
-
-u8 rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state);
void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
+#if 0
/*
* For table export processing
*/
/* Get pending export seen status */
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
-/*
- * For rt_export_hook and rt_exporter inheritance
- */
-
-void rt_init_export(struct rt_exporter *re, struct rt_export_hook *hook);
-struct rt_export_hook *rt_alloc_export(struct rt_exporter *re, pool *pool, uint size);
-void rt_stop_export_common(struct rt_export_hook *hook);
-void rt_export_stopped(struct rt_export_hook *hook);
-void rt_exporter_init(struct rt_exporter *re);
+#endif
/*
* Channel export hooks. To be refactored out.
int channel_preimport(struct rt_import_request *req, rte *new, const rte *old);
-void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
-
-void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
-void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
-void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
-void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
-void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
-
-void channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe);
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
struct f_trie *trie; /* Trie of prefixes that might affect hostentries */
list hostentries; /* List of all hostentries */
struct rt_export_request req; /* Notifier */
+ event source_event;
};
#define rte_update channel_rte_import
void rt_flowspec_unlink(rtable *src, rtable *dst);
rtable *rt_setup(pool *, struct rtable_config *);
-struct rt_export_feed *rt_net_feed(rtable *t, net_addr *a);
-rte rt_net_best(rtable *t, net_addr *a);
+struct rt_export_feed *rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first);
+rte rt_net_best(rtable *t, const net_addr *a);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
-rte *rt_export_merged(struct channel *c, const net_addr *n, const rte ** feed, uint count, linpool *pool, int silent);
+rte *rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent);
void rt_refresh_begin(struct rt_import_request *);
void rt_refresh_end(struct rt_import_request *);
void rt_schedule_prune(struct rtable_private *t);
void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
-void rt_refeed_channel(struct channel *c);
void rt_prune_sync(rtable *t, int all);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
void rt_new_default_table(struct symbol *s);
struct channel *export_channel;
struct channel *prefilter;
struct krt_proto *kernel;
+ struct rt_export_feeder req; /* Export feeder in use */
};
struct rt_show_data {
list tables;
struct rt_show_data_rtable *tab; /* Iterator over table list */
struct rt_show_data_rtable *last_table; /* Last table in output */
- struct rt_export_request req; /* Export request in use */
int verbose, tables_defined_by;
const struct filter *filter;
struct proto *show_protocol;
struct proto *export_protocol;
struct channel *export_channel;
struct config *running_on_config;
- struct rt_export_hook *kernel_export_hook;
+// struct rt_export_hook *kernel_export_hook;
int export_mode, addr_mode, primary_only, filtered, stats;
int net_counter, rt_counter, show_counter, table_counter;
if (src)
{
+#ifdef RT_SOURCE_DEBUG
+ log(L_INFO "Found source %uG", src->global_id);
+#endif
lfuc_lock_revive(&src->uc);
return src;
}
--- /dev/null
+/*
+ * BIRD -- Route Export Mechanisms
+ *
+ * (c) 2024 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#include "nest/bird.h"
+#include "nest/route.h"
+#include "nest/protocol.h"
+
+#define rtex_trace(_req, _cat, msg, args...) do { \
+  if ((_req)->trace_routes & (_cat)) \
+ log(L_TRACE "%s: " msg, (_req)->name, ##args); \
+} while (0)
+
+static inline enum rt_export_state
+rt_export_change_state(struct rt_export_request *r, u32 expected_mask, enum rt_export_state state)
+{
+ r->last_state_change = current_time();
+ enum rt_export_state old = atomic_exchange_explicit(&r->export_state, state, memory_order_acq_rel);
+ if (!((1 << old) & expected_mask))
+ bug("Unexpected export state change from %s to %s, expected mask %02x",
+ rt_export_state_name(old),
+ rt_export_state_name(state),
+ expected_mask
+ );
+
+ rtex_trace(r, D_STATES, "Export state changed from %s to %s",
+ rt_export_state_name(old), rt_export_state_name(state));
+
+ return old;
+}
+
+const struct rt_export_union *
+rt_export_get(struct rt_export_request *r)
+{
+ ASSERT_DIE(!r->cur);
+
+#define EXPORT_FOUND(_kind) do { \
+ struct rt_export_union *reu = tmp_alloc(sizeof *reu); \
+ *reu = (struct rt_export_union) { \
+ .kind = _kind, \
+ .req = r, \
+ .update = update, \
+ .feed = feed, \
+ }; \
+ return (r->cur = reu); \
+} while (0)
+
+#define NOT_THIS_UPDATE do { \
+ lfjour_release(&r->r); \
+ return rt_export_get(r); \
+} while (0)
+
+ enum rt_export_state es = rt_export_get_state(r);
+ switch (es)
+ {
+ case TES_DOWN:
+ rtex_trace(r, (D_ROUTES|D_STATES), "Export is down");
+ return NULL;
+
+ case TES_STOP:
+ rtex_trace(r, (D_ROUTES|D_STATES), "Received stop event");
+ struct rt_export_union *reu = tmp_alloc(sizeof *reu);
+ *reu = (struct rt_export_union) {
+ .kind = RT_EXPORT_STOP,
+ .req = r,
+ };
+ return (r->cur = reu);
+
+ case TES_PARTIAL:
+ case TES_FEEDING:
+ case TES_READY:
+ break;
+
+ case TES_MAX:
+ bug("invalid export state");
+ }
+
+ /* Process sequence number reset event */
+ if (lfjour_reset_seqno(&r->r))
+ bmap_reset(&r->seq_map, 4);
+
+ /* Get a new update */
+ SKIP_BACK_DECLARE(struct rt_export_item, update, li, lfjour_get(&r->r));
+ SKIP_BACK_DECLARE(struct rt_exporter, e, journal, lfjour_of_recipient(&r->r));
+ struct rt_export_feed *feed = NULL;
+
+ /* No update, try feed */
+ if (!update)
+ if (es == TES_READY)
+ {
+      /* Fed up with feeding */
+ rtex_trace(r, D_ROUTES, "Export drained");
+ return NULL;
+ }
+ else if (feed = rt_export_next_feed(&r->feeder))
+ {
+ /* Feeding more */
+ bmap_set(&r->feed_map, feed->ni->index);
+ rtex_trace(r, D_ROUTES, "Feeding %N", feed->ni->addr);
+
+ EXPORT_FOUND(RT_EXPORT_FEED);
+ }
+ else if (rt_export_get_state(r) == TES_DOWN)
+ {
+      /* Torn down in between */
+ rtex_trace(r, D_STATES, "Export ended itself");
+ return NULL;
+ }
+ else
+ {
+ /* No more food */
+ rt_export_change_state(r, BIT32_ALL(TES_FEEDING, TES_PARTIAL), TES_READY);
+ rtex_trace(r, D_STATES, "Fed up");
+ CALL(r->fed, r);
+ return NULL;
+ }
+
+ /* There actually is an update */
+ if (bmap_test(&r->seq_map, update->seq))
+ {
+    /* But this update has already been processed, let's try another one */
+ rtex_trace(r, D_ROUTES, "Skipping an already processed update %lu", update->seq);
+ NOT_THIS_UPDATE;
+ }
+
+ /* Is this update allowed by prefilter? */
+ const net_addr *n = (update->new ?: update->old)->net;
+ if (!rt_prefilter_net(&r->feeder.prefilter, n))
+ {
+ rtex_trace(r, D_ROUTES, "Not exporting %N due to prefilter", n);
+ NOT_THIS_UPDATE;
+ }
+
+ if ((es != TES_READY) && rt_net_is_feeding(r, n))
+ {
+ /* But this net shall get a feed first! */
+ rtex_trace(r, D_ROUTES, "Expediting %N feed due to pending update %lu", n, update->seq);
+ RCU_ANCHOR(u);
+ feed = e->feed_net(e, u, n, update);
+
+ bmap_set(&r->feed_map, NET_TO_INDEX(n)->index);
+ ASSERT_DIE(feed);
+
+ EXPORT_FOUND(RT_EXPORT_FEED);
+ }
+
+ /* OK, now this actually is an update, thank you for your patience */
+ rtex_trace(r, D_ROUTES, "Updating %N, seq %lu", n, update->seq);
+
+ EXPORT_FOUND(RT_EXPORT_UPDATE);
+
+#undef NOT_THIS_UPDATE
+#undef EXPORT_FOUND
+}
+
+void
+rt_export_release(const struct rt_export_union *u)
+{
+ /* May be already released */
+ if (!u->req)
+ return;
+
+ struct rt_export_request *r = u->req;
+
+ /* Must be crosslinked */
+ ASSERT_DIE(r->cur == u);
+ r->cur = NULL;
+
+ switch (u->kind)
+ {
+ case RT_EXPORT_FEED:
+ for (uint i = 0; i < u->feed->count_exports; i++)
+ bmap_set(&r->seq_map, u->feed->exports[i]);
+
+ if (!u->update)
+ break;
+
+ /* fall through */
+
+ case RT_EXPORT_UPDATE:
+ rtex_trace(r, D_ROUTES, "Export %lu released", u->update->seq);
+ lfjour_release(&r->r);
+
+ break;
+
+ case RT_EXPORT_STOP:
+ /* Checking that we have indeed stopped the exporter */
+ ASSERT_DIE(rt_export_get_state(r) == TES_DOWN);
+ rtex_trace(r, D_ROUTES, "Export stopped");
+ break;
+
+ default:
+ bug("strange export kind");
+ }
+}
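+
+/*
+ * Usage sketch (illustrative only, not a call site in this patch): a reader
+ * alternates rt_export_get() and rt_export_release(), checking u->kind to
+ * tell feeds, updates and the stop signal apart. The consumers elsewhere in
+ * this patch wrap this pattern in the RT_EXPORT_WALK() macro.
+ *
+ *   const struct rt_export_union *u;
+ *   while ((u = rt_export_get(req)))
+ *   {
+ *     switch (u->kind)
+ *     {
+ *       case RT_EXPORT_STOP:	... unsubscribe and quit ...; break;
+ *       case RT_EXPORT_FEED:	... process u->feed ...; break;
+ *       case RT_EXPORT_UPDATE:	... process u->update ...; break;
+ *     }
+ *     rt_export_release(u);
+ *   }
+ */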
+
+void
+rt_export_processed(struct rt_export_request *r, u64 seq)
+{
+ rtex_trace(r, D_ROUTES, "Marking export %lu as processed", seq);
+
+ /* Check sequence number reset event */
+ if (lfjour_reset_seqno(&r->r))
+ bmap_reset(&r->seq_map, 4);
+
+ ASSERT_DIE(!bmap_test(&r->seq_map, seq));
+ bmap_set(&r->seq_map, seq);
+}
+
+struct rt_export_feed *
+rt_export_next_feed(struct rt_export_feeder *f)
+{
+ ASSERT_DIE(f);
+ while (1)
+ {
+ RCU_ANCHOR(u);
+
+ struct rt_exporter *e = atomic_load_explicit(&f->exporter, memory_order_acquire);
+ if (!e)
+ {
+ rtex_trace(f, (D_ROUTES|D_STATES), "Exporter kicked us away");
+ return NULL;
+ }
+
+ const net_addr *a = e->feed_next(e, u, f);
+ if (!a)
+ break;
+
+ if (!rt_prefilter_net(&f->prefilter, a))
+ {
+ rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", a);
+ continue;
+ }
+
+ if (f->feeding && !rt_net_is_feeding_feeder(f, a))
+ {
+ rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", a);
+ continue;
+ }
+
+ struct rt_export_feed *feed = e->feed_net(e, u, a, NULL);
+ if (feed)
+ {
+ rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, a);
+ return feed;
+ }
+ }
+
+ /* Feeding done */
+ while (f->feeding)
+ {
+ struct rt_feeding_request *rfr = f->feeding;
+ f->feeding = rfr->next;
+ CALL(rfr->done, rfr);
+ }
+
+ f->feed_index = 0;
+
+ if (f->feed_pending)
+ {
+ rtex_trace(f, D_STATES, "Feeding done, refeed request pending");
+ f->feeding = f->feed_pending;
+ f->feed_pending = NULL;
+ return rt_export_next_feed(f);
+ }
+ else
+ {
+ rtex_trace(f, D_STATES, "Feeding done (%u)", f->feed_index);
+ return NULL;
+ }
+}
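+
+/*
+ * Feeder-only consumers (e.g. the CLI feeder in rt-show) drain this iterator
+ * with the RT_FEED_WALK() macro; a rough equivalent of that loop is sketched
+ * below (the macro's exact expansion may differ):
+ *
+ *   struct rt_export_feed *f;
+ *   while ((f = rt_export_next_feed(feeder)))
+ *     ... process f->block[0 .. f->count_routes - 1] ...
+ */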
+
+static void
+rt_feeding_request_default_done(struct rt_feeding_request *rfr)
+{
+ mb_free(rfr);
+}
+
+void
+rt_export_refeed_feeder(struct rt_export_feeder *f, struct rt_feeding_request *rfr)
+{
+ if (!rfr)
+ return;
+
+ rfr->next = f->feed_pending;
+ f->feed_pending = rfr;
+}
+
+void rt_export_refeed_request(struct rt_export_request *rer, struct rt_feeding_request *rfr)
+{
+ if (!rfr)
+ {
+ rfr = mb_allocz(rer->pool, sizeof *rfr);
+ rfr->done = rt_feeding_request_default_done;
+ }
+
+ bmap_reset(&rer->feed_map, 4);
+ rt_export_refeed_feeder(&rer->feeder, rfr);
+ rt_export_change_state(rer, BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY), TES_PARTIAL);
+ if (rer->r.event)
+ ev_send(rer->r.target, rer->r.event);
+}
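+
+/*
+ * Minimal usage sketch (hypothetical caller): passing NULL requests a full
+ * refeed; the default request is allocated from rer->pool and frees itself
+ * via rt_feeding_request_default_done() once the feed has finished.
+ *
+ *   rt_export_refeed_request(&c->out_req, NULL);
+ */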
+
+void
+rtex_export_subscribe(struct rt_exporter *e, struct rt_export_request *r)
+{
+ rt_export_change_state(r, BIT32_ALL(TES_DOWN), TES_FEEDING);
+
+ ASSERT_DIE(r->pool);
+
+ rt_feeder_subscribe(e, &r->feeder);
+
+ lfjour_register(&e->journal, &r->r);
+
+ r->stats = (struct rt_export_stats) {};
+ r->last_state_change = current_time();
+ bmap_init(&r->seq_map, r->pool, 4);
+ bmap_init(&r->feed_map, r->pool, 4);
+
+ rt_export_refeed_request(r, NULL);
+}
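+
+/*
+ * Subscription sketch (hypothetical setup; the event and list names are
+ * assumptions, only the fields themselves are used in this file): the caller
+ * provides a pool and usually a wakeup event before subscribing to an
+ * exporter (here a table's export_all, as used elsewhere in this patch), and
+ * tears the request down with the symmetric rtex_export_unsubscribe().
+ *
+ *   r->pool = p;
+ *   r->r.target = my_event_list;
+ *   r->r.event = &my_export_event;
+ *   rtex_export_subscribe(&tab->export_all, r);
+ *   ...
+ *   rtex_export_unsubscribe(r);
+ */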
+
+void
+rtex_export_unsubscribe(struct rt_export_request *r)
+{
+ rt_feeder_unsubscribe(&r->feeder);
+
+ if (r->cur)
+ rt_export_release(r->cur);
+
+ switch (rt_export_change_state(r, BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY, TES_STOP), TES_DOWN))
+ {
+ case TES_FEEDING:
+ case TES_PARTIAL:
+ case TES_READY:
+ case TES_STOP:
+ lfjour_unregister(&r->r);
+ break;
+ default:
+ bug("not implemented");
+ }
+
+ bmap_free(&r->feed_map);
+ bmap_free(&r->seq_map);
+}
+
+static void
+rt_exporter_cleanup_done(struct lfjour *j, u64 begin_seq UNUSED, u64 end_seq)
+{
+ SKIP_BACK_DECLARE(struct rt_exporter, e, journal, j);
+
+ /* TODO: log the begin_seq / end_seq values */
+
+ CALL(e->cleanup_done, e, end_seq);
+ if (e->stopped && (lfjour_count_recipients(j) == 0))
+ {
+ settle_cancel(&j->announce_timer);
+ ev_postpone(&j->cleanup_event);
+ e->stopped(e);
+ }
+}
+
+void
+rt_exporter_init(struct rt_exporter *e, struct settle_config *scf)
+{
+ rtex_trace(e, D_STATES, "Exporter init");
+ e->journal.cleanup_done = rt_exporter_cleanup_done;
+ lfjour_init(&e->journal, scf);
+ ASSERT_DIE(e->feed_net);
+ ASSERT_DIE(e->feed_next);
+}
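+
+/*
+ * Initialization sketch (the field list is inferred from uses in this file
+ * and is therefore an assumption, not a complete checklist): the owner fills
+ * in the journal parameters and the virtual methods, then calls
+ * rt_exporter_init(). The item size shown is the one used by the table
+ * exporters; other owners supply their own item type.
+ *
+ *   e->name = "my exporter";
+ *   e->journal.item_size = sizeof(struct rt_pending_export);
+ *   e->feed_net = my_feed_net;
+ *   e->feed_next = my_feed_next;
+ *   e->cleanup_done = my_cleanup_done;	(optional)
+ *   rt_exporter_init(e, &my_settle_config);
+ */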
+
+struct rt_export_item *
+rt_exporter_push(struct rt_exporter *e, const struct rt_export_item *uit)
+{
+ /* Get the object */
+ struct lfjour_item *lit = lfjour_push_prepare(&e->journal);
+ if (!lit)
+ return NULL;
+
+ SKIP_BACK_DECLARE(struct rt_export_item, it, li, lit);
+
+ /* Copy the data, keeping the header */
+ memcpy(&it->data, &uit->data, e->journal.item_size - OFFSETOF(struct rt_export_item, data));
+
+ /* Commit the update */
+ rtex_trace(e, D_ROUTES, "Announcing change %lu at %N: %p (%u) -> %p (%u)",
+ lit->seq, (uit->new ?: uit->old)->net,
+ uit->old, uit->old ? uit->old->id : 0,
+ uit->new, uit->new ? uit->new->id : 0);
+
+ lfjour_push_commit(&e->journal);
+
+ /* Return the update pointer */
+ return it;
+}
+
+#define RTEX_FEEDERS_LOCK(e) \
+ while (atomic_exchange_explicit(&e->feeders_lock, 1, memory_order_acq_rel)) \
+ birdloop_yield(); \
+ CLEANUP(_rtex_feeders_unlock_) UNUSED struct rt_exporter *_rtex_feeders_locked_ = e;
+
+static inline void _rtex_feeders_unlock_(struct rt_exporter **e)
+{
+ ASSERT_DIE(atomic_exchange_explicit(&(*e)->feeders_lock, 0, memory_order_acq_rel));
+}
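+
+/*
+ * Note: RTEX_FEEDERS_LOCK() is a scope-bound busy-wait lock. The CLEANUP
+ * attribute on _rtex_feeders_locked_ runs _rtex_feeders_unlock_() when the
+ * enclosing block ends, so there is no explicit unlock call. Caller sketch:
+ *
+ *   {
+ *     RTEX_FEEDERS_LOCK(e);
+ *     ... modify e->feeders ...
+ *   }	<- unlocked here
+ */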
+
+void
+rt_feeder_subscribe(struct rt_exporter *e, struct rt_export_feeder *f)
+{
+ f->feed_index = 0;
+
+ atomic_store_explicit(&f->exporter, e, memory_order_relaxed);
+
+ RTEX_FEEDERS_LOCK(e);
+ rt_export_feeder_add_tail(&e->feeders, f);
+
+ rtex_trace(f, D_STATES, "Subscribed to exporter %s", e->name);
+}
+
+void
+rt_feeder_unsubscribe(struct rt_export_feeder *f)
+{
+ RCU_ANCHOR(a);
+ struct rt_exporter *e = atomic_exchange_explicit(&f->exporter, NULL, memory_order_acquire);
+ if (e)
+ {
+ RTEX_FEEDERS_LOCK(e);
+ rt_export_feeder_rem_node(&e->feeders, f);
+
+ rtex_trace(f, D_STATES, "Unsubscribed from exporter %s", e->name);
+ }
+ else
+ rtex_trace(f, D_STATES, "Already unsubscribed");
+}
+
+void
+rt_exporter_shutdown(struct rt_exporter *e, void (*stopped)(struct rt_exporter *))
+{
+ rtex_trace(e, D_STATES, "Exporter shutdown");
+
+ /* Last lock check before dropping the domain reference */
+ if (e->journal.domain)
+ ASSERT_DIE(DG_IS_LOCKED(e->journal.domain));
+
+ e->journal.domain = NULL;
+
+ /* We have to tell every receiver to stop */
+ _Bool done = 1;
+ WALK_TLIST(lfjour_recipient, r, &e->journal.recipients)
+ {
+ done = 0;
+ rt_export_change_state(
+ SKIP_BACK(struct rt_export_request, r, r),
+ BIT32_ALL(TES_FEEDING, TES_PARTIAL, TES_READY, TES_STOP),
+ TES_STOP);
+ }
+
+ /* We can drop feeders synchronously */
+ {
+ RTEX_FEEDERS_LOCK(e);
+ WALK_TLIST_DELSAFE(rt_export_feeder, f, &e->feeders)
+ {
+ ASSERT_DIE(atomic_exchange_explicit(&f->exporter, NULL, memory_order_acq_rel) == e);
+ rt_export_feeder_rem_node(&e->feeders, f);
+ }
+ }
+
+ /* Wait for feeders to finish */
+ synchronize_rcu();
+
+ /* The rest is done via the cleanup routine */
+ lfjour_do_cleanup_now(&e->journal);
+
+ if (done)
+ {
+ ev_postpone(&e->journal.cleanup_event);
+ settle_cancel(&e->journal.announce_timer);
+ CALL(stopped, e);
+ }
+ else
+// e->stopped = stopped;
+ bug("not implemented yet");
+}
#include "filter/data.h"
#include "sysdep/unix/krt.h"
+static void rt_show_cont(struct cli *c);
+static void rt_show_done(struct rt_show_data *d);
+
static void
rt_show_table(struct rt_show_data *d)
{
}
static void
-rt_show_net(struct rt_show_data *d, const net_addr *n, const rte **feed, uint count)
+rt_show_net(struct rt_show_data *d, const struct rt_export_feed *feed)
{
struct cli *c = d->cli;
byte ia[NET_MAX_TEXT_LENGTH+16+1];
uint last_label = 0;
int pass = 0;
- for (uint i = 0; i < count; i++)
+ for (uint i = 0; i < feed->count_routes; i++)
{
- if (!d->tab->prefilter && (rte_is_filtered(feed[i]) != d->filtered))
+ rte *e = &feed->block[i];
+ if (e->flags & REF_OBSOLETE)
+ break;
+
+ if (!d->tab->prefilter && (rte_is_filtered(e) != d->filtered))
continue;
d->rt_counter++;
if (pass)
continue;
- struct rte e = *feed[i];
if (d->tab->prefilter)
- if (e.sender != d->tab->prefilter->in_req.hook)
+ if (e->sender != d->tab->prefilter->in_req.hook)
continue;
- else while (e.attrs->next)
- e.attrs = e.attrs->next;
+ else
+ e->attrs = ea_strip_to(e->attrs, BIT32_ALL(EALS_PREIMPORT));
/* Export channel is down, do not try to export routes to it */
- if (ec && !ec->out_req.hook)
+ if (ec && (rt_export_get_state(&ec->out_req) == TES_DOWN))
goto skip;
if (d->export_mode == RSEM_EXPORTED)
{
- if (!bmap_test(&ec->export_map, e.id))
+ if (!bmap_test(&ec->export_accepted_map, e->id))
goto skip;
// if (ec->ra_mode != RA_ANY)
{
/* Special case for merged export */
pass = 1;
- rte *em = rt_export_merged(ec, n, feed, count, tmp_linpool, 1);
+ rte *em = rt_export_merged(ec, feed, tmp_linpool, 1);
if (em)
- e = *em;
+ e = em;
else
goto skip;
}
else if (d->export_mode)
{
struct proto *ep = ec->proto;
- int ic = ep->preexport ? ep->preexport(ec, &e) : 0;
+ int ic = ep->preexport ? ep->preexport(ec, e) : 0;
if (ec->ra_mode == RA_OPTIMAL || ec->ra_mode == RA_MERGED)
pass = 1;
* command may change the export filter and do not update routes.
*/
int do_export = (ic > 0) ||
- (f_run(ec->out_filter, &e, FF_SILENT) <= F_ACCEPT);
+ (f_run(ec->out_filter, e, FF_SILENT) <= F_ACCEPT);
if (do_export != (d->export_mode == RSEM_EXPORT))
goto skip;
}
}
- if (d->show_protocol && (&d->show_protocol->sources != e.src->owner))
+ if (d->show_protocol && (&d->show_protocol->sources != e->src->owner))
goto skip;
- if (f_run(d->filter, &e, 0) > F_ACCEPT)
+ if (f_run(d->filter, e, 0) > F_ACCEPT)
goto skip;
if (d->stats < 2)
{
- uint label = ea_get_int(e.attrs, &ea_gen_mpls_label, ~0U);
+ uint label = ea_get_int(e->attrs, &ea_gen_mpls_label, ~0U);
if (first_show || (last_label != label))
{
if (!~label)
- net_format(n, ia, sizeof(ia));
+ net_format(feed->ni->addr, ia, sizeof(ia));
else
- bsnprintf(ia, sizeof(ia), "%N mpls %d", n, label);
+ bsnprintf(ia, sizeof(ia), "%N mpls %d", feed->ni->addr, label);
}
else
ia[0] = 0;
- rt_show_rte(c, ia, &e, d, !d->tab->prefilter && !i);
+ rt_show_rte(c, ia, e, d, !d->tab->prefilter && !i);
first_show = 0;
last_label = label;
}
skip:
if (d->primary_only)
break;
+#undef e
}
if ((d->show_counter - d->show_counter_last_flush) > 64)
}
static void
-rt_show_net_export_bulk(struct rt_export_request *req, const net_addr *n,
- struct rt_pending_export *first UNUSED, struct rt_pending_export *last UNUSED,
- const rte **feed, uint count)
-{
- SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
- return rt_show_net(d, n, feed, count);
-}
-
-static void
-rt_show_export_stopped_cleanup(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
-
- /* The hook is now invalid */
- req->hook = NULL;
-
- /* And free the CLI (deferred) */
- rp_free(d->cli->pool);
-}
-
-static int
rt_show_cleanup(struct cli *c)
{
struct rt_show_data *d = c->rover;
+ struct rt_show_data_rtable *tab, *tabx;
c->cleanup = NULL;
- /* Cancel the feed */
- if (d->req.hook)
+ /* Cancel the feeds */
+ WALK_LIST_DELSAFE(tab, tabx, d->tables)
{
- rt_stop_export(&d->req, rt_show_export_stopped_cleanup);
- return 1;
+ if (rt_export_feed_active(&tab->req))
+ rt_feeder_unsubscribe(&tab->req);
}
- else
- return 0;
}
-static void rt_show_export_stopped(struct rt_export_request *req);
-
static void
-rt_show_log_state_change(struct rt_export_request *req, u8 state)
+rt_show_done(struct rt_show_data *d)
{
- if (state == TES_READY)
- rt_stop_export(req, rt_show_export_stopped);
-}
+ /* Force the cleanup */
+ rt_show_cleanup(d->cli);
-static void
-rt_show_dump_req(struct rt_export_request *req)
-{
- debug(" CLI Show Route Feed %p\n", req);
-}
+ /* Write pending messages */
+ cli_write_trigger(d->cli);
-static void
-rt_show_done(struct rt_show_data *d)
-{
/* No more action */
d->cli->cleanup = NULL;
d->cli->cont = NULL;
d->cli->rover = NULL;
-
- /* Write pending messages */
- cli_write_trigger(d->cli);
}
static void
-rt_show_cont(struct rt_show_data *d)
+rt_show_cont(struct cli *c)
{
- struct cli *c = d->cli;
+ struct rt_show_data *d = c->rover;
if (d->running_on_config && (d->running_on_config != config))
{
return rt_show_done(d);
}
- d->req = (struct rt_export_request) {
- .prefilter.addr = d->addr,
- .name = "CLI Show Route",
- .list = &global_work_list,
- .pool = c->pool,
- .export_bulk = rt_show_net_export_bulk,
- .dump_req = rt_show_dump_req,
- .log_state_change = rt_show_log_state_change,
- .prefilter.mode = d->addr_mode,
- };
-
d->table_counter++;
d->show_counter_last = d->show_counter;
if (d->tables_defined_by & RSD_TDB_SET)
rt_show_table(d);
- rt_request_export(d->tab->table, &d->req);
-}
+ RT_FEED_WALK(&d->tab->req, f)
+ if (f->count_routes)
+ rt_show_net(d, f);
-static void
-rt_show_export_stopped(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
-
- /* The hook is now invalid */
- req->hook = NULL;
+ if (rt_export_feed_active(&d->tab->req))
+ rt_feeder_unsubscribe(&d->tab->req);
+ else
+ {
+ cli_printf(c, 8004, "Table is shutting down");
+ return rt_show_done(d);
+ }
if (d->stats)
{
rt_show_table(d);
cli_printf(d->cli, -1007, "%d of %d routes for %d networks in table %s",
- d->show_counter - d->show_counter_last, d->rt_counter - d->rt_counter_last,
- d->net_counter - d->net_counter_last, d->tab->name);
+ d->show_counter - d->show_counter_last, d->rt_counter - d->rt_counter_last,
+ d->net_counter - d->net_counter_last, d->tab->name);
}
d->tab = NODE_NEXT(d->tab);
if (NODE_VALID(d->tab))
- return rt_show_cont(d);
+    /* Will be called again later via this_cli->cont() */
+ return;
/* Printout total stats */
if (d->stats && (d->table_counter > 1))
{
if (d->last_table) cli_printf(d->cli, -1007, "");
cli_printf(d->cli, 14, "Total: %d of %d routes for %d networks in %d tables",
- d->show_counter, d->rt_counter, d->net_counter, d->table_counter);
+ d->show_counter, d->rt_counter, d->net_counter, d->table_counter);
}
else if (!d->rt_counter && ((d->addr_mode == TE_ADDR_EQUAL) || (d->addr_mode == TE_ADDR_FOR)))
cli_printf(d->cli, 8001, "Network not found");
{
WALK_LIST(c, d->export_protocol->channels)
{
- if (!c->out_req.hook)
+ if (rt_export_get_state(&c->out_req) == TES_DOWN)
continue;
tab = rt_show_add_table(d, c->table);
rem_node(&(tab->n));
continue;
}
+
+ /* Open the export request */
+ tab->req = (struct rt_export_feeder) {
+ .name = "cli.feeder",
+ .prefilter = {
+ .addr = d->addr,
+ .mode = d->addr_mode,
+ },
+ .trace_routes = config->show_route_debug,
+ };
+
+ rt_feeder_subscribe(&tab->table->export_all, &tab->req);
}
/* Ensure there is at least one table */
cf_error("No valid tables");
}
-static void
-rt_show_dummy_cont(struct cli *c UNUSED)
-{
- /* Explicitly do nothing to prevent CLI from trying to parse another command. */
-}
-
void
rt_show(struct rt_show_data *d)
{
this_cli->cleanup = rt_show_cleanup;
this_cli->rover = d;
- this_cli->cont = rt_show_dummy_cont;
+ this_cli->cont = rt_show_cont;
- rt_show_cont(d);
+ cli_write_trigger(this_cli);
}
* BIRD -- Routing Tables
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2019--2024 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
list deleted_routing_tables;
netindex_hash *rt_global_netindex_hash;
+#define RT_INITIAL_ROUTES_BLOCK_SIZE 128
struct rt_cork rt_cork;
-struct rt_export_block {
- struct lfjour_block lb;
- struct rt_pending_export export[];
-};
-
-#define RT_INITIAL_ROUTES_BLOCK_SIZE 128
-
/* Data structures for export journal */
static void rt_free_hostcache(struct rtable_private *tab);
static void rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg);
static void rt_kick_prune_timer(struct rtable_private *tab);
static void rt_prune_table(void *_tab);
-static void rt_feed_by_fib(void *);
-static void rt_feed_equal(void *);
-static void rt_feed_for(void *);
static void rt_check_cork_low(struct rtable_private *tab);
static void rt_check_cork_high(struct rtable_private *tab);
static void rt_cork_release_hook(void *);
int rte_same(const rte *x, const rte *y);
+static inline void rt_rte_trace_in(uint flag, struct rt_import_request *req, const rte *e, const char *msg);
+
const char *rt_import_state_name_array[TIS_MAX] = {
[TIS_DOWN] = "DOWN",
[TIS_UP] = "UP",
};
const char *rt_export_state_name_array[TES_MAX] = {
- [TES_DOWN] = "DOWN",
- [TES_HUNGRY] = "HUNGRY",
- [TES_FEEDING] = "FEEDING",
- [TES_READY] = "READY",
- [TES_STOP] = "STOP"
+#define RT_EXPORT_STATES_ENUM_HELPER(p) [TES_##p] = #p,
+ MACRO_FOREACH(RT_EXPORT_STATES_ENUM_HELPER, RT_EXPORT_STATES)
+#undef RT_EXPORT_STATES_ENUM_HELPER
};
const char *rt_import_state_name(u8 state)
return rt_import_state_name_array[state];
}
-const char *rt_export_state_name(u8 state)
+const char *rt_export_state_name(enum rt_export_state state)
{
- if (state >= TES_MAX)
- return "!! INVALID !!";
- else
- return rt_export_state_name_array[state];
+ ASSERT_DIE((state < TES_MAX) && (state >= 0));
+
+ return rt_export_state_name_array[state];
}
static struct hostentry *rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep);
struct rcu_unwinder *u;
};
-static inline void _rt_rcu_unlock_(struct rtable_reading *o)
-{
- if (o->t)
- rcu_read_unlock();
-}
-
#define RT_READ_ANCHORED(_o, _i, _u) \
struct rtable_reading _s##_i = { .t = _o, .u = _u, }, *_i = &_s##_i;
static void
rte_free(struct rte_storage *e, struct rtable_private *tab)
{
+ /* Wait for very slow table readers */
+ synchronize_rcu();
+
+ rt_rte_trace_in(D_ROUTES, e->rte.sender->req, &e->rte, "freeing");
+
struct netindex *i = RTE_GET_NETINDEX(&e->rte);
net_unlock_index(tab->netindex, i);
static void
rte_trace(const char *name, const rte *e, int dir, const char *msg)
{
- log(L_TRACE "%s %c %s %N (%u) src %luL %uG %uS id %u %s",
- name, dir, msg, e->net, NET_TO_INDEX(e->net)->index,
+ log(L_TRACE "%s %c %s %N ptr %p (%u) src %luL %uG %uS id %u %s",
+ name, dir, msg, e->net, e, NET_TO_INDEX(e->net)->index,
e->src->private_id, e->src->global_id, e->stale_cycle, e->id,
rta_dest_name(rte_dest(e)));
}
channel_rte_trace_in(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
- log(L_TRACE "%s > %s %N (-) src %luL %uG %uS id %u %s",
- c->in_req.name, msg, e->net,
+ log(L_TRACE "%s > %s %N ptr %p (-) src %luL %uG %uS id %u %s",
+ c->in_req.name, msg, e->net, e,
e->src->private_id, e->src->global_id, e->stale_cycle, e->id,
rta_dest_name(rte_dest(e)));
}
return count;
}
+#if 0
static void
rte_feed_obtain(struct rtable_reading *tr, net *n, const rte **feed, uint count)
{
if (i != count)
RT_READ_RETRY(tr);
}
+#endif
static void
rte_feed_obtain_copy(struct rtable_reading *tr, net *n, rte *feed, uint count)
struct channel_export_stats *stats = &c->export_stats;
/* Do nothing if we have already rejected the route */
- if (silent && bmap_test(&c->export_reject_map, rt->id))
+ if (silent && bmap_test(&c->export_rejected_map, rt->id))
goto reject_noset;
int v = p->preexport ? p->preexport(c, rt) : 0;
accept:
/* We have accepted the route */
- bmap_clear(&c->export_reject_map, rt->id);
+ bmap_clear(&c->export_rejected_map, rt->id);
return rt;
reject:
/* We have rejected the route by filter */
- bmap_set(&c->export_reject_map, rt->id);
+ bmap_set(&c->export_rejected_map, rt->id);
reject_noset:
/* Discard temporary rte */
struct proto *p = c->proto;
struct channel_export_stats *stats = &c->export_stats;
+ ASSERT_DIE(old || new);
+
if (!old && new)
if (CHANNEL_LIMIT_PUSH(c, OUT))
{
stats->withdraws_accepted++;
if (old)
- bmap_clear(&c->export_map, old->id);
+ bmap_clear(&c->export_accepted_map, old->id);
if (new)
- bmap_set(&c->export_map, new->id);
+ bmap_set(&c->export_accepted_map, new->id);
if (new && old)
channel_rte_trace_out(D_ROUTES, c, new, "replaced");
}
static void
-rt_notify_basic(struct channel *c, const net_addr *net, rte *new, const rte *old, int force)
+rt_notify_basic(struct channel *c, const rte *new, const rte *old)
{
- if (new && old && rte_same(new, old) && !force)
- {
- channel_rte_trace_out(D_ROUTES, c, new, "already exported");
+ const rte *trte = new ?: old;
- if ((new->id != old->id) && bmap_test(&c->export_map, old->id))
- {
- bmap_set(&c->export_map, new->id);
- bmap_clear(&c->export_map, old->id);
- }
+ /* Ignore invalid routes */
+ if (!rte_is_valid(new))
+ new = NULL;
+
+ if (!rte_is_valid(old))
+ old = NULL;
+
+ if (!new && !old)
+ {
+ channel_rte_trace_out(D_ROUTES, c, trte, "idempotent withdraw (filtered on import)");
return;
}
- /* Refeeding and old is new */
- if (force && !old && bmap_test(&c->export_map, new->id))
+ /* If this is a refeed, we may need to copy the new route to the old one */
+ if (!old && bmap_test(&c->export_accepted_map, new->id))
+ {
+ ASSERT_DIE(rt_export_get_state(&c->out_req) == TES_PARTIAL);
old = new;
+ }
+ /* Run the filters, actually */
+ rte n0, *np = NULL;
if (new)
- new = export_filter(c, new, 0);
+ {
+ n0 = *new;
+ np = export_filter(c, &n0, 0);
+ }
- if (old && !bmap_test(&c->export_map, old->id))
+ /* Have we exported the old route? */
+ if (old && !bmap_test(&c->export_accepted_map, old->id))
old = NULL;
- if (!new && !old)
+ /* Withdraw to withdraw. */
+ if (!np && !old)
+ {
+ channel_rte_trace_out(D_ROUTES, c, trte, "idempotent withdraw (filtered on export)");
return;
+ }
- do_rt_notify(c, net, new, old);
-}
-
-void
-channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe)
-{
- if (rpe->seq)
- channel_trace(c, D_ROUTES, "Marking seen %p (%lu)", rpe, rpe->seq);
-
- ASSERT_DIE(c->out_req.hook);
- rpe_mark_seen(c->out_req.hook, rpe);
-
- if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) == TES_FEEDING))
- rpe_mark_seen(c->refeed_req.hook, rpe);
-
- if (rpe->old)
- bmap_clear(&c->export_reject_map, rpe->old->id);
+ /* OK, notify. */
+ do_rt_notify(c, np ? np->net : old->net, np, old);
}
-void
-rt_notify_accepted(struct rt_export_request *req, const net_addr *n,
- struct rt_pending_export *first, struct rt_pending_export *last,
- const rte **feed, uint count)
+static void
+rt_notify_accepted(struct channel *c, const struct rt_export_feed *feed)
{
- struct channel *c = channel_from_export_request(req);
- int refeeding = channel_net_is_refeeding(c, n);
-
- rte nb0, *new_best = NULL;
- const rte *old_best = NULL;
+  rte *old_best = NULL, *new_best = NULL;
+ _Bool feeding = rt_net_is_feeding(&c->out_req, feed->ni->addr);
+ _Bool idempotent = 0;
- for (uint i = 0; i < count; i++)
+ for (uint i = 0; i < feed->count_routes; i++)
{
- if (!rte_is_valid(feed[i]))
- continue;
-
- /* Has been already rejected, won't bother with it */
- if (!refeeding && bmap_test(&c->export_reject_map, feed[i]->id))
- continue;
+ rte *r = &feed->block[i];
/* Previously exported */
- if (!old_best && bmap_test(&c->export_map, feed[i]->id))
+ if (!old_best && bmap_test(&c->export_accepted_map, r->id))
{
- if (new_best)
- {
- /* is superseded */
- old_best = feed[i];
- break;
- }
- else if (refeeding)
- /* is superseeded but maybe by a new version of itself */
- old_best = feed[i];
- else
+ old_best = r;
+
+      /* Still the best and need not be re-fed anyway */
+ if (!new_best && !feeding)
{
- /* is still best */
- DBG("rt_notify_accepted: idempotent\n");
- goto done;
+ idempotent = 1;
+ new_best = r;
}
}
- /* Have no new best route yet */
- if (!new_best)
- {
- /* Try this route not seen before */
- nb0 = *feed[i];
- new_best = export_filter(c, &nb0, 0);
- DBG("rt_notify_accepted: checking route id %u: %s\n", feed[i]->id, new_best ? "ok" : "no");
- }
- }
+ /* Unflag obsolete routes */
+ if (r->flags & REF_OBSOLETE)
+ bmap_clear(&c->export_rejected_map, r->id);
-done:
- /* Check obsolete routes for previously exported */
- RPE_WALK(first, rpe, NULL)
- {
- channel_rpe_mark_seen(c, rpe);
- if (rpe->old)
+ /* Mark invalid as rejected */
+ else if (!rte_is_valid(r))
+ bmap_set(&c->export_rejected_map, r->id);
+
+ /* Already rejected */
+ else if (!feeding && bmap_test(&c->export_rejected_map, r->id))
+ ;
+
+ /* No new best route yet and this is a valid candidate */
+ else if (!new_best)
{
- if (bmap_test(&c->export_map, rpe->old->id))
- {
- ASSERT_DIE(old_best == NULL);
- old_best = rpe->old;
- }
+ /* This branch should not be executed if this route is old best */
+ ASSERT_DIE(r != old_best);
+
+ /* Have no new best route yet, try this route not seen before */
+ new_best = export_filter(c, r, 0);
+ DBG("rt_notify_accepted: checking route id %u: %s\n", r->id, new_best ? "ok" : "no");
}
- if (rpe == last)
- break;
}
/* Nothing to export */
- if (new_best || old_best)
- do_rt_notify(c, n, new_best, old_best);
+ if (!idempotent && (new_best || old_best))
+ do_rt_notify(c, feed->ni->addr, new_best, old_best);
else
DBG("rt_notify_accepted: nothing to export\n");
-
- if (refeeding)
- channel_net_mark_refed(c, n);
}
-rte *
-rt_export_merged(struct channel *c, const net_addr *n, const rte **feed, uint count, linpool *pool, int silent)
+void
+channel_notify_accepted(void *_channel)
{
- _Thread_local static rte rloc;
+ struct channel *c = _channel;
- int refeeding = !silent && channel_net_is_refeeding(c, n);
+ RT_EXPORT_WALK(&c->out_req, u)
+ {
+ switch (u->kind)
+ {
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
- if (refeeding)
- channel_net_mark_refed(c, n);
+ case RT_EXPORT_FEED:
+ if (u->feed->count_routes)
+ rt_notify_accepted(c, u->feed);
+ break;
+
+ case RT_EXPORT_UPDATE:
+ {
+ struct rt_export_feed *f = rt_net_feed(c->table, u->update->new ? u->update->new->net : u->update->old->net, SKIP_BACK(struct rt_pending_export, it, u->update));
+ rt_notify_accepted(c, f);
+ for (uint i=0; i<f->count_exports; i++)
+ rt_export_processed(&c->out_req, f->exports[i]);
+ break;
+ }
+ }
+
+ if (!task_still_in_limit())
+ return ev_send(c->out_req.r.target, c->out_req.r.event);
+ }
+}
+
+rte *
+rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent)
+{
+ _Bool feeding = !silent && rt_net_is_feeding(&c->out_req, feed->ni->addr);
// struct proto *p = c->proto;
struct nexthop_adata *nhs = NULL;
- const rte *best0 = feed[0];
+ rte *best0 = &feed->block[0];
rte *best = NULL;
+ /* First route is obsolete */
+ if (best0->flags & REF_OBSOLETE)
+ return NULL;
+
+ /* First route is invalid */
if (!rte_is_valid(best0))
return NULL;
/* Already rejected, no need to re-run the filter */
- if (!refeeding && bmap_test(&c->export_reject_map, best0->id))
+ if (!feeding && bmap_test(&c->export_rejected_map, best0->id))
return NULL;
- rloc = *best0;
- best = export_filter(c, &rloc, silent);
+ best = export_filter(c, best0, silent);
+ /* Best route doesn't pass the filter */
if (!best)
- /* Best route doesn't pass the filter */
return NULL;
+ /* Unreachable routes can't be merged */
if (!rte_is_reachable(best))
- /* Unreachable routes can't be merged */
return best;
- for (uint i = 1; i < count; i++)
+ for (uint i = 1; i < feed->count_routes; i++)
{
- if (!rte_mergable(best0, feed[i]))
+ rte *r = &feed->block[i];
+
+ /* Obsolete routes can't be merged */
+ if (r->flags & REF_OBSOLETE)
+ break;
+
+ /* Failed to pass mergable test */
+ if (!rte_mergable(best0, r))
+ continue;
+
+ /* Already rejected by filters */
+ if (!feeding && bmap_test(&c->export_rejected_map, r->id))
continue;
- rte tmp0 = *feed[i];
- rte *tmp = export_filter(c, &tmp0, !refeeding);
+ /* Running export filter on new or accepted route */
+ rte *tmp = export_filter(c, r, silent);
+ /* New route rejected or unreachable */
if (!tmp || !rte_is_reachable(tmp))
continue;
+ /* Merging next hops */
eattr *nhea = ea_find(tmp->attrs, &ea_gen_nexthop);
ASSERT_DIE(nhea);
nhs = (struct nexthop_adata *) nhea->u.ptr;
}
+  /* If we merged some nexthops, apply the merged nexthop set to the best route */
if (nhs)
{
eattr *nhea = ea_find(best->attrs, &ea_gen_nexthop);
return best;
}
-void
-rt_notify_merged(struct rt_export_request *req, const net_addr *n,
- struct rt_pending_export *first, struct rt_pending_export *last,
- const rte **feed, uint count)
+static void
+rt_notify_merged(struct channel *c, const struct rt_export_feed *f)
{
- struct channel *c = channel_from_export_request(req);
- // struct proto *p = c->proto;
-
-#if 0 /* TODO: Find whether this check is possible when processing multiple changes at once. */
- /* Check whether the change is relevant to the merged route */
- if ((new_best == old_best) &&
- (new_changed != old_changed) &&
- !rte_mergable(new_best, new_changed) &&
- !rte_mergable(old_best, old_changed))
- return;
-#endif
-
const rte *old_best = NULL;
/* Find old best route */
- for (uint i = 0; i < count; i++)
- if (bmap_test(&c->export_map, feed[i]->id))
+ for (uint i = 0; i < f->count_routes; i++)
+ if (bmap_test(&c->export_accepted_map, f->block[i].id))
{
- old_best = feed[i];
+ old_best = &f->block[i];
break;
}
- /* Check obsolete routes for previously exported */
- RPE_WALK(first, rpe, NULL)
- {
- channel_rpe_mark_seen(c, rpe);
- if (rpe->old)
- {
- if (bmap_test(&c->export_map, rpe->old->id))
- {
- ASSERT_DIE(old_best == NULL);
- old_best = rpe->old;
- }
- }
- if (rpe == last)
- break;
- }
-
/* Prepare new merged route */
- rte *new_merged = count ? rt_export_merged(c, n, feed, count, tmp_linpool, 0) : NULL;
+ rte *new_merged = f->count_routes ? rt_export_merged(c, f, tmp_linpool, 0) : NULL;
+ /* And notify the protocol */
if (new_merged || old_best)
- do_rt_notify(c, n, new_merged, old_best);
+ do_rt_notify(c, f->ni->addr, new_merged, old_best);
}
+
void
-rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
+channel_notify_merged(void *_channel)
{
- struct channel *c = channel_from_export_request(req);
- const rte *o = RTE_VALID_OR_NULL(first->old_best);
- const rte *new_best = first->new_best;
-
- int refeeding = channel_net_is_refeeding(c, net);
+ struct channel *c = _channel;
- RPE_WALK(first, rpe, NULL)
+ RT_EXPORT_WALK(&c->out_req, u)
{
- channel_rpe_mark_seen(c, rpe);
- new_best = rpe->new_best;
- }
+ switch (u->kind)
+ {
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
- rte n0 = RTE_COPY_VALID(new_best);
- if (n0.src || o)
- rt_notify_basic(c, net, n0.src ? &n0 : NULL, o, refeeding);
+ case RT_EXPORT_FEED:
+ if (u->feed->count_routes)
+ rt_notify_merged(c, u->feed);
+ break;
- if (refeeding)
- channel_net_mark_refed(c, net);
+ case RT_EXPORT_UPDATE:
+ {
+ struct rt_export_feed *f = rt_net_feed(c->table, u->update->new ? u->update->new->net : u->update->old->net, SKIP_BACK(struct rt_pending_export, it, u->update));
+ rt_notify_merged(c, f);
+ for (uint i=0; i<f->count_exports; i++)
+ rt_export_processed(&c->out_req, f->exports[i]);
+ break;
+ }
+ }
+
+ if (!task_still_in_limit())
+ return ev_send(c->out_req.r.target, c->out_req.r.event);
+ }
}
void
-rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
+channel_notify_basic(void *_channel)
{
- struct channel *c = channel_from_export_request(req);
+ struct channel *c = _channel;
- const rte *n = RTE_VALID_OR_NULL(first->new);
- const rte *o = RTE_VALID_OR_NULL(first->old);
+ RT_EXPORT_WALK(&c->out_req, u)
+ {
+ switch (u->kind)
+ {
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
- channel_trace(c, D_ROUTES,
- "Notifying any, net %N, first %p (%lu), new %p, old %p",
- net, first, first->seq, n, o);
+ case RT_EXPORT_FEED:
+ {
+ /* Find where the old route block begins */
+ uint oldpos = 0;
+ while ((oldpos < u->feed->count_routes) && !(u->feed->block[oldpos].flags & REF_OBSOLETE))
+ oldpos++;
- if (!n && !o || channel_net_is_refeeding(c, net))
- {
- /* We want to skip this notification because:
- * - there is nothing to notify, or
- * - this net is going to get a full refeed soon
- */
- channel_rpe_mark_seen(c, first);
- return;
- }
+ /* Send updates one after another */
+ for (uint i = 0; i < oldpos; i++)
+ {
+ rte *new = &u->feed->block[i];
+ rte *old = NULL;
+ for (uint o = oldpos; o < u->feed->count_routes; o++)
+ if (new->src == u->feed->block[o].src)
+ {
+ old = &u->feed->block[o];
+ break;
+ }
- struct rte_src *src = n ? n->src : o->src;
- const rte *new_latest = first->new;
+ rt_notify_basic(c, new, old);
- RPE_WALK(first, rpe, src)
- {
- channel_rpe_mark_seen(c, rpe);
- new_latest = rpe->new;
- }
+ /* Mark old processed */
+ if (old)
+ old->src = NULL;
+ }
- rte n0 = RTE_COPY_VALID(new_latest);
- if (n0.src || o)
- rt_notify_basic(c, net, n0.src ? &n0 : NULL, o, 0);
+ /* Send withdraws */
+ for (uint o = oldpos; o < u->feed->count_routes; o++)
+ if (u->feed->block[o].src)
+ rt_notify_basic(c, NULL, &u->feed->block[o]);
+ }
+ break;
- channel_trace(c, D_ROUTES, "Notified net %N", net);
-}
+ case RT_EXPORT_UPDATE:
+ {
+ const rte *new = u->update->new;
+ const rte *old = u->update->old;
+ struct rte_src *src = (c->ra_mode == RA_ANY) ? (new ? new->src : old->src) : NULL;
+
+ /* Squashing subsequent updates */
+ for (SKIP_BACK_DECLARE(const struct rt_pending_export, rpe, it, u->update);
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire) ;)
+ /* Either new is the same as this update's "old". Then the squash
+ * is obvious.
+ *
+ * Or we're squashing an update-from-nothing with a withdrawal,
+ * and then either src is set because it must match (RA_ANY)
+ * or it doesn't matter at all (RA_OPTIMAL).
+ */
+ if ((rpe->it.old == new) && (new || src && (src == rpe->it.new->src)))
+ {
+ new = rpe->it.new;
+ rt_export_processed(&c->out_req, rpe->it.seq);
+ }
-void
-rt_feed_any(struct rt_export_request *req, const net_addr *net,
- struct rt_pending_export *first, struct rt_pending_export *last,
- const rte **feed, uint count)
-{
- struct channel *c = channel_from_export_request(req);
- int refeeding = channel_net_is_refeeding(c, net);
+ if (new && old && rte_same(new, old))
+ {
+ channel_rte_trace_out(D_ROUTES, c, new, "already exported");
- channel_trace(c, D_ROUTES, "Feeding any, net %N, first %p (%lu), %p (%lu), count %u",
- net, first, first ? first->seq : 0, last ? last->seq : 0, count);
+ if ((new->id != old->id) && bmap_test(&c->export_accepted_map, old->id))
+ {
+ bmap_set(&c->export_accepted_map, new->id);
+ bmap_clear(&c->export_accepted_map, old->id);
+ }
+ }
+ else if (!new && !old)
+ channel_rte_trace_out(D_ROUTES, c, u->update->new, "idempotent withdraw (squash)");
+ else
+ rt_notify_basic(c, new, old);
- for (uint i=0; i<count; i++)
- if (rte_is_valid(feed[i]))
- {
- rte n0 = *feed[i];
- rt_notify_basic(c, net, &n0, NULL, refeeding);
+ break;
+ }
}
- RPE_WALK(first, rpe, NULL)
- {
- channel_rpe_mark_seen(c, rpe);
- if (rpe == last)
- break;
+ if (!task_still_in_limit())
+ return ev_send(c->out_req.r.target, c->out_req.r.event);
}
-
- channel_trace(c, D_ROUTES, "Fed %N", net);
- if (refeeding)
- channel_net_mark_refed(c, net);
}
-void
-rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe)
+static void
+rt_flush_best(struct rtable_private *tab, u64 upto)
{
- bmap_set(&hook->seq_map, rpe->seq);
+ RT_EXPORT_WALK(&tab->best_req, u)
+ {
+ ASSERT_DIE(u->kind == RT_EXPORT_UPDATE);
+ ASSERT_DIE(u->update->seq <= upto);
+ if (u->update->seq == upto)
+ return;
+ }
}
-struct rt_pending_export *
-rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
+static struct rt_pending_export *
+rte_announce_to(struct rt_exporter *e, struct rt_net_pending_export *npe, const rte *new, const rte *old)
{
- struct rt_pending_export *next = atomic_load_explicit(&rpe->next, memory_order_acquire);
-
- if (!next)
+ if (new == old)
return NULL;
- if (!src)
- return next;
-
- while (rpe = next)
- if (src == (rpe->new ? rpe->new->src : rpe->old->src))
- return rpe;
- else
- next = atomic_load_explicit(&rpe->next, memory_order_acquire);
-
- return NULL;
-}
-
-static void
-rte_export(struct rt_export_hook *hook, struct rt_pending_export *rpe)
-{
- /* Seen already? */
- if (bmap_test(&hook->seq_map, rpe->seq))
- return;
-
- const net_addr *n = rpe->new_best ? rpe->new_best->net : rpe->old_best->net;
-
- /* Check export eligibility of this net */
- if (!rt_prefilter_net(&hook->req->prefilter, n))
- return;
-
- if (hook->req->prefilter.mode == TE_ADDR_FOR)
- bug("Continuos export of best prefix match not implemented yet.");
-
- if (rpe->new)
- hook->stats.updates_received++;
- else
- hook->stats.withdraws_received++;
-
- if (rpe->old)
- ASSERT_DIE(rpe->old->flags & REF_OBSOLETE);
-
- if (hook->req->export_one)
- hook->req->export_one(hook->req, n, rpe);
- else if (hook->req->export_bulk)
- {
- uint count = 0;
- const rte **feed = NULL;
-
- const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
- ASSERT_DIE(i->index < atomic_load_explicit(&hook->tab->routes_block_size, memory_order_relaxed));
-
- struct rt_pending_export *last;
+ struct rt_pending_export rpe = {
+ .it = {
+ .new = new,
+ .old = old,
+ },
+ };
- {
- RT_READ(hook->tab, tr);
+ struct rt_export_item *rei = rt_exporter_push(e, &rpe.it);
+ if (!rei)
+ return NULL;
- /* Get the route block. */
- net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
- net *net = &routes[i->index];
+ SKIP_BACK_DECLARE(struct rt_pending_export, pushed, it, rei);
- /* Get the feed itself. It may change under our hands tho. */
- last = atomic_load_explicit(&net->last, memory_order_acquire);
- count = rte_feed_count(tr, net);
- if (count)
- {
- feed = alloca(count * sizeof(rte *));
- rte_feed_obtain(tr, net, feed, count);
- }
+ struct rt_pending_export *last = atomic_load_explicit(&npe->last, memory_order_relaxed);
+ if (last)
+ ASSERT_DIE(atomic_exchange_explicit(&last->next, pushed, memory_order_acq_rel) == NULL);
- /* Check that it indeed didn't change and the last export is still the same. */
- if (last != atomic_load_explicit(&net->last, memory_order_acquire))
- RT_READ_RETRY(tr);
- }
+ atomic_store_explicit(&npe->last, pushed, memory_order_release);
+ if (!atomic_load_explicit(&npe->first, memory_order_relaxed))
+ atomic_store_explicit(&npe->first, pushed, memory_order_release);
- hook->req->export_bulk(hook->req, n, rpe, last, feed, count);
- }
- else
- bug("Export request must always provide an export method");
+ return pushed;
}
-/**
- * rte_announce - announce a routing table change
- * @tab: table the route has been added to
- * @net: network in question
- * @new: the new or changed route
- * @old: the previous route replaced by the new one
- * @new_best: the new best route for the same network
- * @old_best: the previous best route for the same network
- *
- * This function gets a routing table update and announces it to all protocols
- * that are connected to the same table by their channels.
- *
- * There are two ways of how routing table changes are announced. First, there
- * is a change of just one route in @net (which may caused a change of the best
- * route of the network). In this case @new and @old describes the changed route
- * and @new_best and @old_best describes best routes. Other routes are not
- * affected, but in sorted table the order of other routes might change.
- *
- * The function announces the change to all associated channels. For each
- * channel, an appropriate preprocessing is done according to channel &ra_mode.
- * For example, %RA_OPTIMAL channels receive just changes of best routes.
- *
- * In general, we first call preexport() hook of a protocol, which performs
- * basic checks on the route (each protocol has a right to veto or force accept
- * of the route before any filter is asked). Then we consult an export filter
- * of the channel and verify the old route in an export map of the channel.
- * Finally, the rt_notify() hook of the protocol gets called.
- *
- * Note that there are also calls of rt_notify() hooks due to feed, but that is
- * done outside of scope of rte_announce().
- */
static void
rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, const rte *new, const rte *old,
const rte *new_best, const rte *old_best)
if (old_best_valid)
old_best->sender->stats.pref--;
- SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, lfjour_push_prepare(&tab->journal));
+ /* Try to push */
+ struct rt_pending_export *best_rpe = NULL;
+ struct rt_pending_export *all_rpe = rte_announce_to(&tab->export_all, &net->all, new, old);
+ if (all_rpe)
+ {
+ /* Also best may have changed */
+ best_rpe = rte_announce_to(&tab->export_best, &net->best, new_best, old_best);
+ if (best_rpe)
+ /* Announced best, need an anchor to all */
+ best_rpe->seq_all = all_rpe->it.seq;
+ else if (new_best != old_best)
+ /* Would announce best but it's empty with no reader */
+ rt_flush_best(tab, all_rpe->it.seq);
- if (!rpe)
+ rt_check_cork_high(tab);
+ }
+ else
{
- rt_trace(tab, D_ROUTES, "Not announcing %N, "
- "new=%p id %u from %s, "
- "old=%p id %u from %s, "
- "new_best=%p id %u, "
- "old_best=%p id %u (no exporter present)",
- i->addr,
- new, new ? new->id : 0, new ? new->sender->req->name : NULL,
- old, old ? old->id : 0, old ? old->sender->req->name : NULL,
- new_best, new_best ? new_best->id : 0,
- old_best, old_best ? old_best->id : 0);
- /* Not announcing, can free old route immediately */
- if (old)
- {
- hmap_clear(&tab->id_map, old->id);
- rte_free(SKIP_BACK(struct rte_storage, rte, old), tab);
- }
- return;
+ /* Not announced anything, cleanup now */
+ ASSERT_DIE(new_best == old_best);
+ hmap_clear(&tab->id_map, old->id);
+ rte_free(SKIP_BACK(struct rte_storage, rte, old), tab);
}
+}
- rt_trace(tab, D_ROUTES, "Announcing %N, "
- "new=%p id %u from %s, "
- "old=%p id %u from %s, "
- "new_best=%p id %u, "
- "old_best=%p id %u seq=%lu",
- i->addr,
- new, new ? new->id : 0, new ? new->sender->req->name : NULL,
- old, old ? old->id : 0, old ? old->sender->req->name : NULL,
- new_best, new_best ? new_best->id : 0,
- old_best, old_best ? old_best->id : 0,
- rpe->li.seq);
-
- *rpe = (struct rt_pending_export) {
- .li = rpe->li, /* Keep the item's internal state */
- .new = new,
- .new_best = new_best,
- .old = old,
- .old_best = old_best,
- };
-
- lfjour_push_commit(&tab->journal);
+static net *
+rt_cleanup_find_net(struct rtable_private *tab, struct rt_pending_export *rpe)
+{
+ /* Find the appropriate struct network */
+ ASSERT_DIE(rpe->it.new || rpe->it.old);
+ const net_addr *n = rpe->it.new ?
+ rpe->it.new->net :
+ rpe->it.old->net;
+ struct netindex *ni = NET_TO_INDEX(n);
+ ASSERT_DIE(ni->index < atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed));
+ net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
+ return &routes[ni->index];
+}
- /* Append to the same-network squasher list */
- struct rt_pending_export *last = atomic_load_explicit(&net->last, memory_order_relaxed);
- if (last)
- {
- struct rt_pending_export *rpenull = NULL;
- ASSERT_DIE(atomic_compare_exchange_strong_explicit(
- &last->next, &rpenull, rpe,
- memory_order_release,
- memory_order_relaxed));
- }
+static _Bool
+rt_cleanup_update_pointers(struct rt_net_pending_export *npe, struct rt_pending_export *rpe)
+{
+ struct rt_pending_export *first = atomic_load_explicit(&npe->first, memory_order_relaxed);
+ struct rt_pending_export *last = atomic_load_explicit(&npe->last, memory_order_relaxed);
+ ASSERT_DIE(rpe == first);
- ASSERT_DIE(atomic_compare_exchange_strong_explicit(
- &net->last, &last, rpe,
- memory_order_release,
- memory_order_relaxed));
+ atomic_store_explicit(
+ &npe->first,
+ atomic_load_explicit(&rpe->next, memory_order_relaxed),
+ memory_order_release
+ );
- struct rt_pending_export *rpenull = NULL;
- atomic_compare_exchange_strong_explicit(
- &net->first, &rpenull, rpe,
- memory_order_release,
- memory_order_relaxed);
+ if (rpe != last)
+ return 0;
- rt_check_cork_high(tab);
+ atomic_store_explicit(&npe->last, NULL, memory_order_release);
+ return 1;
}
-static inline void
-rt_send_export_event(struct rt_export_hook *hook)
+static void
+rt_cleanup_export_best(struct lfjour *j, struct lfjour_item *i)
{
- ev_send(hook->req->list, hook->event);
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, it.li, i);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, export_best.journal, j);
+ rt_flush_best(tab, rpe->seq_all);
+
+ /* Find the appropriate struct network */
+ net *net = rt_cleanup_find_net(tab, rpe);
+
+ /* Update the first and last pointers */
+ rt_cleanup_update_pointers(&net->best, rpe);
+
+ /* Wait for readers before releasing */
+ synchronize_rcu();
}
static void
-rt_cleanup_export(struct lfjour *j, struct lfjour_item *i)
+rt_cleanup_export_all(struct lfjour *j, struct lfjour_item *i)
{
- SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
- SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, i);
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, it.li, i);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, export_all.journal, j);
- /* Unlink this export from struct network */
- ASSERT_DIE(rpe->new || rpe->old);
- const net_addr *n = rpe->new ?
- rpe->new->net :
- rpe->old->net;
- struct netindex *ni = NET_TO_INDEX(n);
- ASSERT_DIE(ni->index < atomic_load_explicit(&tab->routes_block_size, memory_order_relaxed));
- net *routes = atomic_load_explicit(&tab->routes, memory_order_relaxed);
- net *net = &routes[ni->index];
-
- ASSERT_DIE(rpe == atomic_load_explicit(&net->first, memory_order_relaxed));
+ /* Find the appropriate struct network */
+ net *net = rt_cleanup_find_net(tab, rpe);
/* Update the first and last pointers */
- struct rt_pending_export *last = rpe,
- *next = atomic_load_explicit(&rpe->next, memory_order_relaxed);
- if (atomic_compare_exchange_strong_explicit(
- &net->last, &last, NULL,
- memory_order_acq_rel, memory_order_acquire))
- ASSERT_DIE(next == NULL);
-
- ASSERT_DIE(atomic_compare_exchange_strong_explicit(
- &net->first, &rpe, next,
- memory_order_acq_rel, memory_order_relaxed));
-
- /* Wait for very slow table readers */
- synchronize_rcu();
+ _Bool is_last = rt_cleanup_update_pointers(&net->all, rpe);
- if (rpe->old)
+ /* Free the old route */
+ if (rpe->it.old)
{
- ASSERT_DIE(rpe->old->flags & REF_OBSOLETE);
- hmap_clear(&tab->id_map, rpe->old->id);
- rte_free(SKIP_BACK(struct rte_storage, rte, rpe->old), tab);
+ ASSERT_DIE(rpe->it.old->flags & REF_OBSOLETE);
+ hmap_clear(&tab->id_map, rpe->it.old->id);
+ rte_free(SKIP_BACK(struct rte_storage, rte, rpe->it.old), tab);
}
- if (!routes && !next)
+ if (is_last)
tab->gc_counter++;
+
+ /* Wait for readers before releasing */
+ synchronize_rcu();
+}
+
+static void
+rt_dump_best_req(struct rt_export_request *req)
+{
+ SKIP_BACK_DECLARE(struct rtable_private, tab, best_req, req);
+ debug(" Table %s best cleanup request (%p)\n", tab->name, req);
}
static void
}
static void
-rt_cleanup_done(struct lfjour *j, u64 begin_seq, u64 end_seq)
+rt_cleanup_done_all(struct rt_exporter *e, u64 end_seq)
{
- SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, export_all, e);
ASSERT_DIE(DG_IS_LOCKED(tab->lock.rtable));
if (~end_seq)
- rt_trace(tab, D_STATES, "Export cleanup done on seq %lu to %lu", begin_seq, end_seq);
+ rt_trace(tab, D_STATES, "Export all cleanup done up to seq %lu", end_seq);
else
- rt_trace(tab, D_STATES, "Export cleanup complete (begin seq %lu)", begin_seq);
+ rt_trace(tab, D_STATES, "Export all cleanup complete");
rt_check_cork_low(tab);
if (tab->wait_counter)
WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
if (ih->import_state == TIS_WAITING)
+ {
if (end_seq >= ih->flush_seq)
{
ih->import_state = TIS_CLEARED;
};
ev_send_loop(ih->req->loop, &ih->cleanup_event);
}
+ }
if (!EMPTY_LIST(tab->imports) &&
(tab->gc_counter >= tab->config->gc_threshold))
rt_kick_prune_timer(tab);
}
-#define RT_EXPORT_BULK 1024
-
static void
-rt_export_hook(void *_data)
+rt_cleanup_done_best(struct rt_exporter *e, u64 end_seq)
{
- struct rt_export_hook *c = _data;
- struct lfjour_recipient *r = &c->recipient;
-
- ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_READY);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, export_best, e);
- /* Process the export */
- for (uint i=0; i<RT_EXPORT_BULK; i++)
+ if (~end_seq)
+ rt_trace(tab, D_STATES, "Export best cleanup done up to seq %lu", end_seq);
+ else
{
- /* Get the next export if exists */
- struct lfjour_item *li = lfjour_get(r);
-
- /* Stop exporting if no export is available */
- if (!li)
- return;
-
- /* Process sequence number reset event */
- if (lfjour_reset_seqno(r))
- bmap_reset(&c->seq_map, 16);
-
- /* Process the export */
- rte_export(c, SKIP_BACK(struct rt_pending_export, li, li));
-
- /* And release the export */
- lfjour_release(r);
+ rt_trace(tab, D_STATES, "Export best cleanup complete, flushing regular");
+ rt_flush_best(tab, ~0ULL);
}
-
- /*
- * is this actually needed?
- if (used)
- RT_LOCKED(tab, t)
- if (no_next || t->cork_active)
- rt_export_used(c->table, c->req->name, no_next ? "finished export bulk" : "cork active");
- */
-
- /* Request continuation */
- rt_send_export_event(c);
}
+#define RT_EXPORT_BULK 1024
static inline int
rte_validate(struct channel *ch, rte *e)
}
}
-struct rt_export_feed *
-rt_net_feed(rtable *t, net_addr *a)
+/*
+ * Feeding
+ */
+
+static const net_addr *
+rt_feed_next(struct rtable_reading *tr, struct rt_export_feeder *f)
{
- RT_READ(t, tr);
+ u32 rbs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
+ for (; f->feed_index < rbs; f->feed_index++)
+ {
+ struct netindex *ni = net_resolve_index(tr->t->netindex, tr->t->addr_type, f->feed_index);
+ if (ni)
+ {
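+ /* Step past this index so the next call resumes at the following net */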
+ f->feed_index++;
+ return ni->addr;
+ }
+ }
- const struct netindex *i = net_find_index(t->netindex, a);
- net *n = i ? net_find(tr, i) : NULL;
- if (!n)
- return 0;
+ f->feed_index = ~0ULL;
+ return NULL;
+}
+
+static const net_addr *
+rt_feed_next_best(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
+{
+ RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_best, e), tr, u);
+ return rt_feed_next(tr, f);
+}
+
+static const net_addr *
+rt_feed_next_all(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
+{
+ RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_all, e), tr, u);
+ return rt_feed_next(tr, f);
+}
+
+static struct rt_export_feed *
+rt_alloc_feed(uint routes, uint exports)
+{
+ struct rt_export_feed *feed;
+ uint size = sizeof *feed
+ + routes * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ + exports * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
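+ /* The trailing route and export arrays are placed into feed->data below;
+ * the extra _Alignof bytes leave room for aligning each array */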
+
+ feed = tmp_alloc(size);
+
+ feed->count_routes = routes;
+ feed->count_exports = exports;
+ BIRD_SET_ALIGNED_POINTER(feed->block, feed->data);
+ BIRD_SET_ALIGNED_POINTER(feed->exports, &feed->block[routes]);
+
+ /* Consistency check */
+ ASSERT_DIE(((void *) &feed->exports[exports]) <= ((void *) feed) + size);
+
+ return feed;
+}
+
+static net *
+rt_net_feed_get_net(struct rtable_reading *tr, uint index)
+{
+ /* Get the route block from the table */
+ net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
+ u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
+
+ /* Nothing to actually feed */
+ if (index >= bs)
+ return NULL;
+
+ /* We have a net to feed! */
+ return &routes[index];
+}
+
+static const struct rt_pending_export *
+rt_net_feed_validate_first(
+ struct rtable_reading *tr,
+ const struct rt_pending_export *first_in_net,
+ const struct rt_pending_export *last_in_net,
+ const struct rt_pending_export *first)
+{
+ /* Inconsistent input */
+ if (!first_in_net != !last_in_net)
+ RT_READ_RETRY(tr);
+
+ if (!first)
+ return first_in_net;
+
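+ /* The caller's first item must still be linked between first_in_net and last_in_net;
+ * give the announcer a bounded number of yields to finish, otherwise retry the read */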
+ for (uint i = 1; i < 4096; i++)
+ {
+ /* Export item validity check: we must find it between first_in_net and last_in_net */
+ const struct rt_pending_export *rpe = first_in_net;
+ while (rpe)
+ if (rpe == first)
+ return first;
+ else if (rpe == last_in_net)
+ /* Got to the end without finding the beginning */
+ break;
+ else
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire);
+
+ birdloop_yield();
+ }
+ log(L_WARN "Waiting too long for table announcement to finish");
+ RT_READ_RETRY(tr);
+}
+
+static struct rt_export_feed *
+rt_net_feed_index(struct rtable_reading *tr, net *n, const struct rt_pending_export *first)
+{
/* Get the feed itself. It may change under our hands tho. */
- struct rt_pending_export *first = atomic_load_explicit(&n->first, memory_order_acquire);
- struct rt_pending_export *last = atomic_load_explicit(&n->last, memory_order_acquire);
+ struct rt_pending_export *first_in_net, *last_in_net;
+ first_in_net = atomic_load_explicit(&n->all.first, memory_order_acquire);
+ last_in_net = atomic_load_explicit(&n->all.last, memory_order_acquire);
+
+ first = rt_net_feed_validate_first(tr, first_in_net, last_in_net, first);
/* Count the elements */
uint rcnt = rte_feed_count(tr, n);
uint ecnt = 0;
uint ocnt = 0;
- for (struct rt_pending_export *rpe = first; rpe;
+ for (const struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
{
ecnt++;
- if (rpe->old)
+ if (rpe->it.old)
ocnt++;
}
if (rcnt || ocnt || ecnt)
{
- uint size = sizeof *feed
- + (rcnt+ocnt) * sizeof *feed->block + _Alignof(typeof(*feed->block))
- + ecnt * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
-
- feed = tmp_alloc(size);
-
- feed->ni = i;
- feed->count_routes = rcnt+ocnt;
- feed->count_exports = ecnt;
- BIRD_SET_ALIGNED_POINTER(feed->block, feed->data);
- BIRD_SET_ALIGNED_POINTER(feed->exports, &feed->block[rcnt+ocnt]);
-
- /* Consistency check */
- ASSERT_DIE(((void *) &feed->exports[ecnt]) <= ((void *) feed) + size);
+ feed = rt_alloc_feed(rcnt+ocnt, ecnt);
if (rcnt)
rte_feed_obtain_copy(tr, n, feed->block, rcnt);
{
uint e = 0;
uint rpos = rcnt;
- for (struct rt_pending_export *rpe = first; rpe;
+ for (const struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
if (e >= ecnt)
RT_READ_RETRY(tr);
else
{
- feed->exports[e++] = rpe->seq;
+ feed->exports[e++] = rpe->it.seq;
/* Copy also obsolete routes */
- if (rpe->old)
+ if (rpe->it.old)
{
ASSERT_DIE(rpos < rcnt + ocnt);
- feed->block[rpos++] = *rpe->old;
- ea_free_later(ea_ref(rpe->old->attrs));
+ feed->block[rpos++] = *rpe->it.old;
+ ea_free_later(ea_ref(rpe->it.old->attrs));
}
}
ASSERT_DIE(e == ecnt);
}
+
+ feed->ni = NET_TO_INDEX(feed->block[0].net);
}
/* Check that it indeed didn't change and the last export is still the same. */
- if (last != atomic_load_explicit(&n->last, memory_order_acquire) ||
- first != atomic_load_explicit(&n->first, memory_order_acquire))
+ if (
+ (first_in_net != atomic_load_explicit(&n->all.first, memory_order_acquire))
+ || (last_in_net != atomic_load_explicit(&n->all.last, memory_order_acquire)))
RT_READ_RETRY(tr);
return feed;
}
+static struct rt_export_feed *
+rt_net_feed_internal(struct rtable_reading *tr, const net_addr *a, const struct rt_pending_export *first)
+{
+ const struct netindex *i = net_find_index(tr->t->netindex, a);
+ if (!i)
+ return NULL;
+
+ net *n = rt_net_feed_get_net(tr, i->index);
+ if (!n)
+ return NULL;
+
+ return rt_net_feed_index(tr, n, first);
+}
+
+struct rt_export_feed *
+rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
+{
+ RT_READ(t, tr);
+ return rt_net_feed_internal(tr, a, first);
+}
+
+static struct rt_export_feed *
+rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+{
+ RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
+ return rt_net_feed_internal(tr, a, SKIP_BACK(const struct rt_pending_export, it, _first));
+}
+
rte
-rt_net_best(rtable *t, net_addr *a)
+rt_net_best(rtable *t, const net_addr *a)
{
rte rt = {};
if (!e || !rte_is_valid(&e->rte))
return rt;
+ ASSERT_DIE(e->rte.net == i->addr);
ea_free_later(ea_ref(e->rte.attrs));
return RTE_COPY(e);
}
+static struct rt_export_feed *
+rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+{
+ SKIP_BACK_DECLARE(rtable, t, export_best, e);
+ SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
+
+ struct netindex *ni = NET_TO_INDEX(a);
+
+ RT_READ_ANCHORED(t, tr, u);
+
+ net *n = rt_net_feed_get_net(tr, ni->index);
+ if (!n)
+ /* No more to feed, we are fed up! */
+ return NULL;
+
+ const struct rt_pending_export *first_in_net, *last_in_net;
+ first_in_net = atomic_load_explicit(&n->best.first, memory_order_acquire);
+ last_in_net = atomic_load_explicit(&n->best.last, memory_order_acquire);
+ first = rt_net_feed_validate_first(tr, first_in_net, last_in_net, first);
+
+ uint ecnt = 0;
+ for (const struct rt_pending_export *rpe = first; rpe;
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
+ ecnt++;
+
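+ /* The best-route exporter feeds at most one route per net: the current best */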
+ struct rte_storage *best = atomic_load_explicit(&n->routes, memory_order_acquire);
+ if (!ecnt && !best)
+ return NULL;
+
+ struct rt_export_feed *feed = rt_alloc_feed(!!best, ecnt);
+ feed->ni = ni;
+ if (best)
+ feed->block[0] = best->rte;
+
+ if (ecnt)
+ {
+ uint e = 0;
+ for (const struct rt_pending_export *rpe = first; rpe;
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
+ if (e >= ecnt)
+ RT_READ_RETRY(tr);
+ else
+ feed->exports[e++] = rpe->it.seq;
+
+ ASSERT_DIE(e == ecnt);
+ }
+
+ /* Check that it indeed didn't change and the last export is still the same. */
+ if (
+ (first_in_net != atomic_load_explicit(&n->best.first, memory_order_acquire))
+ || (last_in_net != atomic_load_explicit(&n->best.last, memory_order_acquire)))
+ RT_READ_RETRY(tr);
+
+ /* And we're finally done */
+ return feed;
+}
+
+
/* Check rtable for best route to given net whether it would be exported do p */
int
rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter)
return v > 0;
}
-static void
-rt_table_export_done(void *hh)
-{
- struct rt_export_hook *hook = hh;
- struct rt_export_request *req = hook->req;
- void (*stopped)(struct rt_export_request *) = hook->stopped;
- rtable *t = hook->tab;
-
- /* Drop the hook */
- RT_LOCKED(t, tab)
- {
- /* Unlink from the table */
- if (lfjour_of_recipient(&hook->recipient))
- lfjour_unregister(&hook->recipient);
-
- DBG("Export hook %p in table %s finished uc=%u\n", hook, tab->name, tab->use_count);
-
- /* Free the hook */
- rp_free(hook->pool);
- }
-
- /* Inform the stopper */
- CALL(stopped, req);
-
- /* Unlock the table */
- rt_unlock_table(t);
-}
-
static inline void
rt_set_import_state(struct rt_import_hook *hook, u8 state)
{
CALL(hook->req->log_state_change, hook->req, state);
}
-u8
-rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state)
-{
- hook->last_state_change = current_time();
- u8 old = atomic_exchange_explicit(&hook->export_state, state, memory_order_release);
- if (!((1 << old) & expected_mask))
- bug("Unexpected export state change from %s to %s, expected mask %02x",
- rt_export_state_name(old),
- rt_export_state_name(state),
- expected_mask
- );
-
- if (old != state)
- CALL(hook->req->log_state_change, hook->req, state);
-
- return old;
-}
-
void
rt_request_import(rtable *t, struct rt_import_request *req)
{
}
}
-static void rt_table_export_start_feed(struct rtable_private *tab, struct rt_export_hook *hook);
-static void
-rt_table_export_uncork(void *_hook)
-{
- ASSERT_DIE(birdloop_inside(&main_birdloop));
-
- struct rt_export_hook *hook = _hook;
- struct birdloop *loop = hook->req->list->loop;
-
- if (loop != &main_birdloop)
- birdloop_enter(loop);
-
- u8 state;
- RT_LOCKED(hook->tab, tab)
- switch (state = atomic_load_explicit(&hook->export_state, memory_order_relaxed))
- {
- case TES_HUNGRY:
- rt_table_export_start_feed(tab, hook);
- break;
- case TES_STOP:
- hook->event->hook = rt_table_export_done;
- rt_send_export_event(hook);
- break;
- default:
- bug("Uncorking a table export in a strange state: %u", state);
- }
-
- if (loop != &main_birdloop)
- birdloop_leave(loop);
-}
-
-static void
-rt_table_export_start_locked(struct rtable_private *tab, struct rt_export_request *req)
-{
- rt_lock_table(tab);
-
- pool *p = rp_new(req->pool, req->pool->domain, "Export hook");
- struct rt_export_hook *hook = req->hook = mb_allocz(p, sizeof(struct rt_export_hook));
- hook->req = req;
- hook->tab = RT_PUB(tab);
- hook->pool = p;
- atomic_store_explicit(&hook->export_state, TES_DOWN, memory_order_release);
- hook->event = ev_new_init(p, rt_table_export_uncork, hook);
-
- if (rt_cork_check(hook->event))
- rt_set_export_state(hook, BIT32_ALL(TES_DOWN), TES_HUNGRY);
- else
- rt_table_export_start_feed(tab, hook);
-}
-
-static void
-rt_table_export_start_feed(struct rtable_private *tab, struct rt_export_hook *hook)
-{
- struct rt_export_request *req = hook->req;
-
- /* stats zeroed by mb_allocz */
- switch (req->prefilter.mode)
- {
- case TE_ADDR_IN:
- case TE_ADDR_NONE:
- case TE_ADDR_TRIE:
- case TE_ADDR_HOOK:
- hook->feed_index = 0;
- hook->event->hook = rt_feed_by_fib;
- break;
-
- case TE_ADDR_EQUAL:
- hook->event->hook = rt_feed_equal;
- break;
-
- case TE_ADDR_FOR:
- hook->event->hook = rt_feed_for;
- break;
-
- default:
- bug("Requested an unknown export address mode");
- }
-
- DBG("New export hook %p req %p in table %s uc=%u\n", hook, req, tab->name, tab->use_count);
-
- hook->recipient = (struct lfjour_recipient) {
- .event = hook->event,
- .target = req->list,
- };
- lfjour_register(&tab->journal, &hook->recipient);
-
- SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
- req_trace(req, D_STATES, "Export initialized, last export %p (%lu)", rpe, rpe ? rpe->seq : 0);
-
- bmap_init(&hook->seq_map, hook->pool, 16);
-
- /* Regular export */
- rt_set_export_state(hook, BIT32_ALL(TES_DOWN, TES_HUNGRY), TES_FEEDING);
- rt_send_export_event(hook);
-}
-
-#if 0
-static void
-rt_table_export_start(struct rt_exporter *re, struct rt_export_request *req)
-{
- RT_LOCKED(SKIP_BACK(rtable, priv.exporter, re), tab)
- rt_table_export_start_locked(tab, req);
-}
-#endif
-
-void rt_request_export(rtable *t, struct rt_export_request *req)
-{
- RT_LOCKED(t, tab)
- rt_table_export_start_locked(tab, req); /* Is locked inside */
-}
-
-static void
-rt_stop_export_locked(struct rtable_private *tab, struct rt_export_hook *hook)
-{
- struct rt_export_request *req = hook->req;
-
- /* Update export state, get old */
- switch (rt_set_export_state(hook, BIT32_ALL(TES_HUNGRY, TES_FEEDING, TES_READY), TES_STOP))
- {
- case TES_STOP:
- rt_trace(tab, D_EVENTS, "Stopping export hook %s already requested", req->name);
- return;
-
- case TES_HUNGRY:
- rt_trace(tab, D_EVENTS, "Stopping export hook %s must wait for uncorking", req->name);
- return;
-
- case TES_FEEDING:
- break;
- }
-
- rt_trace(tab, D_EVENTS, "Stopping export hook %s right now", req->name);
-
- /* Reset the event as the stopped event */
- ASSERT_DIE(birdloop_inside(req->list->loop));
- hook->event->hook = rt_table_export_done;
-
- /* Run the stopped event */
- rt_send_export_event(hook);
-}
-
-void
-rt_stop_export(struct rt_export_request *req, void (*stopped)(struct rt_export_request *))
-{
- ASSERT_DIE(birdloop_inside(req->list->loop));
- struct rt_export_hook *hook = req->hook;
- ASSERT_DIE(hook);
-
- RT_LOCKED(hook->tab, t)
- {
- /* Set the stopped callback */
- hook->stopped = stopped;
-
- /* Do the rest */
- rt_stop_export_locked(t, hook);
- }
-}
-
/**
* rt_refresh_begin - start a refresh cycle
ih->last_state_change, ih->import_state, ih->stopped);
}
+#if 0
+ /* FIXME: recipient dump not yet rewritten for the split best/all exporters */
WALK_TLIST(lfjour_recipient, r, &tab->journal.recipients)
{
SKIP_BACK_DECLARE(struct rt_export_hook, eh, recipient, r);
eh, eh->req, eh->refeed_pending, eh->last_state_change,
atomic_load_explicit(&eh->export_state, memory_order_relaxed));
}
+#endif
debug("\n");
}
rtable *dst;
u32 uc;
struct rt_export_request req;
+ event event;
};
#include "lib/tlists.h"
static void
-rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
+rt_flowspec_export(void *_link)
{
- SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
+ struct rt_flowspec_link *ln = _link;
rtable *dst_pub = ln->dst;
ASSUME(rt_is_flow(dst_pub));
- RT_LOCKED(dst_pub, dst)
- {
+ RT_EXPORT_WALK(&ln->req, u)
+ {
+ const net_addr *n = NULL;
+ switch (u->kind)
+ {
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
+
+ case RT_EXPORT_FEED:
+ if (u->feed->count_routes)
+ n = u->feed->block[0].net;
+ break;
+
+ case RT_EXPORT_UPDATE:
+ {
+ /* Conflate following updates */
+ const rte *old = RTE_VALID_OR_NULL(u->update->old);
+ const rte *new = u->update->new;
+ for (
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, it, u->update);
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire) ;)
+ {
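+ /* Each following item must start where the previous one ended; fold it in and mark it processed */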
+ ASSERT_DIE(new == rpe->it.old);
+ new = rpe->it.new;
+ rt_export_processed(&ln->req, rpe->it.seq);
+ }
+
+ /* Ignore idempotent */
+ if ((old == new) || old && new && rte_same(old, new))
+ continue;
- /* No need to inspect it further if recalculation is already scheduled */
- if ((dst->nhu_state == NHU_SCHEDULED) || (dst->nhu_state == NHU_DIRTY)
- || !trie_match_net(dst->flowspec_trie, net))
- {
- rpe_mark_seen_all(req->hook, first, NULL, NULL);
- return;
- }
+ n = (new ?: old)->net;
+ }
+ break;
+ }
- /* This net may affect some flowspecs, check the actual change */
- const rte *o = RTE_VALID_OR_NULL(first->old_best);
- const rte *new_best = first->new_best;
+ if (!n)
+ continue;
- RPE_WALK(first, rpe, NULL)
- {
- rpe_mark_seen(req->hook, rpe);
- new_best = rpe->new_best;
- }
+ RT_LOCKED(dst_pub, dst)
+ {
+ /* No need to inspect it further if recalculation is already scheduled */
+ if ((dst->nhu_state == NHU_SCHEDULED) || (dst->nhu_state == NHU_DIRTY))
+ break;
+
+ /* Irrelevant prefix */
+ if (!trie_match_net(dst->flowspec_trie, n))
+ break;
- /* Yes, something has actually changed. Schedule the update. */
- if (o != RTE_VALID_OR_NULL(new_best))
- rt_schedule_nhu(dst);
+ /* Actually, schedule NHU */
+ rt_schedule_nhu(dst);
+ }
+ if (!task_still_in_limit())
+ return ev_send_loop(dst_pub->loop, &ln->event);
}
}
debug(" Flowspec link for table %s (%p)\n", ln->dst->name, req);
}
-static void
-rt_flowspec_log_state_change(struct rt_export_request *req, u8 state)
-{
- SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
- rt_trace(ln->dst, D_STATES, "Flowspec link from %s export state changed to %s",
- ln->src->name, rt_export_state_name(state));
-}
-
static struct rt_flowspec_link *
rt_flowspec_find_link(struct rtable_private *src, rtable *dst)
{
WALK_TLIST(rt_flowspec_link, ln, &src->flowspec_links)
- if (ln->dst == dst && ln->req.hook)
- switch (atomic_load_explicit(&ln->req.hook->export_state, memory_order_acquire))
+ if (ln->dst == dst)
+ switch (rt_export_get_state(&ln->req))
{
- case TES_HUNGRY:
case TES_FEEDING:
case TES_READY:
return ln;
+
+ default:
+ bug("Unexpected flowspec link state");
}
return NULL;
ln->dst = dst_pub;
ln->req = (struct rt_export_request) {
.name = mb_sprintf(p, "%s.flowspec.notifier", dst_pub->name),
- .list = birdloop_event_list(dst_pub->loop),
+ .r = {
+ .event = &ln->event,
+ .target = birdloop_event_list(dst_pub->loop),
+ },
.pool = p,
.trace_routes = src->config->debug,
- .dump_req = rt_flowspec_dump_req,
- .log_state_change = rt_flowspec_log_state_change,
- .export_one = rt_flowspec_export_one,
+ .dump = rt_flowspec_dump_req,
+ };
+ ln->event = (event) {
+ .hook = rt_flowspec_export,
+ .data = ln,
};
rt_flowspec_link_add_tail(&src->flowspec_links, ln);
- rt_table_export_start_locked(src, &ln->req);
+ rtex_export_subscribe(&src->export_best, &ln->req);
lock_dst = 1;
}
birdloop_leave(dst_pub->loop);
}
-static void
-rt_flowspec_link_stopped(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
- rtable *dst = ln->dst;
-
- mb_free(ln);
- rt_unlock_table(dst);
-}
-
void
rt_flowspec_unlink(rtable *src, rtable *dst)
{
birdloop_enter(dst->loop);
+ _Bool unlock_dst = 0;
+
struct rt_flowspec_link *ln;
RT_LOCKED(src, t)
{
if (!--ln->uc)
{
rt_flowspec_link_rem_node(&t->flowspec_links, ln);
- ln->req.hook->stopped = rt_flowspec_link_stopped;
- rt_stop_export_locked(t, ln->req.hook);
+ rtex_export_unsubscribe(&ln->req);
+ ev_postpone(&ln->event);
+ mb_free(ln);
+ unlock_dst = 1;
}
}
+ if (unlock_dst)
+ rt_unlock_table(dst);
+
birdloop_leave(dst->loop);
}
}
static void
-rt_res_dump(resource *_r, unsigned indent)
+rt_res_dump(resource *_r, unsigned indent UNUSED)
{
SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
r->name, net_label[r->addr_type], r->rt_count, r->use_count);
+#if 0
+ /* TODO: rethink this completely */
/* TODO: move this to lfjour */
char x[32];
bsprintf(x, "%%%dspending export %%p\n", indent + 2);
WALK_TLIST(lfjour_block, n, &r->journal.pending)
debug(x, "", n);
+#endif
}
static struct resclass rt_class = {
t->prune_event = ev_new_init(p, rt_prune_table, t);
t->last_rt_change = t->gc_time = current_time();
- t->journal.loop = t->loop;
- t->journal.domain = t->lock.rtable;
- t->journal.item_size = sizeof(struct rt_pending_export);
- t->journal.item_done = rt_cleanup_export;
- t->journal.cleanup_done = rt_cleanup_done;
- lfjour_init(&t->journal, &cf->export_settle);
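+ /* The former single journal is split into two exporters: best-route changes only, and all route changes */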
+ t->export_best = (struct rt_exporter) {
+ .journal = {
+ .loop = t->loop,
+ .domain = t->lock.rtable,
+ .item_size = sizeof(struct rt_pending_export),
+ .item_done = rt_cleanup_export_best,
+ },
+ .name = mb_sprintf(p, "%s.export-best", t->name),
+ .trace_routes = t->debug,
+ .cleanup_done = rt_cleanup_done_best,
+ .feed_net = rt_feed_net_best,
+ .feed_next = rt_feed_next_best,
+ };
+
+ rt_exporter_init(&t->export_best, &cf->export_settle);
+
+ t->export_all = (struct rt_exporter) {
+ .journal = {
+ .loop = t->loop,
+ .domain = t->lock.rtable,
+ .item_size = sizeof(struct rt_pending_export),
+ .item_done = rt_cleanup_export_all,
+ },
+ .name = mb_sprintf(p, "%s.export-all", t->name),
+ .trace_routes = t->debug,
+ .cleanup_done = rt_cleanup_done_all,
+ .feed_net = rt_feed_net_all,
+ .feed_next = rt_feed_next_all,
+ };
+
+ rt_exporter_init(&t->export_all, &cf->export_settle);
+
+ t->best_req = (struct rt_export_request) {
+ .name = mb_sprintf(p, "%s.best-cleanup", t->name),
+ .pool = p,
+ .trace_routes = t->debug,
+ .dump = rt_dump_best_req,
+ };
+
+ /* Subscribe and pre-feed the best_req */
+ rtex_export_subscribe(&t->export_all, &t->best_req);
+ RT_EXPORT_WALK(&t->best_req, u)
+ ASSERT_DIE(u->kind == RT_EXPORT_FEED);
t->cork_threshold = cf->cork_threshold;
}
rt_trace(tab, D_EVENTS, "Prune done");
- lfjour_announce_now(&tab->journal);
+ lfjour_announce_now(&tab->export_all.journal);
+ lfjour_announce_now(&tab->export_best.journal);
/* state change 2->0, 3->1 */
if (tab->prune_state &= 1)
if (ih->import_state == TIS_FLUSHING)
{
DBG("flushing %s %s rr %u", ih->req->name, tab->name, tab->rr_counter);
- ih->flush_seq = tab->journal.next_seq;
+ ih->flush_seq = tab->export_all.journal.next_seq;
rt_set_import_state(ih, TIS_WAITING);
tab->rr_counter--;
tab->wait_counter++;
- lfjour_schedule_cleanup(&tab->journal);
+ lfjour_schedule_cleanup(&tab->export_best.journal);
+ lfjour_schedule_cleanup(&tab->export_all.journal);
}
else if (ih->stale_pruning != ih->stale_pruned)
{
continue;
}
- ea_set_attr_u32(to, &ea_gen_igp_metric, 0, he->igp_metric);
+ /* Jump-away block for applying the actual attributes */
+ do {
+ ea_set_attr_u32(to, &ea_gen_igp_metric, 0, he->igp_metric);
- if (!he->src)
- {
- ea_set_dest(to, 0, RTD_UNREACHABLE);
- break;
- }
+ if (!he->src)
+ {
+ ea_set_dest(to, 0, RTD_UNREACHABLE);
+ break;
+ }
- eattr *he_nh_ea = ea_find(he->src, &ea_gen_nexthop);
- ASSERT_DIE(he_nh_ea);
+ eattr *he_nh_ea = ea_find(he->src, &ea_gen_nexthop);
+ ASSERT_DIE(he_nh_ea);
- struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr;
- int idest = nhea_dest(he_nh_ea);
+ struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr;
+ int idest = nhea_dest(he_nh_ea);
- if ((idest != RTD_UNICAST) ||
- !lnum && he->nexthop_linkable)
- {
- /* Just link the nexthop chain, no label append happens. */
- ea_copy_attr(to, he->src, &ea_gen_nexthop);
- break;
- }
+ if ((idest != RTD_UNICAST) ||
+ !lnum && he->nexthop_linkable)
+ {
+ /* Just link the nexthop chain, no label append happens. */
+ ea_copy_attr(to, he->src, &ea_gen_nexthop);
+ break;
+ }
- uint total_size = OFFSETOF(struct nexthop_adata, nh);
+ uint total_size = OFFSETOF(struct nexthop_adata, nh);
- NEXTHOP_WALK(nh, nhad)
- {
- if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
+ NEXTHOP_WALK(nh, nhad)
{
- log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)",
- nh->labels, lnum, nh->labels + lnum, MPLS_MAX_LABEL_STACK);
- continue;
+ if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
+ {
+ log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)",
+ nh->labels, lnum, nh->labels + lnum, MPLS_MAX_LABEL_STACK);
+ continue;
+ }
+
+ total_size += NEXTHOP_SIZE_CNT(nh->labels + lnum);
}
- total_size += NEXTHOP_SIZE_CNT(nh->labels + lnum);
- }
+ if (total_size == OFFSETOF(struct nexthop_adata, nh))
+ {
+ log(L_WARN "No valid nexthop remaining, setting route unreachable");
- if (total_size == OFFSETOF(struct nexthop_adata, nh))
- {
- log(L_WARN "No valid nexthop remaining, setting route unreachable");
+ struct nexthop_adata nha = {
+ .ad.length = NEXTHOP_DEST_SIZE,
+ .dest = RTD_UNREACHABLE,
+ };
- struct nexthop_adata nha = {
- .ad.length = NEXTHOP_DEST_SIZE,
- .dest = RTD_UNREACHABLE,
- };
+ ea_set_attr_data(to, &ea_gen_nexthop, 0, &nha.ad.data, nha.ad.length);
+ break;
+ }
- ea_set_attr_data(to, &ea_gen_nexthop, 0, &nha.ad.data, nha.ad.length);
- break;
- }
+ struct nexthop_adata *new = (struct nexthop_adata *) tmp_alloc_adata(total_size);
+ struct nexthop *dest = &new->nh;
- struct nexthop_adata *new = (struct nexthop_adata *) tmp_alloc_adata(total_size);
- struct nexthop *dest = &new->nh;
+ NEXTHOP_WALK(nh, nhad)
+ {
+ if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
+ continue;
- NEXTHOP_WALK(nh, nhad)
- {
- if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
- continue;
+ memcpy(dest, nh, NEXTHOP_SIZE(nh));
+ if (lnum)
+ {
+ memcpy(&(dest->label[dest->labels]), labels, lnum * sizeof labels[0]);
+ dest->labels += lnum;
+ }
- memcpy(dest, nh, NEXTHOP_SIZE(nh));
- if (lnum)
- {
- memcpy(&(dest->label[dest->labels]), labels, lnum * sizeof labels[0]);
- dest->labels += lnum;
- }
+ if (ipa_nonzero(nh->gw))
+ /* Router nexthop */
+ dest->flags = (dest->flags & RNF_ONLINK);
+ else if (!(nh->iface->flags & IF_MULTIACCESS) || (nh->iface->flags & IF_LOOPBACK))
+ dest->gw = IPA_NONE; /* PtP link - no need for nexthop */
+ else if (ipa_nonzero(he->link))
+ dest->gw = he->link; /* Device nexthop with link-local address known */
+ else
+ dest->gw = he->addr; /* Device nexthop with link-local address unknown */
- if (ipa_nonzero(nh->gw))
- /* Router nexthop */
- dest->flags = (dest->flags & RNF_ONLINK);
- else if (!(nh->iface->flags & IF_MULTIACCESS) || (nh->iface->flags & IF_LOOPBACK))
- dest->gw = IPA_NONE; /* PtP link - no need for nexthop */
- else if (ipa_nonzero(he->link))
- dest->gw = he->link; /* Device nexthop with link-local address known */
- else
- dest->gw = he->addr; /* Device nexthop with link-local address unknown */
+ dest = NEXTHOP_NEXT(dest);
+ }
- dest = NEXTHOP_NEXT(dest);
+ /* Fix final length */
+ new->ad.length = (void *) dest - (void *) new->ad.data;
+ ea_set_attr(to, EA_LITERAL_DIRECT_ADATA(
+ &ea_gen_nexthop, 0, &new->ad));
}
-
- /* Fix final length */
- new->ad.length = (void *) dest - (void *) new->ad.data;
- ea_set_attr(to, EA_LITERAL_DIRECT_ADATA(
- &ea_gen_nexthop, 0, &new->ad));
+ while (0);
/* Has the HE version changed? */
u32 end_version = atomic_load_explicit(&he->version, memory_order_acquire);
}
ASSERT_DIE(~nbpos);
- const char *best_indicator[2][2] = {
- { "autoupdated", "autoupdated [-best]" },
- { "autoupdated [+best]", "autoupdated [best]" }
- };
-
- /* Best both updated and promoted: announce it first */
- if (nbpos && updates[nbpos].new_stored)
- {
- rt_rte_trace_in(D_ROUTES, updates[nbpos].new.sender->req, &updates[nbpos].new,
- best_indicator[1][0]);
- rte_announce(tab, ni, n,
- &updates[nbpos].new_stored->rte, &updates[nbpos].old->rte,
- &new_best->rte, &old_best->rte);
- }
- else
- nbpos = 0;
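+ /* Announce the best-route change to the best exporter first; its seq_all is linked below to the matching all-exporter item */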
+ struct rt_pending_export *best_rpe =
+ (new_best != old_best) ?
+ rte_announce_to(&tab->export_best, &n->best, &new_best->rte, &old_best->rte)
+ : NULL;
uint total = 0;
+ u64 last_seq = 0;
/* Announce the changes */
for (uint i=0; i<count; i++)
if (!updates[i].new_stored)
continue;
- /* Already announced */
- if (nbpos && (i == nbpos))
- continue;
+ struct rt_pending_export *this_rpe =
+ rte_announce_to(&tab->export_all, &n->all,
+ &updates[i].new_stored->rte, &updates[i].old->rte);
+ ASSERT_DIE(this_rpe);
_Bool nb = (new_best->rte.src == updates[i].new.src), ob = (i == 0);
- rt_rte_trace_in(D_ROUTES, updates[i].new.sender->req, &updates[i].new, best_indicator[nb][ob]);
- rte_announce(tab, ni, n,
- &updates[i].new_stored->rte, &updates[i].old->rte,
- &new_best->rte, (!nbpos && !i) ? &old_best->rte : &new_best->rte);
+ char info[96];
+ char best_indicator[2][2] = { { ' ', '+' }, { '-', '=' } };
+ bsnprintf(info, sizeof info, "autoupdated [%cbest]", best_indicator[ob][nb]);
+
+ rt_rte_trace_in(D_ROUTES, updates[i].new.sender->req, &updates[i].new, info);
+
+ /* Double announcement of this specific route */
+ if (ob && best_rpe)
+ {
+ ASSERT_DIE(best_rpe->it.old == &updates[i].old->rte);
+ ASSERT_DIE(!best_rpe->seq_all);
+ best_rpe->seq_all = this_rpe->it.seq;
+ }
+ else
+ last_seq = this_rpe->it.seq;
total++;
}
+ if (best_rpe && !best_rpe->seq_all)
+ {
+ ASSERT_DIE(!updates[0].new_stored);
+ best_rpe->seq_all = last_seq;
+ }
+
/* Now we can finally release the changes back into the table */
atomic_store_explicit(&n->routes, new_best, memory_order_release);
rt_trace(tab, D_STATES, "Next hop updater corked");
if (tab->nhu_state & NHU_RUNNING)
- lfjour_announce_now(&tab->journal);
+ {
+ lfjour_announce_now(&tab->export_best.journal);
+ lfjour_announce_now(&tab->export_all.journal);
+ }
tab->nhu_corked = tab->nhu_state;
tab->nhu_state = 0;
rt_trace(r, D_STATES, "Unlocked at %s:%d", file, line);
if (!--r->use_count && r->deleted)
/* Stop the service thread to finish this up */
- ev_send(&global_event_list, ev_new_init(r->rp, rt_shutdown, r));
+ ev_send_loop(r->loop, ev_new_init(r->rp, rt_shutdown, r));
}
static void
rt_shutdown(void *tab_)
{
- struct rtable_private *r = tab_;
- birdloop_stop(r->loop, rt_delete, r);
+ rtable *t = tab_;
+ RT_LOCK(t, tab);
+
+ rtex_export_unsubscribe(&tab->best_req);
+
+ rt_exporter_shutdown(&tab->export_best, NULL);
+ rt_exporter_shutdown(&tab->export_all, NULL);
+
+ birdloop_stop_self(t->loop, rt_delete, t);
}
static void
struct rtable_private *tab = RT_LOCK_SIMPLE((rtable *) tab_);
struct config *conf = tab->deleted;
DOMAIN(rtable) dom = tab->lock;
-
RT_UNLOCK_SIMPLE(RT_PUB(tab));
/* Everything is freed by freeing the loop */
if (!tab->cork_active)
return;
- if (tab->deleted || (lfjour_pending_items(&tab->journal) < tab->cork_threshold.low))
+ if (tab->deleted ||
+ (lfjour_pending_items(&tab->export_best.journal) < tab->cork_threshold.low)
+ && (lfjour_pending_items(&tab->export_all.journal) < tab->cork_threshold.low))
{
tab->cork_active = 0;
rt_cork_release();
static void
rt_check_cork_high(struct rtable_private *tab)
{
- if (!tab->deleted && !tab->cork_active && (lfjour_pending_items(&tab->journal) >= tab->cork_threshold.high))
+ if (!tab->deleted && !tab->cork_active && (
+ (lfjour_pending_items(&tab->export_best.journal) >= tab->cork_threshold.high)
+ || (lfjour_pending_items(&tab->export_all.journal) >= tab->cork_threshold.high)))
{
tab->cork_active = 1;
rt_cork_acquire();
- lfjour_schedule_cleanup(&tab->journal);
+ lfjour_schedule_cleanup(&tab->export_best.journal);
+ lfjour_schedule_cleanup(&tab->export_all.journal);
// rt_export_used(&tab->exporter, tab->name, "corked");
rt_trace(tab, D_STATES, "Corked");
tab->name = new->name;
tab->config = new;
tab->debug = new->debug;
+ tab->export_all.trace_routes = tab->export_best.trace_routes = new->debug;
if (tab->hostcache)
tab->hostcache->req.trace_routes = new->debug;
if (ev_get_list(tab->hcu_event) == &rt_cork.queue)
ev_postpone(tab->hcu_event);
- rt_stop_export_locked(tab, tab->hostcache->req.hook);
+ rtex_export_unsubscribe(&tab->hostcache->req);
}
+
rt_unlock_table(tab);
}
birdloop_leave(o->table->loop);
DBG("\tdone\n");
}
-static void
-rt_feed_done(struct rt_export_hook *c)
-{
- c->event->hook = rt_export_hook;
-
- rt_set_export_state(c, BIT32_ALL(TES_FEEDING), TES_READY);
-
- rt_send_export_event(c);
-}
-
-static enum {
- RT_FEED_OK = 0,
- RT_FEED_EMPTY = 1,
- RT_FEED_OVERFLOW = 2,
- RT_FEED_REJECTED = 3,
-}
-rt_feed_index(struct rt_export_hook *h, uint index)
-{
- struct rt_export_request *req = h->req;
- const net_addr *a;
- uint cnt;
- const rte **feed;
- struct rt_pending_export *first, *last;
- {
- RT_READ(h->tab, tr);
-
- /* Get the route block from the table */
- net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
- u32 bs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
-
- /* Nothing to actually feed */
- if (index >= bs)
- return RT_FEED_OVERFLOW;
-
- /* Validate the network structure */
- net *n = &routes[index];
- struct rte_storage *s = NET_READ_BEST_ROUTE(tr, n);
- last = atomic_load_explicit(&n->last, memory_order_acquire);
- if (s)
- a = s->rte.net;
- else if (!last)
- return RT_FEED_EMPTY;
- else if (last->old)
- a = last->old->net;
- else
- RT_READ_RETRY(tr);
-
- /* Prefilter based on net_addr */
- if (!rt_prefilter_net(&req->prefilter, a))
- {
- req_trace(req, D_ROUTES, "Feeding %N rejected by prefilter", a);
- return RT_FEED_REJECTED;
- }
-
- /* Obtain the actual feed */
- cnt = rte_feed_count(tr, n);
- if (cnt)
- {
- feed = alloca(cnt * sizeof *feed);
- rte_feed_obtain(tr, n, feed, cnt);
- }
-
- /* Check journal pointers; retry if changed */
- first = atomic_load_explicit(&n->first, memory_order_acquire);
- if (last != atomic_load_explicit(&n->last, memory_order_acquire))
- RT_READ_RETRY(tr);
- }
-
- if (cnt)
- {
- if (req->export_bulk)
- {
- /* Call export_bulk preferably */
- req->export_bulk(req, a, first, last, feed, cnt);
- return RT_FEED_OK;
- }
- else
- {
- /* This by definition exports best only, yes, it's stupid, i know */
- struct rt_pending_export rpe = { .new = feed[0], .new_best = feed[0], };
- req->export_one(req, a, &rpe);
- }
- }
-
- /* Unless export_bulk was called, the exporters didn't have enough
- * information about seen journal items */
- if (req->mark_seen)
- RPE_WALK(first, rpe, NULL)
- {
- req->mark_seen(req, rpe);
- if (rpe == last) break;
- }
- else
- rpe_mark_seen_all(h, first, NULL, NULL);
-
- return RT_FEED_OK;
-}
-
-/**
- * rt_feed_by_fib - advertise all routes to a channel by walking a fib
- * @c: channel to be fed
- *
- * This function performs one pass of advertisement of routes to a channel that
- * is in the ES_FEEDING state. It is called by the protocol code as long as it
- * has something to do. (We avoid transferring all the routes in single pass in
- * order not to monopolize CPU time.)
- */
-static void
-rt_feed_by_fib(void *data)
-{
- struct rt_export_hook *c = data;
-
- ASSERT(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
-
- uint count = 0;
-
- for (; (&main_birdloop == this_birdloop) ?
- (count < 4096) :
- task_still_in_limit();
- c->feed_index++)
- {
- switch (rt_feed_index(c, c->feed_index))
- {
- case RT_FEED_REJECTED:
- case RT_FEED_EMPTY:
- break;
-
- case RT_FEED_OK:
- count++;
- break;
-
- case RT_FEED_OVERFLOW:
- rt_feed_done(c);
- return;
- }
- }
-
- rt_send_export_event(c);
-}
-
-static void
-rt_feed_equal(void *data)
-{
- struct rt_export_hook *c = data;
-
- ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
- ASSERT_DIE(c->req->prefilter.mode == TE_ADDR_EQUAL);
-
- struct netindex *ni = net_find_index(c->tab->netindex, c->req->prefilter.addr);
- if (ni)
- rt_feed_index(c, ni->index);
-
- rt_feed_done(c);
-}
-
-static void
-rt_feed_for(void *data)
-{
- struct rt_export_hook *c = data;
-
- ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_FEEDING);
- ASSERT_DIE(c->req->prefilter.mode == TE_ADDR_FOR);
-
- u32 index;
- {
- RT_READ(c->tab, tr);
- net *routes = atomic_load_explicit(&tr->t->routes, memory_order_acquire);
- net *n = net_route(tr, c->req->prefilter.addr);
- if (!n)
- {
- rt_feed_done(c);
- return;
- }
- index = (n - routes);
- }
-
- rt_feed_index(c, index);
- rt_feed_done(c);
-}
-
-
-/*
- * Import table
- */
-
-void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net,
- struct rt_pending_export *first, struct rt_pending_export *last,
- const rte **feed, uint count)
-{
- SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
-
- for (uint i=0; i<count; i++)
- if (feed[i]->sender == c->in_req.hook)
- {
- /* Strip the table-specific information */
- rte new = rte_init_from(feed[i]);
-
- /* Strip the later attribute layers */
- new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
-
- /* And reload the route */
- rte_update(c, net, &new, new.src);
- }
-
- rpe_mark_seen_all(req->hook, first, last, NULL);
-}
-
/*
* Hostcache
}
static void
-hc_notify_log_state_change(struct rt_export_request *req, u8 state)
-{
- SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
- rt_trace(hc->tab, D_STATES, "HCU Export state changed to %s", rt_export_state_name(state));
-}
-
-static void
-hc_notify_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
+hc_notify_export(void *_hc)
{
- SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
+ struct hostcache *hc = _hc;
- RT_LOCKED(hc->tab, tab)
- if (ev_active(tab->hcu_event) || !trie_match_net(hc->trie, net))
+ RT_EXPORT_WALK(&hc->req, u)
+ {
+ const net_addr *n = NULL;
+ switch (u->kind)
{
- if (req->trace_routes & D_ROUTES)
- log(L_TRACE "%s < boring %N (%u)",
- req->name, net, NET_TO_INDEX(net)->index);
+ case RT_EXPORT_STOP:
+ bug("Main table export stopped");
+ break;
+
+ case RT_EXPORT_FEED:
+ if (u->feed->count_routes)
+ n = u->feed->block[0].net;
+ break;
+
+ case RT_EXPORT_UPDATE:
+ {
+ /* Conflate following updates */
+ const rte *old = RTE_VALID_OR_NULL(u->update->old);
+ const rte *new = u->update->new;
+ for (
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, it, u->update);
+ rpe = atomic_load_explicit(&rpe->next, memory_order_acquire) ;)
+ {
+ ASSERT_DIE(new == rpe->it.old);
+ new = rpe->it.new;
+ rt_export_processed(&hc->req, rpe->it.seq);
+ }
+
+ /* Ignore idempotent */
+ if ((old == new) || old && new && rte_same(old, new))
+ continue;
+ n = (new ?: old)->net;
+ }
+ break;
+ }
+
+ if (!n)
+ continue;
+
+ RT_LOCK(hc->tab, tab);
+ if (ev_active(tab->hcu_event))
+ continue;
+
+ if (!trie_match_net(hc->trie, n))
+ {
/* No interest in this update, mark seen only */
- rpe_mark_seen_all(req->hook, first, NULL, NULL);
+ if (hc->req.trace_routes & D_ROUTES)
+ log(L_TRACE "%s < boring %N (%u)",
+ hc->req.name, n, NET_TO_INDEX(n)->index);
}
else
{
- /* This net may affect some hostentries, check the actual change */
- const rte *o = RTE_VALID_OR_NULL(first->old_best);
- const rte *new_best = first->new_best;
-
- RPE_WALK(first, rpe, NULL)
- {
- rpe_mark_seen(req->hook, rpe);
- new_best = rpe->new_best;
- }
-
- if (req->trace_routes & D_ROUTES)
+ if (hc->req.trace_routes & D_ROUTES)
log(L_TRACE "%s < checking %N (%u)",
- req->name, net, NET_TO_INDEX(net)->index);
+ hc->req.name, n, NET_TO_INDEX(n)->index);
- /* Yes, something has actually changed. Do the hostcache update. */
- if ((o != RTE_VALID_OR_NULL(new_best))
- && (atomic_load_explicit(&req->hook->export_state, memory_order_acquire) == TES_READY)
+ if ((rt_export_get_state(&hc->req) == TES_READY)
&& !ev_active(tab->hcu_event))
{
- if (req->trace_routes & D_EVENTS)
- log(L_TRACE "%s requesting HCU", req->name);
+ if (hc->req.trace_routes & D_EVENTS)
+ log(L_TRACE "%s requesting HCU", hc->req.name);
ev_send_loop(tab->loop, tab->hcu_event);
}
}
+
+ if (!task_still_in_limit())
+ return ev_send(hc->req.r.target, hc->req.r.event);
+ }
}
he->igp_metric = rt_get_igp_metric(&e->rte);
if ((old_src != he->src) && (tab->debug & D_ROUTES))
- if (ipa_zero(he->link))
+ if (ipa_zero(he->link) || ipa_equal(he->link, he->addr))
log(L_TRACE "%s: Hostentry %p for %I in %s resolved via %N (%uG)",
tab->name, he, he->addr, he->tab->name, e->rte.net, e->rte.src->global_id);
else
tab->name, he, he->addr, he->link, he->tab->name, e->rte.net, e->rte.src->global_id);
}
else if (old_src && (tab->debug & D_ROUTES))
- if (ipa_zero(he->link))
+ if (ipa_zero(he->link) || ipa_equal(he->link, he->addr))
log(L_TRACE "%s: Hostentry %p for %I in %s not resolved",
tab->name, he, he->addr, he->tab->name);
else
{
hc->req = (struct rt_export_request) {
.name = mb_sprintf(tab->rp, "%s.hcu.notifier", tab->name),
- .list = birdloop_event_list(tab->loop),
+ .r = {
+ .event = &hc->source_event,
+ .target = birdloop_event_list(tab->loop),
+ },
.pool = birdloop_pool(tab->loop),
.trace_routes = tab->config->debug,
- .dump_req = hc_notify_dump_req,
- .log_state_change = hc_notify_log_state_change,
- .export_one = hc_notify_export_one,
+ .dump = hc_notify_dump_req,
+ };
+ hc->source_event = (event) {
+ .hook = hc_notify_export,
+ .data = hc,
};
- rt_table_export_start_locked(tab, &hc->req);
+ rtex_export_subscribe(&tab->export_best, &hc->req);
}
/* Shutdown shortcut */
- if (!hc->req.hook)
+ if (rt_export_get_state(&hc->req) == TES_DOWN)
return;
if (rt_cork_check(tab->hcu_uncork_event))
rte *
krt_export_net(struct channel *c, const net_addr *a, linpool *lp)
{
- uint count;
- const rte **feed;
- struct rte_storage *best;
+ if (c->ra_mode == RA_MERGED)
{
- RT_READ(c->table, tr);
+ struct rt_export_feed *feed = rt_net_feed(c->table, a, NULL);
+ if (!feed || !feed->count_routes)
+ return NULL;
- struct netindex *i = net_find_index(tr->t->netindex, a);
- if (!i) return NULL;
-
- net *net = net_find(tr, i);
- if (!net) return NULL;
-
- best = NET_READ_BEST_ROUTE(tr, net);
- if (!best) return NULL;
- if (!bmap_test(&c->export_map, best->rte.id)) return NULL;
-
- if (c->ra_mode == RA_MERGED)
- {
- count = rte_feed_count(tr, net);
- if (!count)
- return NULL;
+ if (!bmap_test(&c->export_accepted_map, feed->block[0].id))
+ return NULL;
- feed = alloca(count * sizeof(rte *));
- rte_feed_obtain(tr, net, feed, count);
+ return rt_export_merged(c, feed, lp, 1);
}
- }
-
- if (c->ra_mode == RA_MERGED)
- return rt_export_merged(c, a, feed, count, lp, 1);
- const struct filter *filter = c->out_filter;
-
- static _Thread_local rte rt;
- rt = best->rte;
+ static _Thread_local rte best;
+ best = rt_net_best(c->table, a);
- if (!rte_is_valid(&rt))
+ if (!best.attrs)
return NULL;
- if (filter == FILTER_REJECT)
+ if (c->out_filter == FILTER_REJECT)
return NULL;
/* We could run krt_preexport() here, but it is already handled by krt_is_installed() */
- if (filter == FILTER_ACCEPT)
- return &rt;
+ if (c->out_filter == FILTER_ACCEPT)
+ return &best;
- if (f_run(filter, &rt, FF_SILENT) > F_ACCEPT)
+ if (f_run(c->out_filter, &best, FF_SILENT) > F_ACCEPT)
return NULL;
- return &rt;
+ return &best;
}
-
-
-
-/*
- * Documentation for functions declared inline in route.h
- */
-#if 0
-
-/**
- * net_find - find a network entry
- * @tab: a routing table
- * @addr: address of the network
- *
- * net_find() looks up the given network in routing table @tab and
- * returns a pointer to its &net entry or %NULL if no such network
- * exists.
- */
-static inline net *net_find(rtable *tab, net_addr *addr)
-{ DUMMY; }
-
-/**
- * rte_cow - copy a route for writing
- * @r: a route entry to be copied
- *
- * rte_cow() takes a &rte and prepares it for modification. The exact action
- * taken depends on the flags of the &rte -- if it's a temporary entry, it's
- * just returned unchanged, else a new temporary entry with the same contents
- * is created.
- *
- * The primary use of this function is inside the filter machinery -- when
- * a filter wants to modify &rte contents (to change the preference or to
- * attach another set of attributes), it must ensure that the &rte is not
- * shared with anyone else (and especially that it isn't stored in any routing
- * table).
- *
- * Result: a pointer to the new writable &rte.
- */
-static inline rte * rte_cow(rte *r)
-{ DUMMY; }
-
-#endif
}
}
-static void
-babel_feed_begin(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct babel_proto *p = (struct babel_proto *) C->proto;
- struct fib *rtable = (C->net_type == NET_IP4) ? &p->ip4_rtable : &p->ip6_rtable;
-
- FIB_WALK(rtable, struct babel_entry, e)
- if (e->valid == BABEL_ENTRY_VALID)
- e->valid = BABEL_ENTRY_REFEEDING;
- FIB_WALK_END;
-}
-
-static void
-babel_feed_end(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct babel_proto *p = (struct babel_proto *) C->proto;
- struct fib *rtable = (C->net_type == NET_IP4) ? &p->ip4_rtable : &p->ip6_rtable;
- int changed = 0;
-
- FIB_WALK(rtable, struct babel_entry, e)
- if (e->valid == BABEL_ENTRY_REFEEDING)
- {
- babel_entry_invalidate(e);
- changed++;
- }
- FIB_WALK_END;
-
- if (changed)
- babel_trigger_update(p);
-}
-
-
static int
babel_rte_better(const rte *new, const rte *old)
{
P->iface_sub.if_notify = babel_if_notify;
P->rt_notify = babel_rt_notify;
P->preexport = babel_preexport;
- P->feed_begin = babel_feed_begin;
- P->feed_end = babel_feed_end;
P->sources.class = &babel_rte_owner_class;
#define BABEL_ENTRY_DUMMY 0 /* No outgoing route */
#define BABEL_ENTRY_VALID 1 /* Valid outgoing route */
#define BABEL_ENTRY_STALE 2 /* Stale outgoing route, waiting for GC */
-#define BABEL_ENTRY_REFEEDING 3 /* Route valid until feed ends */
/*
}
void
-bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n,
- struct rt_pending_export *first, struct rt_pending_export *last,
- const rte **feed, uint count)
+bgp_rte_modify_stale(void *_bc)
{
- SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
+ struct bgp_channel *c = _bc;
struct rt_import_hook *irh = c->c.in_req.hook;
- /* Find our routes among others */
- for (uint i=0; i<count; i++)
- {
- const rte *r = feed[i];
-
- if (
- !rte_is_valid(r) || /* Not a valid route */
- (r->sender != irh) || /* Not our route */
- (r->stale_cycle == irh->stale_set)) /* A new route, do not mark as stale */
- continue;
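+ /* TMP_SAVED restores the temporary linpool after each fed block, keeping memory usage bounded during the walk */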
+ RT_FEED_WALK(&c->stale_feed, f) TMP_SAVED
+ if (task_still_in_limit())
+ {
+ for (uint i = 0; i < f->count_routes; i++)
+ {
+ rte *r = &f->block[i];
+ if ((r->sender != irh) || /* Not our route */
+ (r->stale_cycle == irh->stale_set)) /* A new route, do not mark as stale */
+ continue;
- eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
- const struct adata *ad = ea ? ea->u.ptr : NULL;
- uint flags = ea ? ea->flags : BAF_PARTIAL;
+ /* Don't reinstate obsolete routes */
+ if (r->flags & REF_OBSOLETE)
+ break;
- /* LLGR not allowed, withdraw the route */
- if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
+ eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
+ const struct adata *ad = ea ? ea->u.ptr : NULL;
+ uint flags = ea ? ea->flags : BAF_PARTIAL;
+
+ /* LLGR not allowed, withdraw the route */
+ if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
+ {
+ rte_import(&c->c.in_req, r->net, NULL, r->src);
+ continue;
+ }
+
+ /* Route already marked as LLGR, do nothing */
+ if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
+ continue;
+
+ /* Mark the route as LLGR */
+ bgp_set_attr_ptr(&r->attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
+
+ /* We need to update the route but keep it stale. */
+ ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
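+ /* Lowering stale_set for the import keeps the re-imported route's stale_cycle in the previous (stale) generation */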
+ irh->stale_set--;
+ rte_import(&c->c.in_req, r->net, r, r->src);
+ irh->stale_set++;
+ }
+ }
+ else
{
- rte_import(&c->c.in_req, n, NULL, r->src);
- continue;
+ proto_send_event(c->c.proto, &c->stale_event);
+ return;
}
- /* Route already marked as LLGR, do nothing */
- if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
- continue;
-
- /* Store the tmp_linpool state to aggresively save memory */
- struct lp_state *tmpp = lp_save(tmp_linpool);
-
- /* Mark the route as LLGR */
- rte e0 = *r;
- bgp_set_attr_ptr(&e0.attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
-
- /* We need to update the route but keep it stale. */
- ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
- irh->stale_set--;
- rte_import(&c->c.in_req, n, &e0, r->src);
- irh->stale_set++;
-
- /* Restore the memory state */
- lp_restore(tmp_linpool, tmpp);
- }
-
- rpe_mark_seen_all(req->hook, first, last, NULL);
+ rt_feeder_unsubscribe(&c->stale_feed);
}
if (peer->gr_aware)
c->load_state = BFS_LOADING;
+ /* We'll also send End-of-RIB */
+ if (p->cf->gr_mode)
+ c->feed_state = BFS_LOADING;
+
c->ext_next_hop = c->cf->ext_next_hop && (bgp_channel_is_ipv6(c) || rem->ext_next_hop);
c->add_path_rx = (loc->add_path & BGP_ADD_PATH_RX) && (rem->add_path & BGP_ADD_PATH_TX);
c->add_path_tx = (loc->add_path & BGP_ADD_PATH_TX) && (rem->add_path & BGP_ADD_PATH_RX);
tm_start_in(p->gr_timer, p->conn->remote_caps->gr_time S, p->p.loop);
}
-static void
-bgp_graceful_restart_feed_done(struct rt_export_request *req)
-{
- req->hook = NULL;
-}
-
-static void
-bgp_graceful_restart_feed_dump_req(struct rt_export_request *req)
-{
- SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
- debug(" BGP-GR %s.%s export request %p\n", c->c.proto->name, c->c.name, req);
-}
-
-static void
-bgp_graceful_restart_feed_log_state_change(struct rt_export_request *req, u8 state)
-{
- SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
- struct bgp_proto *p = (void *) c->c.proto;
- BGP_TRACE(D_EVENTS, "Long-lived graceful restart export state changed to %s", rt_export_state_name(state));
-
- if (state == TES_READY)
- rt_stop_export(req, bgp_graceful_restart_feed_done);
-}
-
-static void
-bgp_graceful_restart_drop_export(struct rt_export_request *req UNUSED, const net_addr *n UNUSED, struct rt_pending_export *rpe UNUSED)
-{ /* Nothing to do */ }
static void
bgp_graceful_restart_feed(struct bgp_channel *c)
{
- c->stale_feed = (struct rt_export_request) {
- .name = "BGP-GR",
- .list = proto_event_list(c->c.proto),
- .pool = c->c.proto->pool,
- .feed_block_size = c->c.feed_block_size,
- .trace_routes = c->c.debug | c->c.proto->debug,
- .dump_req = bgp_graceful_restart_feed_dump_req,
- .log_state_change = bgp_graceful_restart_feed_log_state_change,
- .export_bulk = bgp_rte_modify_stale,
- .export_one = bgp_graceful_restart_drop_export,
+ c->stale_feed = (struct rt_export_feeder) {
+ .name = mb_sprintf(c->c.proto->pool, "%s.%s.llgr", c->c.proto->name, c->c.name),
+ .trace_routes = c->c.debug,
+ };
+ c->stale_event = (event) {
+ .hook = bgp_rte_modify_stale,
+ .data = c,
};
- rt_request_export(c->c.table, &c->stale_feed);
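+ /* Subscribe to the all-routes feeder; the walk itself runs from the protocol loop via stale_event */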
+ rt_feeder_subscribe(&c->c.table->export_all, &c->stale_feed);
+ proto_send_event(c->c.proto, &c->stale_event);
}
-
/**
* bgp_graceful_restart_done - finish active BGP graceful restart
* @c: BGP channel
cli_msg(-8006, "%s: bgp reload out not implemented yet", P->name);
}
+struct bgp_enhanced_refresh_request {
+ struct rt_feeding_request rfr;
+ struct bgp_channel *c;
+};
-static void
-bgp_feed_begin(struct channel *C)
+void
+bgp_done_route_refresh(struct rt_feeding_request *rfr)
{
- struct bgp_proto *p = (void *) C->proto;
- struct bgp_channel *c = (void *) C;
-
- /* Ignore non-BGP channels */
- if (C->class != &channel_bgp)
- return;
-
- /* This should not happen */
- if (!p->conn)
- return;
-
- if (!C->refeeding)
- {
- if (p->cf->gr_mode)
- c->feed_state = BFS_LOADING;
- return;
- }
-
- if (!C->refeed_req.hook)
- {
- /* Direct refeed */
- if (C->out_table)
- {
- /* FIXME: THIS IS BROKEN, IT DOESN'T PRUNE THE OUT TABLE */
- c->feed_out_table = 1;
- return;
- }
+ SKIP_BACK_DECLARE(struct bgp_enhanced_refresh_request, berr, rfr, rfr);
+ struct bgp_channel *c = berr->c;
+ SKIP_BACK_DECLARE(struct bgp_proto, p, p, c->c.proto);
- ASSERT_DIE(p->enhanced_refresh);
+ /* Schedule EoRR packet */
+ ASSERT_DIE(c->feed_state == BFS_REFRESHING);
- /* It is refeed and both sides support enhanced route refresh */
- /* BoRR must not be sent before End-of-RIB */
- ASSERT_DIE((c->feed_state != BFS_LOADING) && (c->feed_state != BFS_LOADED));
+ c->feed_state = BFS_REFRESHED;
+ bgp_schedule_packet(p->conn, c, PKT_UPDATE);
- c->feed_state = BFS_REFRESHING;
- bgp_schedule_packet(p->conn, c, PKT_BEGIN_REFRESH);
- }
+ mb_free(berr);
}
static void
-bgp_feed_end(struct channel *C)
+bgp_export_fed(struct channel *C)
{
- struct bgp_proto *p = (void *) C->proto;
- struct bgp_channel *c = (void *) C;
-
- /* Ignore non-BGP channels */
- if (C->class != &channel_bgp)
- return;
-
- if (c->feed_out_table)
- {
- c->feed_out_table = 0;
- return;
- }
-
- /* This should not happen */
- if (!p->conn)
- return;
-
- /* Non-demarcated feed ended, nothing to do */
- if (c->feed_state == BFS_NONE)
- return;
+ SKIP_BACK_DECLARE(struct bgp_channel, c, c, C);
+ SKIP_BACK_DECLARE(struct bgp_proto, p, p, c->c.proto);
/* Schedule End-of-RIB packet */
if (c->feed_state == BFS_LOADING)
+ {
c->feed_state = BFS_LOADED;
-
- /* Schedule EoRR packet */
- if (c->feed_state == BFS_REFRESHING)
- c->feed_state = BFS_REFRESHED;
-
- /* Kick TX hook */
- bgp_schedule_packet(p->conn, c, PKT_UPDATE);
+ bgp_schedule_packet(p->conn, c, PKT_UPDATE);
+ }
}
-
static void
bgp_start_locked(void *_p)
{
P->rt_notify = bgp_rt_notify;
P->preexport = bgp_preexport;
P->iface_sub.neigh_notify = bgp_neigh_notify;
- P->feed_begin = bgp_feed_begin;
- P->feed_end = bgp_feed_end;
+ P->export_fed = bgp_export_fed;
P->sources.class = &bgp_rte_owner_class;
P->sources.rte_recalculate = cf->deterministic_med ? bgp_rte_recalculate : NULL;
timer *stale_timer; /* Long-lived stale timer for LLGR */
u32 stale_time; /* Stored LLGR stale time from last session */
- struct rt_export_request stale_feed; /* Feeder request for stale route modification */
+ struct rt_export_feeder stale_feed; /* Feeder request for stale route modification */
+ event stale_event; /* Feeder event for stale route modification */
u8 add_path_rx; /* Session expects receive of ADD-PATH extended NLRI */
u8 add_path_tx; /* Session expects transmit of ADD-PATH extended NLRI */
int bgp_rte_better(const rte *, const rte *);
int bgp_rte_mergable(const rte *pri, const rte *sec);
int bgp_rte_recalculate(struct rtable_private *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
-void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
+void bgp_rte_modify_stale(void *bgp_channel);
u32 bgp_rte_igp_metric(const rte *);
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);
int bgp_preexport(struct channel *, struct rte *);
/* FIXME: REQUEST REFRESH FROM OUT TABLE */
}
else
- channel_request_feeding_dynamic(&c->c, p->enhanced_refresh ? CFRT_DIRECT : CFRT_AUXILIARY);
+ rt_export_refeed(&c->c.out_req, NULL);
break;
case BGP_RR_BEGIN:
}
}
-/* TODO: unify the code between l3vpn and pipe */
-void pipe_import_by_refeed_free(struct channel_feeding_request *cfr);
-
static int
-l3vpn_reload_routes(struct channel *C, struct channel_import_request *cir)
+l3vpn_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
struct l3vpn_proto *p = (void *) C->proto;
struct channel *feed = NULL;
case NET_MPLS:
/* MPLS doesn't support partial refeed, always do a full one. */
- channel_request_feeding_dynamic(p->ip4_channel, CFRT_DIRECT);
- channel_request_feeding_dynamic(p->ip6_channel, CFRT_DIRECT);
- cir->done(cir);
+ channel_request_full_refeed(p->ip4_channel);
+ channel_request_full_refeed(p->ip6_channel);
+ rfr->done(rfr);
return 1;
}
- if (cir->trie)
- {
- struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
- *reload = (struct import_to_export_reload) {
- .cir = cir,
- .cfr = {
- .type = CFRT_AUXILIARY,
- .done = pipe_import_by_refeed_free,
- .trie = cir->trie,
- },
- };
- channel_request_feeding(feed, &reload->cfr);
- }
- else
- {
- /* Route reload on one channel is just refeed on the other */
- channel_request_feeding_dynamic(feed, CFRT_DIRECT);
- cir->done(cir);
- }
-
+ rt_export_refeed(&feed->out_req, rfr);
return 1;
}
l3vpn_prepare_import_targets(p);
if (p->vpn4_channel && (p->vpn4_channel->channel_state == CS_UP))
- channel_request_feeding_dynamic(p->vpn4_channel, CFRT_AUXILIARY);
+ channel_request_full_refeed(p->vpn4_channel);
if (p->vpn6_channel && (p->vpn6_channel->channel_state == CS_UP))
- channel_request_feeding_dynamic(p->vpn6_channel, CFRT_AUXILIARY);
+ channel_request_full_refeed(p->vpn6_channel);
}
if (export_changed)
l3vpn_prepare_export_targets(p);
if (p->ip4_channel && (p->ip4_channel->channel_state == CS_UP))
- channel_request_feeding_dynamic(p->ip4_channel, CFRT_AUXILIARY);
+ channel_request_full_refeed(p->ip4_channel);
if (p->ip6_channel && (p->ip6_channel->channel_state == CS_UP))
- channel_request_feeding_dynamic(p->ip6_channel, CFRT_AUXILIARY);
+ channel_request_full_refeed(p->ip6_channel);
}
return 1;
#include "lib/macro.h"
static int ospf_preexport(struct channel *C, rte *new);
-static int ospf_reload_routes(struct channel *C, struct channel_import_request *cir);
+static int ospf_reload_routes(struct channel *C, struct rt_feeding_request *rfr);
static int ospf_rte_better(const rte *new, const rte *old);
static u32 ospf_rte_igp_metric(const rte *rt);
static void ospf_disp(timer *timer);
P->iface_sub.ifa_notify = cf->ospf2 ? ospf_ifa_notify2 : ospf_ifa_notify3;
P->preexport = ospf_preexport;
P->reload_routes = ospf_reload_routes;
- P->feed_begin = ospf_feed_begin;
- P->feed_end = ospf_feed_end;
P->sources.class = &ospf_rte_owner_class;
}
static int
-ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
+ospf_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
struct ospf_proto *p = (struct ospf_proto *) C->proto;
- if (cir)
- CALL(cir->done, cir);
+ if (rfr)
+ CALL(rfr->done, rfr);
if (p->calcrt == 2)
return 1;
}
}
-void
-ospf_feed_begin(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct ospf_proto *p = (struct ospf_proto *) C->proto;
- struct top_hash_entry *en;
-
- /* Mark all external LSAs as stale */
- WALK_SLIST(en, p->lsal)
- if (en->mode == LSA_M_EXPORT)
- en->mode = LSA_M_EXPORT_STALE;
-}
-
-void
-ospf_feed_end(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct ospf_proto *p = (struct ospf_proto *) C->proto;
- struct top_hash_entry *en;
-
- /* Flush stale LSAs */
- WALK_SLIST(en, p->lsal)
- if (en->mode == LSA_M_EXPORT_STALE)
- ospf_flush_lsa(p, en);
-}
-
static u32
ort_to_lsaid(struct ospf_proto *p, ort *nf)
{
{
struct pipe_proto *p = (void *) P;
struct channel *dst = (src_ch == p->pri) ? p->sec : p->pri;
- uint *flags = (src_ch == p->pri) ? &p->sec_flags : &p->pri_flags;
if (!new && !old)
return;
- /* Start the route refresh if requested to */
- if (*flags & PIPE_FL_RR_BEGIN_PENDING)
- {
- *flags &= ~PIPE_FL_RR_BEGIN_PENDING;
- rt_refresh_begin(&dst->in_req);
- }
-
if (new)
{
rte e0 = rte_init_from(new);
return 0;
}
-void
-pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
-{
- SKIP_BACK_DECLARE(struct import_to_export_reload, reload, cfr, cfr);
- reload->cir->done(reload->cir);
-}
-
static int
-pipe_reload_routes(struct channel *C, struct channel_import_request *cir)
+pipe_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
- struct pipe_proto *p = (void *) C->proto;
- if (cir->trie)
- {
- struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
- *reload = (struct import_to_export_reload) {
- .cir = cir,
- .cfr = {
- .type = CFRT_AUXILIARY,
- .done = pipe_import_by_refeed_free,
- .trie = cir->trie,
- },
- };
- channel_request_feeding((C == p->pri) ? p->sec : p->pri, &reload->cfr);
- }
- else
- {
- /* Route reload on one channel is just refeed on the other */
- channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
- cir->done(cir);
- }
+ SKIP_BACK_DECLARE(struct pipe_proto, p, p, C->proto);
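+ /* Route reload on one channel is just refeed on the other */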
+ rt_export_refeed(&((C == p->pri) ? p->sec : p->pri)->out_req, rfr);
return 1;
}
-static void
-pipe_feed_begin(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct pipe_proto *p = (void *) C->proto;
- uint *flags = (C == p->pri) ? &p->sec_flags : &p->pri_flags;
-
- *flags |= PIPE_FL_RR_BEGIN_PENDING;
-}
-
-static void
-pipe_feed_end(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct pipe_proto *p = (void *) C->proto;
- struct channel *dst = (C == p->pri) ? p->sec : p->pri;
- uint *flags = (C == p->pri) ? &p->sec_flags : &p->pri_flags;
-
- /* If not even started, start the RR now */
- if (*flags & PIPE_FL_RR_BEGIN_PENDING)
- {
- *flags &= ~PIPE_FL_RR_BEGIN_PENDING;
- rt_refresh_begin(&dst->in_req);
- }
-
- /* Finish RR always */
- rt_refresh_end(&dst->in_req);
-}
-
static void
pipe_postconfig(struct proto_config *CF)
{
P->rt_notify = pipe_rt_notify;
P->preexport = pipe_preexport;
P->reload_routes = pipe_reload_routes;
- P->feed_begin = pipe_feed_begin;
- P->feed_end = pipe_feed_end;
p->rl_gen = (struct tbf) TBF_DEFAULT_LOG_LIMITS;
struct channel_export_stats *s2e = &p->sec->export_stats;
struct rt_import_stats *rs1i = p->pri->in_req.hook ? &p->pri->in_req.hook->stats : NULL;
- struct rt_export_stats *rs1e = p->pri->out_req.hook ? &p->pri->out_req.hook->stats : NULL;
+ struct rt_export_stats *rs1e = &p->pri->out_req.stats;
struct rt_import_stats *rs2i = p->sec->in_req.hook ? &p->sec->in_req.hook->stats : NULL;
- struct rt_export_stats *rs2e = p->sec->out_req.hook ? &p->sec->out_req.hook->stats : NULL;
+ struct rt_export_stats *rs2e = &p->sec->out_req.stats;
u32 pri_routes = p->pri->in_limit.count;
u32 sec_routes = p->sec->in_limit.count;
cli_msg(-1006, " Channel %s", "main");
cli_msg(-1006, " Table: %s", p->pri->table->name);
cli_msg(-1006, " Peer table: %s", p->sec->table->name);
- cli_msg(-1006, " Import state: %s", rt_export_state_name(rt_export_get_state(p->sec->out_req.hook)));
- cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(p->pri->out_req.hook)));
+ cli_msg(-1006, " Import state: %s", rt_export_state_name(rt_export_get_state(&p->sec->out_req)));
+ cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(&p->pri->out_req)));
cli_msg(-1006, " Import filter: %s", filter_name(p->sec->out_filter));
cli_msg(-1006, " Export filter: %s", filter_name(p->pri->out_filter));
struct proto p;
struct channel *pri;
struct channel *sec;
- uint pri_flags;
- uint sec_flags;
struct tbf rl_gen;
};
-#define PIPE_FL_RR_BEGIN_PENDING 1 /* Route refresh should start with the first route notified */
#endif
-
-struct import_to_export_reload {
- struct channel_import_request *cir; /* We can not free this struct before reload finishes. */
- struct channel_feeding_request cfr; /* New request we actually need - import was changed to feed the other side. */
-};
/* We started to accept routes so we need to refeed them */
if (!old->propagate_routes && new->propagate_routes)
- channel_request_feeding_dynamic(p->p.main_channel, CFRT_DIRECT);
+ channel_request_full_refeed(p->p.main_channel);
IFACE_WALK(iface)
{
}
}
-void
-rip_feed_begin(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct rip_proto *p = (struct rip_proto *) C->proto;
-
- FIB_WALK(&p->rtable, struct rip_entry, en)
- {
- if (en->valid == RIP_ENTRY_VALID)
- en->valid = RIP_ENTRY_REFEEDING;
- }
- FIB_WALK_END;
-}
-
-void
-rip_feed_end(struct channel *C)
-{
- if (!C->refeeding || C->refeed_req.hook)
- return;
-
- struct rip_proto *p = (struct rip_proto *) C->proto;
- int changed = 0;
-
- FIB_WALK(&p->rtable, struct rip_entry, en)
- {
- if (en->valid == RIP_ENTRY_REFEEDING)
- {
- rip_withdraw_entry(p, en);
- changed++;
- }
- }
- FIB_WALK_END;
-
- if (changed)
- rip_trigger_update(p);
-}
-
-
void
rip_flush_table(struct rip_proto *p, struct rip_neighbor *n)
{
*/
static int
-rip_reload_routes(struct channel *C, struct channel_import_request *cir)
+rip_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
struct rip_proto *p = (struct rip_proto *) C->proto;
/* Always reload full */
- if (cir)
- CALL(cir->done, cir);
+ if (rfr)
+ CALL(rfr->done, rfr);
if (p->rt_reload)
return 1;
P->iface_sub.neigh_notify = rip_neigh_notify;
P->reload_routes = rip_reload_routes;
P->sources.class = &rip_rte_owner_class;
- P->feed_begin = rip_feed_begin;
- P->feed_end = rip_feed_end;
return P;
}
#define RIP_ENTRY_DUMMY 0 /* Only used to store list of incoming routes */
#define RIP_ENTRY_VALID 1 /* Valid outgoing route */
#define RIP_ENTRY_STALE 2 /* Stale outgoing route, waiting for GC */
-#define RIP_ENTRY_REFEEDING 3 /* Route valid until feed ends */
static inline int rip_is_v2(struct rip_proto *p)
{ return p->rip2; }
}
static void
-static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
+static_mark_partial(struct static_proto *p, struct rt_feeding_request *rfr)
{
struct static_config *cf = (void *) p->p.cf;
struct static_route *r;
WALK_LIST(r, cf->routes)
- if (r->state == SRS_CLEAN && trie_match_net(cir->trie, r->net))
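+ /* Mark clean routes matching the prefilter of the feeding request (or all of them if none is given) */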
+ if (r->state == SRS_CLEAN && (!rfr || rt_prefilter_net(&rfr->prefilter, r->net)))
{
r->state = SRS_DIRTY;
BUFFER_PUSH(p->marked) = r;
if (!ev_active(p->event))
ev_schedule(p->event);
- cir->done(cir);
+ rfr->done(rfr);
}
}
static int
-static_reload_routes(struct channel *C, struct channel_import_request *cir)
+static_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
struct static_proto *p = (void *) C->proto;
TRACE(D_EVENTS, "Scheduling route reload");
- if (cir->trie)
- static_mark_partial(p, cir);
+ if (rfr)
+ static_mark_partial(p, rfr);
else
static_mark_all(p);
ec = netlink_error_to_os(e->error);
if (ec && !(ignore_esrch && (ec == ESRCH)))
log_rl(&rl_netlink_err, L_WARN "Netlink: %s", strerror(ec));
+
return ec;
}
case KPS_FLUSHING:
bug("Can't scan, flushing");
}
+
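+ /* Any other value of the sync state is invalid here */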
+ bug("Bad kernel sync state");
}
static void
p->sync_state = KPS_PRUNING;
KRT_TRACE(p, D_EVENTS, "Pruning table %s", p->p.main_channel->table->name);
rt_refresh_end(&p->p.main_channel->in_req);
- channel_request_feeding_dynamic(p->p.main_channel, CFRT_DIRECT);
- return;
+ channel_request_full_refeed(p->p.main_channel);
+ break;
case KPS_PRUNING:
bug("Kernel scan double-prune");
#endif
if (!krt_capable(e))
+ {
+ if (C->debug & D_ROUTES)
+ log(L_TRACE "%s.%s: refusing incapable route for %N",
+ C->proto->name, C->name, e->net);
return -1;
+ }
/* Before first scan we don't touch the routes */
if (!SKIP_BACK(struct krt_proto, p, C->proto)->ready)
+ {
+ if (C->debug & D_ROUTES)
+ log(L_TRACE "%s.%s not ready yet to accept route for %N",
+ C->proto->name, C->name, e->net);
return -1;
+ }
return 0;
}
}
static int
-krt_reload_routes(struct channel *C, struct channel_import_request *cir)
+krt_reload_routes(struct channel *C, struct rt_feeding_request *rfr)
{
struct krt_proto *p = (void *) C->proto;
-
- if (cir->trie)
- {
- cir->done(cir);
- return 0;
- }
-
- /* Although we keep learned routes in krt_table, we rather schedule a scan */
-
if (KRT_CF->learn)
{
p->reload = 1;
krt_scan_timer_kick(p);
}
- cir->done(cir);
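+ /* Learned routes are reloaded by the scheduled scan; the feeding request itself is done right away */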
+ if (rfr)
+ CALL(rfr->done, rfr);
+
return 1;
}
static void krt_cleanup(struct krt_proto *p);
static void
-krt_feed_end(struct channel *C)
+krt_export_fed(struct channel *C)
{
struct krt_proto *p = (void *) C->proto;
- if (C->refeeding && C->refeed_req.hook)
- return;
-
p->ready = 1;
p->initialized = 1;
switch (p->sync_state)
{
- case KPS_PRUNING:
- KRT_TRACE(p, D_EVENTS, "Table %s pruned", C->table->name);
- p->sync_state = KPS_IDLE;
- return;
-
case KPS_IDLE:
- case KPS_SCANNING:
krt_scan_timer_kick(p);
- return;
+ break;
+
+ case KPS_SCANNING:
+ break;
+
+ case KPS_PRUNING:
+ KRT_TRACE(p, D_EVENTS, "Table %s pruned", p->p.main_channel->table->name);
+ p->sync_state = KPS_IDLE;
+ break;
case KPS_FLUSHING:
krt_do_scan(p);
p->p.rt_notify = krt_rt_notify;
p->p.iface_sub.if_notify = krt_if_notify;
p->p.reload_routes = krt_reload_routes;
- p->p.feed_end = krt_feed_end;
+ p->p.export_fed = krt_export_fed;
p->p.sources.class = &krt_rte_owner_class;
if (p->initialized && !KRT_CF->persist && (P->down_code != PDC_CMD_GR_DOWN))
{
p->sync_state = KPS_FLUSHING;
- channel_request_feeding_dynamic(p->p.main_channel, CFRT_AUXILIARY);
+ channel_request_full_refeed(p->p.main_channel);
/* Keeping the protocol UP until the feed-to-flush is done */
return PS_UP;
krt_get_sync_error(struct krt_proto *p, struct rte *e)
{
return (p->p.proto_state == PS_UP) &&
- bmap_test(&p->p.main_channel->export_map, e->id) &&
+ bmap_test(&p->p.main_channel->export_accepted_map, e->id) &&
!bmap_test(&p->sync_map, e->id);
}