command was used to change filters.
If the partial <prefix> option is used, only the corresponding routes are reloaded.
- Protocol BGP does partial reload only if it has locked table, otherwise partial reload for BGP is refused.
+ Protocol BGP performs a partial reload only if it has the import table enabled; otherwise the partial reload is refused.
Re-export always succeeds, but re-import is protocol-dependent and might
fail (for example, if the BGP neighbor does not support route-refresh
uint count_ = (fib)->hash_size; \
uint hpos_ = (it)->hash; \
type *z; \
- for(;;) { \
- if (!fn_) \
+ for(;;fn_ = fn_->next) { \
+ while (!fn_ && ++hpos_ < count_) \
{ \
- if (++hpos_ >= count_) \
- break; \
fn_ = (fib)->hash_table[hpos_]; \
- continue; \
} \
+ if (hpos_ >= count_) \
+ break; \
z = fib_node_to_user(fib, fn_);
-#define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
+#define FIB_ITERATE_END } } while(0)
#define FIB_ITERATE_PUT(it) fit_put(it, fn_)
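With the node advance moved into the for-increment, a plain `continue` inside the iteration body now skips to the next node instead of spinning on the same one, which the partial-reload checks added below (see the OSPF rt_sync hunk) rely on. A minimal usage sketch, assuming the rest of the fib API from nest/route.h and the ort/nf node type used by the OSPF code in this patch; wanted(), needs_announce() and announce() are hypothetical:

void
example_walk(struct fib *fib)
{
  struct fib_iterator fit;
  FIB_ITERATE_INIT(&fit, fib);

again:
  FIB_ITERATE_START(fib, &fit, ort, nf)
  {
    if (!wanted(nf->fn.addr))
      continue;                 /* safe now: the for-increment advances fn_ */

    if (needs_announce(nf))
    {
      /* Remember the position, do work that may modify the table, then restart */
      FIB_ITERATE_PUT(&fit);
      announce(nf);
      goto again;
    }
  }
  FIB_ITERATE_END;
}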
this_proto = NULL;
}
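+
+/* Runs in the main birdloop once the last reference to the reload request has
+ * been dropped; frees the linpool that keeps the reload trie alive. */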
+static void
+channel_reload_out_done_main(void *_prr)
+{
+ struct proto_reload_request *prr = _prr;
+ ASSERT_THE_BIRD_LOCKED;
+
+ rfree(prr->trie->lp);
+}
+
static inline void
proto_call_cmd_reload(struct proto_spec ps, int dir, const struct f_trie *trie)
{
*prr = (struct proto_reload_request) {
.trie = trie,
.dir = dir,
- .counter =1,
+ .counter = 1,
};
if (trie)
{
+    /* The trie is allocated from the CLI parser pool, but has to outlive the
+     * CLI command, so the pool is reparented to root_pool here. It is freed
+     * in channel_reload_out_done_main() once the last reference is gone. */
ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
rmove(this_cli->parser_pool, &root_pool);
this_cli->parser_pool = lp_new(this_cli->pool);
prr->ev = (event) {
.data = prr,
+ .hook = channel_reload_out_done_main,
};
}
proto_apply_cmd(ps, proto_cmd_reload, 1, (uintptr_t) prr);
+  /* Drop this function's own reference to the trie; if all protocols have
+   * already dropped theirs, the last reference schedules the cleanup event. */
if (trie)
if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
ev_send_loop(&main_birdloop, &prr->ev);
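The trie is reference-counted: this function starts with one reference (.counter = 1) on behalf of the CLI, each reloading channel is expected to take its own, and whoever drops the last one schedules the main-loop cleanup. A sketch of that discipline with hypothetical helper names (the real per-channel release path is channel_reload_out_done(), declared further below); it assumes the proto_reload_request layout used above:

static inline void
reload_request_ref(struct proto_reload_request *prr)
{
  atomic_fetch_add_explicit(&prr->counter, 1, memory_order_acq_rel);
}

static inline void
reload_request_unref(struct proto_reload_request *prr)
{
  /* The last reference (only taken when a trie is attached) schedules
   * channel_reload_out_done_main() in the main birdloop. */
  if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
    ev_send_loop(&main_birdloop, &prr->ev);
}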
static void channel_check_stopped(struct channel *c);
static void channel_reload_in_done(struct channel_import_request *cir);
static void channel_request_partial_reload(struct channel *c, struct channel_import_request *cir);
-void print_trie_node(const struct f_trie_node4 *t, int i);
-
static inline int proto_is_done(struct proto *p)
{ return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
switch (state)
{
case TES_FEEDING:
{
struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
switch (state)
{
case TES_FEEDING:
add_tail(&p->channels, &c->n);
CD(c, "Connected to table %s", c->table->name);
+
return c;
}
proto_remove_channel(p, c);
}
+
struct roa_subscription {
node roa_node;
struct settle settle;
channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+
switch (net->type)
{
case NET_ROA4:
default:
bug("ROA table sent us a non-roa export");
}
+
settle_kick(&s->settle, s->c->proto->loop);
rpe_mark_seen_all(req->hook, first, NULL, NULL);
struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
struct channel *c = s->c;
struct rtable_private *tab = SKIP_BACK(struct rtable_private, exporter.e, req->hook->table);
+
debug(" Channel %s.%s ROA %s change notifier from table %s request %p\n",
c->proto->name, c->name,
(s->settle.hook == channel_roa_in_changed) ? "import" : "export",
{
void (*hook)(struct settle *) =
dir ? channel_roa_in_changed : channel_roa_out_changed;
+
struct roa_subscription *s;
node *n;
return;
struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));
+
*s = (struct roa_subscription) {
.settle = SETTLE_INIT(&c->roa_settle, dir ? channel_roa_in_changed : channel_roa_out_changed, NULL),
.c = c,
.export_one = channel_export_one_roa,
},
};
+
add_tail(&c->roa_subscriptions, &s->roa_node);
rt_request_export(tab, &s->req);
}
struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
req->hook = NULL;
+
channel_feed_end(c);
}
channel_init_feeding(struct channel *c)
{
int no_trie = 0;
+
for (struct channel_feeding_request *cfrp = c->refeed_pending; cfrp; cfrp = cfrp->next)
if (cfrp->type == CFRT_DIRECT)
{
SKIP_BACK(struct channel, refeed_req,
SKIP_BACK(struct rt_export_request, prefilter, p)
);
+
ASSERT_DIE(c->refeeding);
for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
if (!cfr->trie || trie_match_net(cfr->trie, n))
}
int
-import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n)
+channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n)
{
for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
{
SKIP_BACK(struct rt_export_request, prefilter, p)
);
ASSERT_DIE(c->importing);
- for (struct channel_import_request *cir = c->importing; cir; cir = cir->next)
- {
- if (!cir->trie || trie_match_net(cir->trie, n))
- return 1;
- }
- return 0;
+
+ return channel_import_request_prefilter(c->importing, n);
}
static void
c->reload_pending = 1;
return;
}
- struct channel_import_request *last = cir;
- while (last)
+
+ /* If there is any full-reload request, we can disregard all partials */
+  for (struct channel_import_request *last = cir; last && !no_trie; )
{
if (!last->trie)
no_trie = 1;
- last = last->next;
+ last = last->next;
}
+  /* Activate the pending imports */
c->importing = c->import_pending;
c->import_pending = NULL;
channel_request_feeding(struct channel *c, struct channel_feeding_request *cfr)
{
ASSERT_DIE(c->out_req.hook);
+
CD(c, "Feeding requested (%s)",
cfr->type == CFRT_DIRECT ? "direct" :
(cfr->trie ? "partial" : "auxiliary"));
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
else if (! c->proto->reload_routes(c, cir))
- bug( "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
+ bug("Channel %s.%s refused full import reload.", c->proto->name, c->name);
}
static void
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
else if (! c->proto->reload_routes(c, cir))
- cli_msg(-15, "Partial reload was refused. Maybe you tried partial reload on BGP with unlocked table?");
+ cli_msg(-15, "%s.%s: partial reload refused, please run full reload instead", c->proto->name, c->name);
}
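The two callers above encode the hook contract: reload_routes() returns nonzero when it accepts the (possibly partial) reload and zero when it refuses, and on either path the channel_import_request has to be completed with cir->done() by the protocol. A minimal sketch of a conforming hook under those assumptions; example_proto, its keeps_routes flag and schedule_route_replay() are hypothetical (compare the bgp and krt hooks later in this patch):

static int
example_reload_routes(struct channel *C, struct channel_import_request *cir)
{
  struct example_proto *p = (void *) C->proto;

  if (cir && cir->trie && !p->keeps_routes)
  {
    /* Partial reload refused: complete the request and report failure */
    cir->done(cir);
    return 0;
  }

  /* Accepted: replay stored routes (restricted by cir->trie if present)
   * and call cir->done(cir) once the replay has finished. */
  schedule_route_replay(p, cir);
  return 1;
}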
const struct channel_class channel_basic = {
struct proto_reload_request *prr;
};
-static void
-channel_reload_out_done_main(void *_prr)
-{
- struct proto_reload_request *prr = _prr;
- ASSERT_THE_BIRD_LOCKED;
-
- rfree(prr->trie->lp);
-}
-
static void
channel_reload_out_done(struct channel_feeding_request *cfr)
{
proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
{
struct proto_reload_request *prr = (void *) _prr;
- if (prr->trie)
- prr->ev.hook = channel_reload_out_done_main;
struct channel *c;
if (p->disabled)
{
void channel_set_state(struct channel *c, uint state);
void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
-int import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n);
+int channel_import_request_prefilter(struct channel_import_request *cir_head, const net_addr *n);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
}
static struct rt_pending_export * rt_next_export_fast(struct rt_pending_export *last);
-
static int
rte_export(struct rt_table_export_hook *th, struct rt_pending_export *rpe)
{
{
struct rt_exporter *re = &tab->exporter.e;
struct rt_export_request *req = hook->h.req;
+
/* stats zeroed by mb_allocz */
switch (req->prefilter.mode)
{
/* Ignore non-BGP channels */
if (C->class != &channel_bgp)
+ {
+ if (cir)
+ cir->done(cir);
return 1;
+ }
+
if (cir)
{
if (cir->trie)
cir->done(cir);
return 0;
}
+    /* The cir is not needed anymore, and we would not be able to tell later
+     * when to complete it, so do it now. */
cir->done(cir);
}
net_normalize_ip4(&net);
// XXXX validate prefix
+
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}
net_normalize_ip6(&net);
// XXXX validate prefix
+
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}
p->cir = cir;
}
if (p->calcrt == 2)
- {
- /*todo*/
return 1;
- }
OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
p->calcrt = 2;
/* Calculate routing table */
if (p->calcrt)
- {
ospf_rt_spf(p);
- }
/* Cleanup after graceful restart */
if (p->gr_cleanup)
}
}
-
/* RFC 2328 16.1. calculating shortest paths for an area */
static void
ospf_rt_spfa(struct ospf_area *oa)
struct ospf_proto *p = oa->po;
struct top_hash_entry *act;
node *n;
+
if (oa->rt == NULL)
return;
if (oa->rt->lsa.age == LSA_MAXAGE)
return;
+
OSPF_TRACE(D_EVENTS, "Starting routing table calculation for area %R", oa->areaid);
/* 16.1. (1) */
DBG("Working on LSA: rt: %R, id: %R, type: %u\n",
act->lsa.rt, act->lsa.id, act->lsa_type);
+
act->color = INSPF;
switch (act->lsa_type)
{
rt_sync(p);
lp_flush(p->nhpool);
- if (p->cir == NULL)
+  if (p->cir == NULL)	/* No more import requests are waiting for a reload */
p->calcrt = 0;
}
again1:
FIB_ITERATE_START(fib, &fit, ort, nf)
{
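+    /* Partial reload: skip networks not covered by any pending import request */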
+ if (cir && !channel_import_request_prefilter(cir, nf->fn.addr))
+ continue;
/* Sanity check of next-hop addresses, failure should not happen */
if (nf->n.type && nf->n.nhs)
{
if (reload || ort_changed(nf, &eattrs.l))
{
- if (cir == NULL || import_prefilter_for_protocols(cir, nf->fn.addr))
- {
- nf->old_metric1 = nf->n.metric1;
- nf->old_metric2 = nf->n.metric2;
- nf->old_tag = nf->n.tag;
- nf->old_rid = nf->n.rid;
-
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
+ nf->old_metric1 = nf->n.metric1;
+ nf->old_metric2 = nf->n.metric2;
+ nf->old_tag = nf->n.tag;
+ nf->old_rid = nf->n.rid;
- if (nf->n.type == RTS_OSPF_EXT2)
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
- if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
+ if (nf->n.type == RTS_OSPF_EXT2)
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
+ if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
+ EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
- ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
- ea_list *eal = ea_lookup(&eattrs.l, 0);
- ea_free(nf->old_ea);
- nf->old_ea = eal;
+ ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
- rte e0 = {
- .attrs = eal,
- .src = p->p.main_source,
- };
+ ea_list *eal = ea_lookup(&eattrs.l, 0);
+ ea_free(nf->old_ea);
+ nf->old_ea = eal;
- /*
- DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
- a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
- */
- rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
- }
+ rte e0 = {
+ .attrs = eal,
+ .src = p->p.main_source,
+ };
+
+ /*
+ DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
+ a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
+ */
+ rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
}
}
else if (nf->old_ea)
}
/* Propagating eventual change */
- if (changed || p->rt_reload)
+ if ((changed || p->rt_reload) && (cir == NULL || channel_import_request_prefilter(cir, en->n.addr)))
{
- if (cir == NULL || import_prefilter_for_protocols(cir, en->n.addr))
- {
- /*
- * We have to restart the iteration because there may be a cascade of
- * synchronous events rip_announce_rte() -> nest table change ->
- * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
- */
- FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
- rip_announce_rte(p, en);
- goto loop;
- }
+ /*
+ * We have to restart the iteration because there may be a cascade of
+ * synchronous events rip_announce_rte() -> nest table change ->
+ * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
+ */
+ FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
+ rip_announce_rte(p, en);
+ goto loop;
}
/* Checking stale entries for garbage collection timeout */
static_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct static_proto *p = (void *) C->proto;
+
TRACE(D_EVENTS, "Scheduling route reload");
+
if (cir && cir->trie)
static_mark_partial(p, cir);
else
+++ /dev/null
-log "bird.log" all;
-
-debug protocols all;
-debug channels all;
-debug tables all;
-
-ipv4 table master1;
-ipv4 table master2;
-
-protocol device {
- scan time 10;
-}
-
-protocol static static1 {
- ipv4 { table master1; };
- route 10.0.0.0/16 unreachable;
- route 12.0.0.0/16 unreachable;
- route 127.0.0.0/8 unreachable;
- route 192.0.0.0/8 unreachable;
- route 192.168.0.0/16 unreachable;
- route 195.113.26.206/32 unreachable;
- route 1.1.1.1/32 unreachable;
-}
-
-ipv4 table ct_4;
-protocol pipe {
- table master1;
- peer table master2;
- import filter {
- print net;
- accept;
- };
- export filter {
- print net;
- accept;
- };
-}
-
-protocol rip rip4 {
- ipv4 {
- export all;
- };
- interface "ve0";
- interface "ve1", "ve2" { metric 1; mode multicast; };
-}
-
}
static int
-krt_reload_routes(struct channel *C, struct channel_import_request *cir UNUSED)
+krt_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct krt_proto *p = (void *) C->proto;
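+
+  /* Kernel routes are refreshed by a full scan which cannot be restricted to
+   * a prefix trie, so partial reload is refused; a full reload just completes
+   * the request and schedules a regular scan. */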
+ if (cir && cir->trie)
+ {
+ cir->done(cir);
+ return 0;
+ }
+ if (cir)
+ cir->done(cir);
+
/* Although we keep learned routes in krt_table, we rather schedule a scan */
if (KRT_CF->learn)
&ea_krt_metric,
);
}
-
-