return 0;
}
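+
+/*
+ * Decide whether net @n passes a chain of import requests: it passes if any
+ * request asks for a full reload (no trie) or its trie matches the net.
+ */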
+int
+import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n)
+{
+ for (struct channel_import_request *cir = cir_head; cir; cir = cir->next)
+ {
+ if (!cir->trie || trie_match_net(cir->trie, n))
+ return 1;
+ }
+ return 0;
+}
+
static int
channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
{
for (struct channel_import_request *cir = c->importing; cir; cir = cir->next)
{
if (!cir->trie || trie_match_net(cir->trie, n))
- {
- log(L_TRACE "%N passed to import", n);
return 1;
- }
}
- log(L_TRACE "%N filtered out of import trie", n);
return 0;
}
CD(c, "Reload requested");
- if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER) {
- struct channel_import_request* cir = mb_alloc(c->proto->pool, sizeof *cir);;
- cir->trie = NULL;
- cir->done = channel_import_request_done_dynamic;
+ struct channel_import_request *cir = mb_alloc(c->proto->pool, sizeof *cir);
+ cir->trie = NULL;
+ cir->done = channel_import_request_done_dynamic;
+
+ if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
- }
else
- c->proto->reload_routes(c);
+ c->proto->reload_routes(c, cir);
}
static void
if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
channel_schedule_reload(c, cir);
- /* TODO */
- else
- CD(c, "Partial import reload requested, but with ric cosi");
- /*c->proto->reload_routes(c);
- */
+ else if (!c->proto->reload_routes(c, cir))
+   cli_msg(-15, "Partial reload refused: this protocol does not support partial reload");
}
const struct channel_class channel_basic = {
{
struct proto_reload_request *prr = (void *) _prr;
struct channel *c;
+ log("channel proto_cmd_reload_called");
if (p->disabled)
{
}
log(L_INFO "Reloading protocol %s", p->name);
+
+ ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
+ rmove(this_cli->parser_pool, &root_pool);
+ this_cli->parser_pool = lp_new(this_cli->pool);
+ prr->ev = (event) {
+ .hook = channel_reload_out_done_main,
+ .data = prr,
+ };
+ prr->counter = 1;
/* re-importing routes */
if (prr->dir != CMD_RELOAD_OUT)
if (prr->trie)
{
/* Increase the refeed counter */
- if (atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed) == 0)
- {
- /* First occurence */
- ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
- rmove(this_cli->parser_pool, &root_pool);
- this_cli->parser_pool = lp_new(this_cli->pool);
- prr->ev = (event) {
- .hook = channel_reload_out_done_main,
- .data = prr,
- };
- }
- else
- ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+ atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+ ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
*req = (struct channel_cmd_reload_import_request) {
if (prr->trie)
{
/* Increase the refeed counter */
- if (atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed) == 0)
- {
- /* First occurence */
- ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
- rmove(this_cli->parser_pool, &root_pool);
- this_cli->parser_pool = lp_new(this_cli->pool);
- prr->ev = (event) {
- .hook = channel_reload_out_done_main,
- .data = prr,
- };
- }
- else
- ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+ atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed);
+ ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
/* Request actually the feeding */
channel_request_feeding_dynamic(c, CFRT_AUXILIARY);
cli_msg(-15, "%s: reloading", p->name);
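+ /* Drop the initial reference taken above */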
+ if (atomic_fetch_sub_explicit(&prr->counter, 1, memory_order_acq_rel) == 1)
+ ev_send_loop(&main_birdloop, &prr->ev);
}
extern void pipe_update_debug(struct proto *P);
/* Protocol-specific data follow... */
};
+struct channel_import_request {
+ struct channel_import_request *next; /* Next in request chain */
+ void (*done)(struct channel_import_request *); /* Called when import finishes */
+ const struct f_trie *trie; /* Reload only matching nets */
+};
+
#define TLIST_PREFIX proto
#define TLIST_TYPE struct proto
#define TLIST_ITEM n
void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
int (*preexport)(struct channel *, struct rte *rt);
- void (*reload_routes)(struct channel *);
+ int (*reload_routes)(struct channel *, struct channel_import_request *cir);
void (*feed_begin)(struct channel *);
void (*feed_end)(struct channel *);
void channel_set_state(struct channel *c, uint state);
void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
+int import_prefilter_for_protocols(struct channel_import_request *cir_head, const net_addr *n);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
} state;
};
-struct channel_import_request {
- struct channel_import_request *next; /* Next in request chain */
- void (*done)(struct channel_import_request *); /* Called when import finishes */
- const struct f_trie *trie; /* Reload only matching nets */
-};
-
struct channel *channel_from_export_request(struct rt_export_request *req);
void channel_request_feeding(struct channel *c, struct channel_feeding_request *);
void channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type);
struct rt_pending_export *rpe = rt_last_export(hook->table);
req_trace(req, D_STATES, "Export initialized, last export %p (%lu)", rpe, rpe ? rpe->seq : 0);
atomic_store_explicit(&hook->last_export, rpe, memory_order_relaxed);
rt_init_export(re, req->hook);
}
while (new.attrs->next)
new.attrs = new.attrs->next;
- log(L_TRACE "chanel_reload_export_bulk %N", net);
/* And reload the route */
rte_update(c, net, &new, new.src);
}
}
}
-static void
-bgp_reload_routes(struct channel *C)
+static int
+bgp_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct bgp_proto *p = (void *) C->proto;
struct bgp_channel *c = (void *) C;
+ log("in bgp");
/* Ignore non-BGP channels */
if (C->class != &channel_bgp)
- return;
+ {
+   if (cir)
+     cir->done(cir);
+   return 1;
+ }
+ if (cir)
+ {
+   /* Partial reload is not supported for BGP; signal refusal to the caller */
+   int partial = !!cir->trie;
+   cir->done(cir);
+   if (partial)
+     return 0;
+ }
ASSERT(p->conn && p->route_refresh);
bgp_schedule_packet(p->conn, c, PKT_ROUTE_REFRESH);
+ return 1;
}
static void
net_normalize_ip4(&net);
// XXXX validate prefix
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}
net_normalize_ip6(&net);
// XXXX validate prefix
bgp_rte_update(s, (net_addr *) &net, path_id, a);
}
}
#include "lib/macro.h"
static int ospf_preexport(struct channel *C, rte *new);
-static void ospf_reload_routes(struct channel *C);
+static int ospf_reload_routes(struct channel *C, struct channel_import_request *cir);
static int ospf_rte_better(const rte *new, const rte *old);
static u32 ospf_rte_igp_metric(const rte *rt);
static void ospf_disp(timer *timer);
p->calcrt = 1;
}
-static void
-ospf_reload_routes(struct channel *C)
+static int
+ospf_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct ospf_proto *p = (struct ospf_proto *) C->proto;
+ if (cir)
+ {
+   /* Queue the request; ospf_rt_sync() will pick it up */
+   pthread_mutex_lock(&p->mutex);
+   cir->next = p->cir;
+   p->cir = cir;
+   pthread_mutex_unlock(&p->mutex);
+ }
if (p->calcrt == 2)
- return;
+ return 1;
OSPF_TRACE(D_EVENTS, "Scheduling routing table calculation with route reload");
p->calcrt = 2;
+ return 1;
}
/* Calculate routing table */
if (p->calcrt)
ospf_rt_spf(p);
/* Cleanup after graceful restart */
if (p->gr_cleanup)
#ifndef _BIRD_OSPF_H_
#define _BIRD_OSPF_H_
+#include <pthread.h>
#include "nest/bird.h"
#include "lib/checksum.h"
slist lsal; /* List of all LSA's */
int calcrt; /* Routing table calculation scheduled?
0=no, 1=normal, 2=forced reload */
+ struct channel_import_request *cir; /* Chain of partial reload requests */
+ pthread_mutex_t mutex; /* Mutex for partial reload */
list iface_list; /* List of OSPF interfaces (struct ospf_iface) */
list area_list; /* List of OSPF areas (struct ospf_area) */
int areano; /* Number of area I belong to */
}
}
/* RFC 2328 16.1. calculating shortest paths for an area */
static void
ospf_rt_spfa(struct ospf_area *oa)
struct ospf_proto *p = oa->po;
struct top_hash_entry *act;
node *n;
if (oa->rt == NULL)
return;
if (oa->rt->lsa.age == LSA_MAXAGE)
return;
OSPF_TRACE(D_EVENTS, "Starting routing table calculation for area %R", oa->areaid);
/* 16.1. (1) */
add_head(&oa->cand, &oa->rt->cn);
DBG("RT LSA: rt: %R, id: %R, type: %u\n",
oa->rt->lsa.rt, oa->rt->lsa.id, oa->rt->lsa_type);
while (!EMPTY_LIST(oa->cand))
{
DBG("Working on LSA: rt: %R, id: %R, type: %u\n",
act->lsa.rt, act->lsa.id, act->lsa_type);
act->color = INSPF;
switch (act->lsa_type)
{
OSPF_TRACE(D_EVENTS, "Starting routing table synchronization");
+ pthread_mutex_lock(&p->mutex);
+ struct channel_import_request *cir = p->cir;
+ p->cir = NULL;
+ pthread_mutex_unlock(&p->mutex);
+
DBG("Now syncing my rt table with nest's\n");
FIB_ITERATE_INIT(&fit, fib);
again1:
if (reload || ort_changed(nf, &eattrs.l))
{
- nf->old_metric1 = nf->n.metric1;
- nf->old_metric2 = nf->n.metric2;
- nf->old_tag = nf->n.tag;
- nf->old_rid = nf->n.rid;
-
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
+ if (cir == NULL || import_prefilter_for_protocols(cir, nf->fn.addr))
+ {
+ nf->old_metric1 = nf->n.metric1;
+ nf->old_metric2 = nf->n.metric2;
+ nf->old_tag = nf->n.tag;
+ nf->old_rid = nf->n.rid;
- if (nf->n.type == RTS_OSPF_EXT2)
eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
+ EA_LITERAL_EMBEDDED(&ea_ospf_metric1, 0, nf->n.metric1);
- if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
+ if (nf->n.type == RTS_OSPF_EXT2)
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_metric2, 0, nf->n.metric2);
- eattrs.a[eattrs.l.count++] =
- EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
+ if ((nf->n.type == RTS_OSPF_EXT1) || (nf->n.type == RTS_OSPF_EXT2))
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_tag, 0, nf->n.tag);
- ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
+ eattrs.a[eattrs.l.count++] =
+ EA_LITERAL_EMBEDDED(&ea_ospf_router_id, 0, nf->n.rid);
- ea_list *eal = ea_lookup(&eattrs.l, 0);
- ea_free(nf->old_ea);
- nf->old_ea = eal;
+ ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
- rte e0 = {
- .attrs = eal,
- .src = p->p.main_source,
- };
+ ea_list *eal = ea_lookup(&eattrs.l, 0);
+ ea_free(nf->old_ea);
+ nf->old_ea = eal;
- /*
- DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
- a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
- */
+ rte e0 = {
+ .attrs = eal,
+ .src = p->p.main_source,
+ };
- rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
+ /*
+ DBG("Mod rte type %d - %N via %I on iface %s, met %d\n",
+ a0.source, nf->fn.addr, a0.gw, a0.iface ? a0.iface->name : "(none)", nf->n.metric1);
+ */
+ rte_update(p->p.main_channel, nf->fn.addr, &e0, p->p.main_source);
+ }
}
}
else if (nf->old_ea)
}
}
FIB_ITERATE_END;
+
+ /* Partial reload finished; complete every request in the chain */
+ for (struct channel_import_request *next; cir; cir = next)
+ {
+   next = cir->next;
+   cir->done(cir);
+ }
WALK_LIST(oa, p->area_list)
{
return 0;
}
-static void
-pipe_reload_routes(struct channel *C)
+static void
+pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
{
- struct pipe_proto *p = (void *) C->proto;
+ struct import_to_export_reload *reload = SKIP_BACK(struct import_to_export_reload, cfr, cfr);
+ reload->cir->done(reload->cir);
+ log("free is done");
+}
- /* Route reload on one channel is just refeed on the other */
- channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+static int
+pipe_reload_routes(struct channel *C, struct channel_import_request *cir)
+{
+ struct pipe_proto *p = (void *) C->proto;
+ if (cir && cir->trie)
+ {
+ struct import_to_export_reload *reload = lp_alloc(cir->trie->lp, sizeof *reload);
+ *reload = (struct import_to_export_reload) {
+ .cir = cir,
+ .cfr = {
+ .type = CFRT_AUXILIARY,
+ .done = pipe_import_by_refeed_free,
+ .trie = cir->trie,
+ },
+ };
+ channel_request_feeding((C == p->pri) ? p->sec : p->pri, &reload->cfr);
+ }
+ else
+ {
+   if (cir)
+     cir->done(cir);
+   /* Route reload on one channel is just refeed on the other */
+   channel_request_feeding_dynamic((C == p->pri) ? p->sec : p->pri, CFRT_DIRECT);
+ }
+ return 1;
}
static void
#define PIPE_FL_RR_BEGIN_PENDING 1 /* Route refresh should start with the first route notified */
#endif
+
+struct import_to_export_reload {
+  struct channel_import_request *cir; /* Request being reloaded; must stay allocated until the refeed finishes */
+  struct channel_feeding_request cfr; /* The request we actually pass on: the import reload becomes a feed on the other side */
+};
TRACE(D_EVENTS, "Main timer fired");
FIB_ITERATE_INIT(&fit, &p->rtable);
+
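+ /* Take over the partial reload requests queued by rip_reload_routes() */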
+ pthread_mutex_lock(&p->mutex);
+ struct channel_import_request *cir = p->cir;
+ p->cir = NULL;
+ pthread_mutex_unlock(&p->mutex);
loop:
FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
/* Propagating eventual change */
if (changed || p->rt_reload)
{
- /*
- * We have to restart the iteration because there may be a cascade of
- * synchronous events rip_announce_rte() -> nest table change ->
- * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
- */
-
- FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
- rip_announce_rte(p, en);
- goto loop;
+ if (cir == NULL || import_prefilter_for_protocols(cir, en->n.addr))
+ {
+ /*
+ * We have to restart the iteration because there may be a cascade of
+ * synchronous events rip_announce_rte() -> nest table change ->
+ * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
+ */
+ FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
+ rip_announce_rte(p, en);
+ goto loop;
+ }
}
/* Checking stale entries for garbage collection timeout */
}
}
+ /* Partial reload finished; complete every request in the chain */
+ for (struct channel_import_request *next; cir; cir = next)
+ {
+   next = cir->next;
+   cir->done(cir);
+ }
tm_start(p->timer, MAX(next - now_, 100 MS));
}
* RIP protocol glue
*/
-static void
-rip_reload_routes(struct channel *C)
+static int
+rip_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct rip_proto *p = (struct rip_proto *) C->proto;
+ if (cir)
+ {
+   /* Queue the request; the main timer hook will pick it up */
+   pthread_mutex_lock(&p->mutex);
+   cir->next = p->cir;
+   p->cir = cir;
+   pthread_mutex_unlock(&p->mutex);
+ }
if (p->rt_reload)
- return;
+ return 1;
TRACE(D_EVENTS, "Scheduling route reload");
p->rt_reload = 1;
rip_kick_timer(p);
+ return 1;
}
static struct rte_owner_class rip_rte_owner_class;
#ifndef _BIRD_RIP_H_
#define _BIRD_RIP_H_
+#include <pthread.h>
#include "nest/bird.h"
#include "nest/cli.h"
#include "nest/iface.h"
struct tbf log_pkt_tbf; /* TBF for packet messages */
struct tbf log_rte_tbf; /* TBF for RTE messages */
+ struct channel_import_request *cir; /* Chain of partial reload requests */
+ pthread_mutex_t mutex; /* Mutex for partial reload */
};
struct rip_iface
ev_schedule(p->event);
}
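+/* Partial variant of static_mark_all(): mark only the routes whose nets
+   match the request's trie */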
+static void
+static_mark_partial(struct static_proto *p, struct channel_import_request *cir)
+{
+ struct static_config *cf = (void *) p->p.cf;
+ struct static_route *r;
+
+ WALK_LIST(r, cf->routes)
+ if (r->state == SRS_CLEAN && trie_match_net(cir->trie, r->net))
+ {
+ r->state = SRS_DIRTY;
+ BUFFER_PUSH(p->marked) = r;
+ }
+
+ if (!ev_active(p->event))
+ ev_schedule(p->event);
+
+ cir->done(cir);
+}
+
static void
static_announce_marked(void *P)
static_mark_rte(p, r->mp_head);
}
-static void
-static_reload_routes(struct channel *C)
+static int
+static_reload_routes(struct channel *C, struct channel_import_request *cir)
{
struct static_proto *p = (void *) C->proto;
-
TRACE(D_EVENTS, "Scheduling route reload");
-
- static_mark_all(p);
+ if (cir && cir->trie)
+ static_mark_partial(p, cir);
+ else
+ {
+ if (cir)
+ cir->done(cir);
+ static_mark_all(p);
+ }
+ return 1;
}
static int
};
}
+protocol rip rip4 {
+ ipv4 {
+ export all;
+ };
+ interface "ve0";
+ interface "ve1", "ve2" { metric 1; mode multicast; };
+}
+