static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
static int channel_refeed_prefilter(const struct rt_prefilter *p, const net_addr *n);
+static int channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n);
static void channel_feed_end(struct channel *c);
static void channel_stop_export(struct channel *c);
static void channel_export_stopped(struct rt_export_request *req);
.dump_req = channel_dump_import_req,
.log_state_change = channel_import_log_state_change,
.preimport = channel_preimport,
+ .prefilter = {
+ .mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
+ .addr = c->out_subprefix,
+ },
};
ASSERT(c->channel_state == CS_UP);
for (struct channel_feeding_request *cfr = c->refeeding; cfr; cfr = cfr->next)
if (!cfr->trie || trie_match_net(cfr->trie, n))
+ {
+ log(L_TRACE "Export this one");
return 1;
-
+ }
+ log(L_TRACE "%N filtered out of export", n);
return 0;
}
+/*
+ * Prefilter hook for partial import reload: accept only nets matched by
+ * at least one scheduled import request. A request without a trie matches
+ * everything. Mirrors channel_refeed_prefilter() on the export side.
+ */
+static int
+channel_import_prefilter(const struct rt_prefilter *p, const net_addr *n)
+{
+  /* Recover the channel from its embedded reload request. */
+  const struct channel *c =
+    SKIP_BACK(struct channel, reload_req,
+	SKIP_BACK(struct rt_export_request, prefilter, p)
+	);
+  for (struct channel_import_request *cir = c->importing; cir; cir = cir->next)
+    if (!cir->trie || trie_match_net(cir->trie, n))
+    {
+      log(L_TRACE "%N accepted for import", n);
+      return 1;
+    }
+  log(L_TRACE "%N filtered out of import", n);
+  return 0;
+}
static void
channel_feed_end(struct channel *c)
/* Called by protocol for reload from in_table */
void
-channel_schedule_reload(struct channel *c)
+channel_schedule_reload(struct channel *c, struct channel_import_request *cir)
{
+  log(L_TRACE "channel_schedule_reload: cir=%p, trie=%p", cir, cir ? (const void *) cir->trie : NULL);
ASSERT(c->in_req.hook);
-
-  if (c->reload_req.hook)
+  /* Enqueue the new request first so it is covered by this reload
+   * (or by the follow-up one if a reload is already running). */
+  if (cir)
+  {
+    cir->next = c->import_pending;
+    c->import_pending = cir;
+  }
+
+  if (c->reload_req.hook)
{
CD(c, "Reload triggered before the previous one has finished");
c->reload_pending = 1;
return;
}
+
+  /* Take over all scheduled requests for this reload run. */
+  c->importing = c->import_pending;
+  c->import_pending = NULL;
+
+  /* Any request without a trie asks for a full reload, making per-net
+   * prefiltering pointless. An empty request list (direct full reload)
+   * behaves the same way. */
+  int no_trie = !c->importing;
+  for (struct channel_import_request *r = c->importing; r; r = r->next)
+    if (!r->trie)
+    {
+      no_trie = 1;
+      break;
+    }
+
+  if (no_trie)
+  {
+    c->reload_req.prefilter.mode = TE_ADDR_NONE;
+    c->reload_req.prefilter.hook = NULL;
+  }
+  else
+  {
+    CD(c, "Import with trie");
+    c->reload_req.prefilter.mode = TE_ADDR_HOOK;
+    c->reload_req.prefilter.hook = channel_import_prefilter;
+  }
-  rt_refresh_begin(&c->in_req);
rt_request_export(c->table, &c->reload_req);
}
CD(c, "Reload requested");
-  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
-    channel_schedule_reload(c);
+  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
+  {
+    /* Full reload: a zeroed request (trie == NULL) matches every net.
+     * mb_allocz also clears ->next and ->done, which mb_alloc left garbage. */
+    struct channel_import_request *cir = mb_allocz(c->proto->pool, sizeof *cir);
+    channel_schedule_reload(c, cir);
+  }
else
c->proto->reload_routes(c);
}
+/*
+ * Request a reload of only the nets matched by cir->trie.
+ * Currently implemented only for channels keeping prefiltered routes
+ * (RIK_PREFILTER); other channels would have to ask the protocol.
+ */
+static void
+channel_request_partial_reload(struct channel *c, struct channel_import_request *cir)
+{
+  ASSERT(c->in_req.hook);
+  ASSERT(channel_reloadable(c));
+
+  CD(c, "Partial import reload requested");
+
+  if ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER)
+    channel_schedule_reload(c, cir);
+  else
+  {
+    /* TODO: teach protocols to reload a subset of nets;
+     * c->proto->reload_routes(c) would reload everything.
+     * NOTE(review): cir is dropped here and its done() hook never runs —
+     * confirm the callers cope with that. */
+    CD(c, "Partial import reload not supported by this channel, ignoring");
+  }
+}
+
const struct channel_class channel_basic = {
.channel_size = sizeof(struct channel),
.config_size = sizeof(struct channel_config)
struct proto_reload_request *prr;
};
+/* Per-channel state of a CLI partial reload command (import direction). */
+struct channel_cmd_reload_import_request {
+  struct channel_import_request cir;	/* Embedded request handed to the channel */
+  struct proto_reload_request *prr;	/* The CLI reload command this belongs to */
+};
+
static void
channel_reload_out_done_main(void *_prr)
{
ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
}
+/* Import-side completion hook: counts down the reload command's channel
+ * counter and wakes up the CLI when the last channel finishes. */
+static void
+channel_reload_in_done(struct channel_import_request *cir)
+{
+  /* Recover the enclosing command record from the embedded request. */
+  struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
+  /* fetch_sub returns the previous value; 1 means we were the last one. */
+  if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
+    ev_send_loop(&main_birdloop, &ccrir->prr->ev);
+}
+
void
proto_cmd_reload(struct proto *p, uintptr_t _prr, int cnt UNUSED)
{
if (prr->dir != CMD_RELOAD_OUT)
WALK_LIST(c, p->channels)
if (c->channel_state == CS_UP)
- channel_request_reload(c);
+ {
+ if (prr->trie)
+ {
+ /* Increase the refeed counter */
+ if (atomic_fetch_add_explicit(&prr->counter, 1, memory_order_relaxed) == 0)
+ {
+ /* First occurrence */
+ ASSERT_DIE(this_cli->parser_pool == prr->trie->lp);
+ rmove(this_cli->parser_pool, &root_pool);
+ this_cli->parser_pool = lp_new(this_cli->pool);
+ prr->ev = (event) {
+ .hook = channel_reload_out_done_main,
+ .data = prr,
+ };
+ }
+ else
+ ASSERT_DIE(this_cli->parser_pool != prr->trie->lp);
+
+ struct channel_cmd_reload_import_request *req = lp_alloc(prr->trie->lp, sizeof *req);
+ *req = (struct channel_cmd_reload_import_request) {
+ .cir = {
+ .done = channel_reload_in_done,
+ .trie = prr->trie,
+ },
+ .prr = prr,
+ };
+ channel_request_partial_reload(c, &req->cir);
+ }
+ else
+ channel_request_reload(c);
+ }
/* re-exporting routes */
if (prr->dir != CMD_RELOAD_IN)
struct f_trie *refeed_trie; /* Auxiliary refeed trie */
struct channel_feeding_request *refeeding; /* Refeeding the channel */
struct channel_feeding_request *refeed_pending; /* Scheduled refeeds */
+ struct channel_import_request *importing; /* Import requests now being processed */
+ struct channel_import_request *import_pending; /* Scheduled imports */
uint feed_block_size; /* How many routes to feed at once */
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
void channel_set_state(struct channel *c, uint state);
-void channel_schedule_reload(struct channel *c);
+void channel_schedule_reload(struct channel *c, struct channel_import_request *cir);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
} state;
};
+/* A request to reload routes previously imported to a channel,
+ * optionally restricted to nets matching a trie. */
+struct channel_import_request {
+  struct channel_import_request *next;			/* Next in request chain */
+  void (*done)(struct channel_import_request *);	/* Called when import finishes */
+  const struct f_trie *trie;				/* Reload only matching nets */
+};
+
struct channel *channel_from_export_request(struct rt_export_request *req);
void channel_request_feeding(struct channel *c, struct channel_feeding_request *);
void channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_type);
/* Table-channel connections */
+/* Restricts a table request to a subset of networks.
+ * Declared before rt_import_request so both import and export
+ * requests can embed it. */
+struct rt_prefilter {
+  union {
+    const struct f_trie *trie;
+    const net_addr *addr;	/* Network prefilter address */
+    int (*hook)(const struct rt_prefilter *, const net_addr *);
+  };
+  /* Network prefilter mode (TE_ADDR_*) */
+  enum {
+    TE_ADDR_NONE = 0,		/* No address matching */
+    TE_ADDR_EQUAL,		/* Exact query - show route <addr> */
+    TE_ADDR_FOR,		/* Longest prefix match - show route for <addr> */
+    TE_ADDR_IN,			/* Interval query - show route in <addr> */
+    TE_ADDR_TRIE,		/* Query defined by trie */
+    TE_ADDR_HOOK,		/* Query processed by supplied custom hook */
+  } mode;
+} PACKED;
+
struct rt_import_request {
struct rt_import_hook *hook; /* The table part of importer */
char *name;
u8 trace_routes;
+ struct rt_prefilter prefilter;
event_list *list; /* Where to schedule announce events */
u64 seq; /* Sequential ID (table-local) of the pending export */
};
-struct rt_prefilter {
- union {
- const struct f_trie *trie;
- const net_addr *addr; /* Network prefilter address */
- int (*hook)(const struct rt_prefilter *, const net_addr *);
- };
- /* Network prefilter mode (TE_ADDR_*) */
- enum {
- TE_ADDR_NONE = 0, /* No address matching */
- TE_ADDR_EQUAL, /* Exact query - show route <addr> */
- TE_ADDR_FOR, /* Longest prefix match - show route for <addr> */
- TE_ADDR_IN, /* Interval query - show route in <addr> */
- TE_ADDR_TRIE, /* Query defined by trie */
- TE_ADDR_HOOK, /* Query processed by supplied custom hook */
- } mode;
-} PACKED;
-
struct rt_export_request {
struct rt_export_hook *hook; /* Table part of the export */
char *name; /* Network prefilter address */