net_addr* copy = lp_alloc(s->linpool, sizeof(net_addr));
net_copy(copy, net);
struct f_trie * trie = f_new_trie(s->linpool, 0);
- log_msg(L_DEBUG "min len %i", net_pxlen(net));
if (net->type == NET_IP4 || net->type == NET_VPN4 || net->type == NET_ROA4){
- trie_add_prefix(trie, net, net_pxlen(net), net_pxlen(net)+5);
+ /* IPv4-family prefixes are at most /32; 48 was a leftover debug value
+  * (mirrors the IPv6 branch below, which correctly uses 128) */
+ trie_add_prefix(trie, net, net_pxlen(net), 32);
}
else trie_add_prefix(trie, net, net_pxlen(net), 128);
s->trie = trie;
}
ASSERT(c->channel_state == CS_UP);
+
+ /* Build the export prefilter trie from out_subprefix.
+  * out_subprefix may be NULL (addr_mode TE_ADDR_NONE below) —
+  * net_pxlen(NULL) would crash, so guard the insertion.
+  * TE_ADDR_IN accepts any net *inside* the subprefix, so store the full
+  * length range [pxlen, family max], not [pxlen, pxlen] which would only
+  * match nets of exactly that length — TODO confirm trie_match_net semantics. */
+ c->reqv_trie_lp = lp_new(c->proto->pool);
+ struct f_trie * trie = f_new_trie(c->reqv_trie_lp, 0);
+ if (c->out_subprefix)
+   trie_add_prefix(trie, c->out_subprefix, net_pxlen(c->out_subprefix),
+                   net_max_prefix_length[c->out_subprefix->type]);
c->out_req = (struct rt_export_request) {
.name = mb_sprintf(c->proto->pool, "%s.%s", c->proto->name, c->name),
.list = proto_work_list(c->proto),
.pool = c->proto->pool,
.feed_block_size = c->feed_block_size,
- .addr = c->out_subprefix,
- .addr_mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
+ .prefilter.net_filter_trie = trie,
+ .prefilter.addr_mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
.trace_routes = c->debug | c->proto->debug,
.dump_req = channel_dump_export_req,
.log_state_change = channel_export_log_state_change,
// c->ra_mode = cf->ra_mode;
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
- c->out_req.addr = c->out_subprefix = cf->out_subprefix;
+ c->out_subprefix = cf->out_subprefix;
+ /* Rebuild the export prefilter trie; out_subprefix may be NULL (no
+  * prefilter), so guard against net_pxlen(NULL). Use [pxlen, family max]
+  * so TE_ADDR_IN matches more-specific nets, not only the exact length.
+  * NOTE(review): allocating a fresh linpool on every reconfigure leaks the
+  * previous one — flush/free the old reqv_trie_lp first once the export
+  * hook's lifetime allows it; TODO confirm. */
+ c->reqv_trie_lp = lp_new(c->proto->pool);
+ struct f_trie *trie = f_new_trie(c->reqv_trie_lp, 0);
+ if (c->out_subprefix)
+   trie_add_prefix(trie, c->out_subprefix, net_pxlen(c->out_subprefix),
+                   net_max_prefix_length[c->out_subprefix->type]);
+ c->out_req.prefilter.net_filter_trie = trie;
c->debug = cf->debug;
c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
c->rpki_reload = cf->rpki_reload;
struct rt_import_request in_req; /* Table import connection */
struct rt_export_request out_req; /* Table export connection */
+
+ linpool * reqv_trie_lp; /* Linpool backing the export prefilter trie (out_req.prefilter) */
u32 refeed_count; /* Number of routes exported during refeed regardless of out_limit */
}
d->req = (struct rt_export_request) {
- .addr = d->addr,
+ .prefilter.addr = d->addr,
.name = "CLI Show Route",
.list = &global_work_list,
.pool = c->pool,
.export_bulk = rt_show_net_export_bulk,
.dump_req = rt_show_dump_req,
.log_state_change = rt_show_log_state_change,
- .addr_mode = d->addr_mode,
+ .prefilter.addr_mode = d->addr_mode,
};
d->table_counter++;
goto ignore; /* Seen already */
const net_addr *n = rpe->new_best ? rpe->new_best->rte.net : rpe->old_best->rte.net;

- switch (hook->req->addr_mode)
+ switch (hook->req->prefilter.addr_mode)
{
case TE_ADDR_NONE:
break;
case TE_ADDR_IN:
- if (!net_in_netX(n, hook->req->addr))
+ if (!net_in_netX(n, hook->req->prefilter.addr))
goto ignore;
break;
case TE_ADDR_EQUAL:
- if (!net_equal(n, hook->req->addr))
+ if (!net_equal(n, hook->req->prefilter.addr))
goto ignore;
break;
+ /* NOTE(review): this bug() was dead code after the preceding break —
+  * presumably it belongs to TE_ADDR_FOR (best-match export); label added,
+  * typo fixed. Verify against the TE_ADDR_* enum. */
+ case TE_ADDR_FOR:
- bug("Continuos export of best prefix match not implemented yet.");
+ bug("Continuous export of best prefix match not implemented yet.");
default:
- bug("Strange table export address mode: %d", hook->req->addr_mode);
+ bug("Strange table export address mode: %d", hook->req->prefilter.addr_mode);
}
if (rpe->new)
struct rt_export_request *req = hook->h.req;
/* stats zeroed by mb_allocz */
- switch (req->addr_mode)
+ switch (req->prefilter.addr_mode)
{
case TE_ADDR_IN:
if (tab->trie && net_val_match(tab->addr_type, NB_IP))
rt_trace(tab, D_EVENTS, "Stopping export hook %s must wait for uncorking", hook->h.req->name);
return 0;
case TES_FEEDING:
- switch (hh->req->addr_mode)
+ switch (hh->req->prefilter.addr_mode)
{
case TE_ADDR_IN:
if (hook->walk_lock)
FIB_ITERATE_START(&tab->fib, fit, net, n)
{
- if ((c->h.req->addr_mode == TE_ADDR_NONE) || net_in_netX(n->n.addr, c->h.req->addr))
+ /* Feed everything when unfiltered, otherwise only nets matching the
+  * request's prefilter trie */
+ if ((c->h.req->prefilter.addr_mode == TE_ADDR_NONE) || trie_match_net(c->h.req->prefilter.net_filter_trie, n->n.addr))
{
if (!rt_prepare_feed(c, n, &block))
{
RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
{
ASSERT_DIE(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_FEEDING);
- ASSERT_DIE(c->h.req->addr_mode == TE_ADDR_EQUAL);
+ ASSERT_DIE(c->h.req->prefilter.addr_mode == TE_ADDR_EQUAL);
- if (n = net_find(tab, c->h.req->addr))
+ /* req->addr is no longer assigned after the prefilter refactor — read
+  * prefilter.addr (set by rt_show) instead of the stale field */
+ if ((n = net_find(tab, c->h.req->prefilter.addr)))
ASSERT_DIE(rt_prepare_feed(c, n, &block));
RT_LOCKED(RT_PUB(SKIP_BACK(struct rtable_private, exporter, c->table)), tab)
{
ASSERT_DIE(atomic_load_explicit(&c->h.export_state, memory_order_relaxed) == TES_FEEDING);
- ASSERT_DIE(c->h.req->addr_mode == TE_ADDR_FOR);
+ ASSERT_DIE(c->h.req->prefilter.addr_mode == TE_ADDR_FOR);
- if (n = net_route(tab, c->h.req->addr))
+ /* req->addr is no longer assigned after the prefilter refactor — read
+  * prefilter.addr instead of the stale field */
+ if ((n = net_route(tab, c->h.req->prefilter.addr)))
ASSERT_DIE(rt_prepare_feed(c, n, &block));
u64 seq; /* Sequential ID (table-local) of the pending export */
};
+/*
+ * Export prefilter: restricts which nets a table export request receives.
+ * Exactly one union member is valid, selected by addr_mode (TE_ADDR_*):
+ * a trie of accepted prefixes (TE_ADDR_IN via trie match) or a single
+ * prefilter address.
+ * PACKED dropped — packing a pointer-bearing struct forces unaligned member
+ * accesses on some targets and buys no layout benefit here (pointers + a
+ * trailing u8 already pack naturally).
+ */
+struct rt_prefilter_address {
+  union {
+    const struct f_trie *net_filter_trie;	/* Trie of accepted prefixes */
+    const net_addr *addr;			/* Single prefilter address */
+  };
+  u8 addr_mode;					/* Network prefilter mode (TE_ADDR_*) */
+};
+
struct rt_export_request {
struct rt_export_hook *hook; /* Table part of the export */
char *name;
const net_addr *addr; /* Network prefilter address — NOTE(review): superseded by prefilter.addr; several call sites still read this now-unset field — migrate them, then remove this member */
u8 trace_routes;
- u8 addr_mode; /* Network prefilter mode (TE_ADDR_*) */
uint feed_block_size; /* How many routes to feed at once */
+ struct rt_prefilter_address prefilter; /* Network prefilter (trie or addr, plus TE_ADDR_* mode) */
event_list *list; /* Where to schedule export events */
pool *pool; /* Pool to use for allocations */
int max = 512;
- const net_addr *neq = (hook->h.req->addr_mode == TE_ADDR_EQUAL) ? hook->h.req->addr : NULL;
+ /* Complete the prefilter migration: read prefilter.addr, not the stale
+  * req->addr field that is no longer assigned */
+ const net_addr *neq = (hook->h.req->prefilter.addr_mode == TE_ADDR_EQUAL) ? hook->h.req->prefilter.addr : NULL;
const net_addr *cand = NULL;
do {
HASH_WALK_ITER(c->prefix_hash, PXH, n, hook->hash_iter)
{
- switch (hook->h.req->addr_mode)
+ switch (hook->h.req->prefilter.addr_mode)
{
case TE_ADDR_IN:
- if (!net_in_netX(n->net, hook->h.req->addr))
+ if (!net_in_netX(n->net, hook->h.req->prefilter.addr))