<tag><label id="proto-rpki-reload">rpki reload <m/switch/</tag>
Import or export filters may depend on route RPKI status (using
- <cf/roa_check()/ operator). In contrast to to other filter operators,
- this status for the same route may change as the content of ROA tables
- changes. When this option is active, BIRD activates automatic reload of
- the appropriate subset of prefixes imported or exported by the channels
- whenever ROA tables are updated (after a short settle
- time). When disabled, route reloads have to be requested manually. The
- option is ignored if <cf/roa_check()/ is not used in channel filters.
- Note that for BGP channels, automatic reload requires
+ <cf/roa_check()/ or <cf/aspa_check()/ operators). In contrast to other
+ filter operators, this status for the same route may change as the
+ content of ROA and ASPA tables changes. When this option is active, BIRD
- activates automatic reload of affected channels whenever ROA and ASPA
++ activates automatic reload of the appropriate subset of prefixes imported
++ or exported by the channels whenever ROA and ASPA
+ tables are updated (after a short settle time). When disabled, route
+ reloads have to be requested manually. The option is ignored if neither
+ <cf/roa_check()/ nor <cf/aspa_check()/ is used in channel filters. Note
+ that for BGP channels, automatic reload requires
<ref id="bgp-import-table" name="import table"> or
<ref id="bgp-export-table" name="export table"> (for respective
direction). Default: on.
enum aspa_result {
ASPA_UNKNOWN = 0,
ASPA_VALID,
- ASPA_INVALID_EMPTY,
- ASPA_INVALID_CONFED,
- ASPA_INVALID_LEAK,
+ ASPA_INVALID,
};
+int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
+enum aspa_result aspa_check(rtable *tab, const struct adata *path, bool force_upstream);
+
#endif
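For orientation, a minimal sketch of how a caller might consume these two checks. The table pointers, prefix, origin ASN and AS_PATH adata are assumed inputs here, not the actual filter-instruction call sites; ROA_INVALID is the usual ROA status constant, and force_upstream=0 selects the downstream rules, matching the filter shortcuts further below.

static int
example_rpki_status(rtable *roa_tab, rtable *aspa_tab,
                    const net_addr *pfx, u32 origin_asn,
                    const struct adata *as_path)
{
  /* Origin validation: the prefix and its origin ASN against the ROA table */
  if (net_roa_check(roa_tab, pfx, origin_asn) == ROA_INVALID)
    return 0;

  /* AS_PATH verification against the ASPA table; 0 = downstream rules */
  switch (aspa_check(aspa_tab, as_path, 0))
  {
    case ASPA_INVALID:  return 0;
    case ASPA_UNKNOWN:  return 1;   /* not covered by ASPA data */
    case ASPA_VALID:    return 1;
  }

  return 1;
}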
return ASPA_UNKNOWN;
/* Now there is surely a valley there. */
- return ASPA_INVALID_LEAK;
+ return ASPA_INVALID;
}
-/**
- * rte_find - find a route
- * @net: network node
- * @src: route source
- *
- * The rte_find() function returns a route for destination @net
- * which is from route source @src.
- */
-rte *
-rte_find(net *net, struct rte_src *src)
+struct rte_storage *
+rte_store(const rte *r, struct netindex *i, struct rtable_private *tab)
{
- rte *e = net->routes;
+ struct rte_storage *s = sl_alloc(tab->rte_slab);
+ struct rte *e = RTES_WRITE(s);
+
+ *e = *r;
+ e->net = i->addr;
+ net_lock_index(tab->netindex, i);
+
+ rt_lock_source(e->src);
+
+ e->attrs = ea_lookup(e->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED), EALS_IN_TABLE);
+
+#if 0
+ debug("(store) %N ", i->addr);
+ ea_dump(e->attrs);
+ debug("\n");
+#endif
- while (e && e->src != src)
- e = e->next;
- return e;
+ return s;
}
+static void rte_free_deferred(struct deferred_call *dc);
+
+struct rte_free_deferred_item {
+ struct deferred_call dc;
+ struct rte_storage *e;
+ rtable *tab;
+};
+
/**
- * rte_get_temp - get a temporary &rte
- * @a: attributes to assign to the new route (a &rta; in case it's
- * un-cached, rte_update() will create a cached copy automatically)
- * @src: route source
+ * rte_free - delete a &rte (deferred)
+ * @e: &struct rte_storage to be deleted
+ * @tab: the table which the rte belongs to
*
- * Create a temporary &rte and bind it with the attributes @a.
+ * rte_free() frees the given &rte storage belonging to table @tab. The
+ * actual deallocation is deferred, so readers still holding the route keep
+ * a valid pointer until the deferred call runs.
*/
-rte *
-rte_get_temp(rta *a, struct rte_src *src)
+
+static void
+rte_free(struct rte_storage *e, struct rtable_private *tab)
{
- rte *e = sl_alloc(rte_slab);
+ struct rte_free_deferred_item rfdi = {
+ .dc.hook = rte_free_deferred,
+ .e = e,
+ .tab = RT_PUB(tab),
+ };
- e->attrs = a;
- e->id = 0;
- e->flags = 0;
- e->pflags = 0;
- rt_lock_source(e->src = src);
- return e;
+ /* Keep the table referenced while any deferred frees are pending; the
+  * matching rt_unlock_table() runs in rte_free_deferred() once the counter
+  * drops back to zero. */
+ if (!tab->rte_free_deferred++)
+ rt_lock_table(tab);
+
+ rt_rte_trace_in(D_ROUTES, e->rte.sender->req, &e->rte, "freeing");
+ defer_call(&rfdi.dc, sizeof rfdi);
}
-rte *
-rte_do_cow(rte *r)
+static void
+rte_free_deferred(struct deferred_call *dc)
{
- rte *e = sl_alloc(rte_slab);
+ SKIP_BACK_DECLARE(struct rte_free_deferred_item, rfdi, dc, dc);
- memcpy(e, r, sizeof(rte));
+ struct rte_storage *e = rfdi->e;
+ RT_LOCK(rfdi->tab, tab);
- rt_lock_source(e->src);
- e->attrs = rta_clone(r->attrs);
- e->flags = 0;
- return e;
-}
+ /* No need for synchronize_rcu, implied by the deferred_call */
-/**
- * rte_cow_rta - get a private writable copy of &rte with writable &rta
- * @r: a route entry to be copied
- * @lp: a linpool from which to allocate &rta
- *
- * rte_cow_rta() takes a &rte and prepares it and associated &rta for
- * modification. There are three possibilities: First, both &rte and &rta are
- * private copies, in that case they are returned unchanged. Second, &rte is
- * private copy, but &rta is cached, in that case &rta is duplicated using
- * rta_do_cow(). Third, both &rte is shared and &rta is cached, in that case
- * both structures are duplicated by rte_do_cow() and rta_do_cow().
- *
- * Note that in the second case, cached &rta loses one reference, while private
- * copy created by rta_do_cow() is a shallow copy sharing indirect data (eattrs,
- * nexthops, ...) with it. To work properly, original shared &rta should have
- * another reference during the life of created private copy.
- *
- * Result: a pointer to the new writable &rte with writable &rta.
- */
-rte *
-rte_cow_rta(rte *r, linpool *lp)
-{
- if (!rta_is_cached(r->attrs))
- return r;
+ struct netindex *i = RTE_GET_NETINDEX(&e->rte);
+ net_unlock_index(tab->netindex, i);
- r = rte_cow(r);
- rta *a = rta_do_cow(r->attrs, lp);
- rta_free(r->attrs);
- r->attrs = a;
- return r;
+ rt_unlock_source(e->rte.src);
+
+ ea_free(e->rte.attrs);
+ sl_free(e);
+
+ if (!--tab->rte_free_deferred)
+ rt_unlock_table(tab);
}
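The same deferred-call pattern generalizes beyond route storage. A minimal sketch for an unrelated, hypothetical cleanup job (every example_* name is invented for illustration), relying only on defer_call() and SKIP_BACK_DECLARE() as used above, and on the item being copied into the deferred queue (which is what passing a stack local and its size above implies):

struct example_obj;                                     /* hypothetical */
void example_obj_really_free(struct example_obj *);     /* hypothetical */

struct example_cleanup_item {
  struct deferred_call dc;
  struct example_obj *obj;
};

static void
example_cleanup_hook(struct deferred_call *dc)
{
  /* Recover the enclosing item from the embedded deferred_call */
  SKIP_BACK_DECLARE(struct example_cleanup_item, it, dc, dc);

  /* Runs later, once the deferred-call machinery guarantees that current
   * readers are done (cf. the synchronize_rcu note above) */
  example_obj_really_free(it->obj);
}

static void
example_cleanup(struct example_obj *obj)
{
  struct example_cleanup_item it = {
    .dc.hook = example_cleanup_hook,
    .obj = obj,
  };

  /* The item is copied by defer_call(), so a stack local is fine */
  defer_call(&it.dc, sizeof it);
}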
static int /* Actually better or at least as good as */
bgp_proto_channel: bgp_channel_start bgp_channel_opt_list bgp_channel_end;
-
-dynamic_attr: BGP_ORIGIN
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_ENUM_BGP_ORIGIN, EA_CODE(PROTOCOL_BGP, BA_ORIGIN)); $$.flags = BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_PATH
- { $$ = f_new_dynamic_attr(EAF_TYPE_AS_PATH, T_PATH, EA_CODE(PROTOCOL_BGP, BA_AS_PATH)); $$.flags = BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_NEXT_HOP
- { $$ = f_new_dynamic_attr(EAF_TYPE_IP_ADDRESS, T_IP, EA_CODE(PROTOCOL_BGP, BA_NEXT_HOP)); $$.flags = BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_MED
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_CODE(PROTOCOL_BGP, BA_MULTI_EXIT_DISC)); $$.flags = BAF_OPTIONAL; } ;
-dynamic_attr: BGP_LOCAL_PREF
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_CODE(PROTOCOL_BGP, BA_LOCAL_PREF)); $$.flags = BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_ATOMIC_AGGR
- { $$ = f_new_dynamic_attr(EAF_TYPE_OPAQUE, T_ENUM_EMPTY, EA_CODE(PROTOCOL_BGP, BA_ATOMIC_AGGR)); $$.flags = BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_AGGREGATOR
- { $$ = f_new_dynamic_attr(EAF_TYPE_OPAQUE, T_ENUM_EMPTY, EA_CODE(PROTOCOL_BGP, BA_AGGREGATOR)); $$.flags = BAF_OPTIONAL | BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_COMMUNITY
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT_SET, T_CLIST, EA_CODE(PROTOCOL_BGP, BA_COMMUNITY)); $$.flags = BAF_OPTIONAL | BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_ORIGINATOR_ID
- { $$ = f_new_dynamic_attr(EAF_TYPE_ROUTER_ID, T_QUAD, EA_CODE(PROTOCOL_BGP, BA_ORIGINATOR_ID)); $$.flags = BAF_OPTIONAL; } ;
-dynamic_attr: BGP_CLUSTER_LIST
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT_SET, T_CLIST, EA_CODE(PROTOCOL_BGP, BA_CLUSTER_LIST)); $$.flags = BAF_OPTIONAL; } ;
-dynamic_attr: BGP_EXT_COMMUNITY
- { $$ = f_new_dynamic_attr(EAF_TYPE_EC_SET, T_ECLIST, EA_CODE(PROTOCOL_BGP, BA_EXT_COMMUNITY)); $$.flags = BAF_OPTIONAL | BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_AIGP
- { $$ = f_new_dynamic_attr(EAF_TYPE_OPAQUE, T_ENUM_EMPTY, EA_CODE(PROTOCOL_BGP, BA_AIGP)); $$.flags = BAF_OPTIONAL; } ;
-dynamic_attr: BGP_LARGE_COMMUNITY
- { $$ = f_new_dynamic_attr(EAF_TYPE_LC_SET, T_LCLIST, EA_CODE(PROTOCOL_BGP, BA_LARGE_COMMUNITY)); $$.flags = BAF_OPTIONAL | BAF_TRANSITIVE; } ;
-dynamic_attr: BGP_OTC
- { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_CODE(PROTOCOL_BGP, BA_ONLY_TO_CUSTOMER)); $$.flags = BAF_OPTIONAL | BAF_TRANSITIVE; } ;
-
-custom_attr: ATTRIBUTE BGP expr type symbol ';' {
+custom_attr: ATTRIBUTE BGP NUM type symbol ';' {
if ($3 > 255 || $3 < 1)
- cf_error("Invalid attribute number (Given %i, must be 1-255)", $3);
- if ($4 != T_BYTESTRING)
- cf_error("Attribute type must be bytestring, not %s", f_type_name($4));
- if (bgp_attr_name($3))
- cf_error("Attribute BGP.%d already known as %s", $3, bgp_attr_name($3));
-
- struct f_dynamic_attr *a = cfg_alloc(sizeof(struct f_dynamic_attr));
- *a = f_new_dynamic_attr(f_type_attr($4), T_BYTESTRING, EA_CODE(PROTOCOL_BGP, $3));
- a->flags = BAF_TRANSITIVE | BAF_OPTIONAL;
- cf_define_symbol(new_config, $5, SYM_ATTRIBUTE, attribute, a);
+ cf_error("Invalid attribute number. (Given %i, must be 1-255.)", $3);
+
+ struct ea_class *ac = bgp_find_ea_class_by_id($3);
+ ASSERT_DIE(ac);
+ if ($4 != ac->type)
+ cf_error("Attribute %d type must be %s, not %s.", $3, f_type_name(ac->type), f_type_name($4));
+
+ ea_ref_class(new_config->pool, ac);
+ cf_define_symbol(new_config, $5, SYM_ATTRIBUTE, attribute, ac);
};
-CF_ENUM(T_ENUM_BGP_ORIGIN, ORIGIN_, IGP, EGP, INCOMPLETE)
+CF_CLI(RELOAD BGP, proto_patt, [<name>], [[Send and request route refresh to/from neighbor]])
+{
+ proto_apply_cmd($3, bgp_reload_in, 1, 0);
+ proto_apply_cmd($3, bgp_reload_out, 1, 0);
+};
+
+CF_CLI(RELOAD BGP IN, proto_patt, [<name>], [[Request route refresh from neighbor]])
+{
+ proto_apply_cmd($4, bgp_reload_in, 1, 0);
+};
+
+CF_CLI(RELOAD BGP OUT, proto_patt, [<name>], [[Refresh routes to neighbor]])
+{
+ proto_apply_cmd($4, bgp_reload_out, 1, 0);
+};
/* ASPA shortcuts */
- term: ASPA_CHECK '(' rtable ')' { $$ =
+ term: ASPA_CHECK_DOWNSTREAM '(' rtable ')' { $$ =
f_new_inst(FI_ASPA_CHECK_EXPLICIT,
f_new_inst(FI_EA_GET,
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_ROUTE, .val.rte = NULL, }),
- f_new_dynamic_attr(EAF_TYPE_AS_PATH, T_PATH, EA_CODE(PROTOCOL_BGP, BA_AS_PATH))
+ ea_class_find_by_name("bgp_path")
- ),
+ ),
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_BOOL, .val.i = 0, }),
$3
);
f_new_inst(FI_ASPA_CHECK_EXPLICIT,
f_new_inst(FI_EA_GET,
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_ROUTE, .val.rte = NULL, }),
- f_new_dynamic_attr(EAF_TYPE_AS_PATH, T_PATH, EA_CODE(PROTOCOL_BGP, BA_AS_PATH))
+ ea_class_find_by_name("bgp_path")
- ),
+ ),
f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_BOOL, .val.i = 1, }),
$3
);
[ERROR] = 16,
};
-static int rpki_send_error_pdu(struct rpki_cache *cache, const enum pdu_error_type error_code, const u32 err_pdu_len, const struct pdu_header *erroneous_pdu, const char *fmt, ...);
+ static inline int rpki_pdu_aspa_provider_asn_count(const struct pdu_aspa *pdu)
+ { return (pdu->len - sizeof(struct pdu_aspa)) / (sizeof(u32)); }
+
+static int rpki_send_error_pdu_(struct rpki_cache *cache, const enum pdu_error_type error_code, const u32 err_pdu_len, const struct pdu_header *erroneous_pdu, const char *fmt, ...);
+
+#define rpki_send_error_pdu(cache, error_code, err_pdu_len, erroneous_pdu, fmt...) ({ \
+ rpki_send_error_pdu_(cache, error_code, err_pdu_len, erroneous_pdu, fmt); \
+ CACHE_TRACE(D_PACKETS, cache, fmt); \
+ })
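A hypothetical call site, to show what the wrapper buys: one statement both sends the error PDU to the cache and emits the matching packet trace. The error code and message are illustrative, not copied from a real call site.

  rpki_send_error_pdu(cache, CORRUPT_DATA, pdu->len, pdu,
                      "Received malformed ASPA PDU (%u bytes)", pdu->len);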
static void
rpki_pdu_to_network_byte_order(struct pdu_header *pdu)
rpki_table_remove_roa(struct rpki_cache *cache, struct channel *channel, const net_addr_union *pfxr)
{
struct rpki_proto *p = cache->p;
- rte_update2(channel, &pfxr->n, NULL, p->p.main_source);
+ rte_update(channel, &pfxr->n, NULL, p->p.main_source);
}
- rta a0 = {
- .pref = channel->preference,
- .source = RTS_RPKI,
- .scope = SCOPE_UNIVERSE,
- .dest = RTD_NONE,
- };
+ void
+ rpki_table_add_aspa(struct rpki_cache *cache, struct channel *channel,
+ u32 customer, void *providers, uint providers_length)
+ {
+ struct rpki_proto *p = cache->p;
+
+ net_addr_union n = { .aspa = NET_ADDR_ASPA(customer) };
- ea_set_attr_data(&a0.eattrs, tmp_linpool, EA_ASPA_PROVIDERS, 0,
- EAF_TYPE_INT_SET, providers, providers_length);
+
- rta *a = rta_lookup(&a0);
- rte *e = rte_get_temp(a, p->p.main_source);
++ ea_list *ea = NULL;
++ ea_set_attr_u32(&ea, &ea_gen_preference, 0, channel->preference);
++ ea_set_attr_u32(&ea, &ea_gen_source, 0, RTS_RPKI);
++
++ ea_set_attr_data(&ea, &ea_gen_aspa_providers, 0, providers, providers_length);
+
- rte_update2(channel, &n.n, e, e->src);
++ rte e0 = { .attrs = ea, .src = p->p.main_source, };
+
- rte_update2(channel, &n.n, NULL, p->p.main_source);
++ rte_update(channel, &n.n, &e0, p->p.main_source);
+ }
+
+ void
+ rpki_table_remove_aspa(struct rpki_cache *cache, struct channel *channel, u32 customer)
+ {
+ struct rpki_proto *p = cache->p;
+ net_addr_union n = { .aspa = NET_ADDR_ASPA(customer) };
++ rte_update(channel, &n.n, NULL, p->p.main_source);
++}
++
+void
+rpki_start_refresh(struct rpki_proto *p)
+{
+ if (p->roa4_channel)
+ rt_refresh_begin(&p->roa4_channel->in_req);
+ if (p->roa6_channel)
+ rt_refresh_begin(&p->roa6_channel->in_req);
++ if (p->aspa_channel)
++ rt_refresh_begin(&p->aspa_channel->in_req);
+
+ p->refresh_channels = 1;
}
+void
+rpki_stop_refresh(struct rpki_proto *p)
+{
+ if (!p->refresh_channels)
+ return;
+
+ p->refresh_channels = 0;
+
+ if (p->roa4_channel)
+ rt_refresh_end(&p->roa4_channel->in_req);
+ if (p->roa6_channel)
+ rt_refresh_end(&p->roa6_channel->in_req);
++ if (p->aspa_channel)
++ rt_refresh_end(&p->aspa_channel->in_req);
+}
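How the two helpers are meant to pair, as a rough sketch; the surrounding cache-session handling is elided and the function name is invented:

static void
example_cache_reload(struct rpki_proto *p)
{
  /* A reload from the cache opens a refresh cycle on all channels... */
  rpki_start_refresh(p);

  /* ...the records received from the cache are (re)announced here, e.g.
   * via rpki_table_add_roa() / rpki_table_add_aspa() per record... */

  /* ...and closing the refresh lets the tables withdraw whatever the
   * cache did not re-announce. */
  rpki_stop_refresh(p);
}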
/*
* RPKI Protocol Logic
void rpki_table_add_roa(struct rpki_cache *cache, struct channel *channel, const net_addr_union *pfxr);
void rpki_table_remove_roa(struct rpki_cache *cache, struct channel *channel, const net_addr_union *pfxr);
+ void rpki_table_add_aspa(struct rpki_cache *cache, struct channel *channel, u32 customer, void *providers, uint providers_length);
+ void rpki_table_remove_aspa(struct rpki_cache *cache, struct channel *channel, u32 customer);
+
+void rpki_start_refresh(struct rpki_proto *p);
+void rpki_stop_refresh(struct rpki_proto *p);
/*
* RPKI Protocol Logic