INT, BOOL, IP, PREFIX, RD, PAIR, QUAD, EC, LC,
SET, STRING, BYTESTRING, BGPMASK, BGPPATH, CLIST, ECLIST, LCLIST,
IF, THEN, ELSE, CASE,
- FOR, DO,
+ FOR, IN, DO,
TRUE, FALSE, RT, RO, UNKNOWN, GENERIC,
- FROM, GW, NET, PROTO, SOURCE, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS, GW_MPLS_STACK, ONLINK,
- PREFERENCE,
+ FROM, GW, NET, PROTO, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS,
- ROA_CHECK,
+ ROA_CHECK, ASPA_CHECK,
DEFINED,
ADD, DELETE, RESET,
PREPEND,
| DELETE '(' term ',' term ')' { $$ = f_dispatch_method_x("delete", $3->type, $3, $5); }
| FILTER '(' term ',' term ')' { $$ = f_dispatch_method_x("filter", $3->type, $3, $5); }
- | ROA_CHECK '(' rtable ')' { $$ = f_new_inst(FI_ROA_CHECK_IMPLICIT, $3); }
- | ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK_EXPLICIT, $5, $7, $3); }
+ | ROA_CHECK '(' rtable ')' { $$ = f_implicit_roa_check($3); }
+ | ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK, $5, $7, $3); }
+ | ASPA_CHECK '(' rtable ',' term ')' { $$ = f_new_inst(FI_ASPA_CHECK_EXPLICIT, $5, $3); }
| FORMAT '(' term ')' { $$ = f_new_inst(FI_FORMAT, $3); }
[T_ENUM_RTS] = "enum rts",
[T_ENUM_BGP_ORIGIN] = "enum bgp_origin",
[T_ENUM_SCOPE] = "enum scope",
- [T_ENUM_RTC] = "enum rtc",
[T_ENUM_RTD] = "enum rtd",
[T_ENUM_ROA] = "enum roa",
+ [T_ENUM_ASPA] = "enum aspa",
[T_ENUM_NETTYPE] = "enum nettype",
[T_ENUM_RA_PREFERENCE] = "enum ra_preference",
[T_ENUM_AF] = "enum af",
}
- struct rtable *table = rtc->table;
+ INST(FI_ASPA_CHECK_EXPLICIT, 1, 1) { /* ASPA Check */
+ NEVER_CONSTANT;
+ ARG(1, T_PATH);
+ RTC(2);
++ rtable *table = rtc->table;
+
+ if (!table)
+ runtime("Missing ASPA table");
+
+ if (table->addr_type != NET_ASPA)
+ runtime("Table type must be ASPA");
+
+ RESULT(T_ENUM_ASPA, i, [[ aspa_check(table, v1.val.ad) ]]);
+ }
+
INST(FI_FROM_HEX, 1, 1) { /* Convert hex text to bytestring */
ARG(1, T_STRING);
--- /dev/null
+/*
+ * BIRD Internet Routing Daemon -- Internal Data Types
+ *
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_TYPE_H_
+#define _BIRD_TYPE_H_
+
+#include "lib/birdlib.h"
+#include "lib/attrs.h"
+
+union bval {
+#define BVAL_ITEMS \
+ struct { \
+ u32 data; /* Integer type inherited from eattrs */ \
+ PADDING(data, 0, 4); /* Must be padded on 64-bits */ \
+ }; \
+ struct { \
+ u32 i; /* Integer type inherited from filters */ \
+ PADDING(i, 0, 4); /* Must be padded on 64-bits */ \
+ }; \
+ const struct adata *ptr; /* Generic attribute data inherited from eattrs */ \
+ const struct adata *ad; /* Generic attribute data inherited from filters */ \
+ const void * v_ptr; /* Stored pointer */ \
+
+ BVAL_ITEMS;
+};
+
+union bval_long {
+ union bval bval; /* For direct assignments */
+ BVAL_ITEMS; /* For item-wise access */
+
+ u64 ec;
+ lcomm lc;
+ ip_addr ip;
+ const net_addr *net;
+ const char *s;
+ const struct adata *bs;
+ const struct f_tree *t;
+ const struct f_trie *ti;
+ const struct f_path_mask *path_mask;
+ struct f_path_mask_item pmi;
+ struct rte *rte;
+ struct rte_block {
+ struct rte **rte;
+ uint len;
+ } rte_block;
+};
+
+
+/* Internal types */
+enum btype {
+/* Nothing. Simply nothing. */
+ T_VOID = 0,
+ T_NONE = 0xff,
+
+/* Something but inaccessible. */
+ T_OPAQUE = 0x02, /* Opaque byte string (not filterable) */
+ T_IFACE = 0x0c, /* Pointer to an interface (inside adata) */
+ T_ROUTES_BLOCK = 0x68, /* Block of route pointers */
+ T_ROUTE = 0x6a, /* One route pointer */
+ T_NEXTHOP_LIST = 0x6c, /* The whole nexthop block */
+ T_HOSTENTRY = 0x6e, /* Hostentry with possible MPLS labels */
+
+/* Types shared with eattrs */
+ T_INT = 0x01, /* 32-bit unsigned integer number */
+ T_IP = 0x04, /* IP address */
+ T_QUAD = 0x05, /* Router ID (IPv4 address) */
+ T_PATH = 0x06, /* BGP AS path (encoding per RFC 1771:4.3) */
+ T_CLIST = 0x0a, /* Set of u32's (e.g., a community list) */
+ T_ECLIST = 0x0e, /* Set of pairs of u32's - ext. community list */
+ T_LCLIST = 0x08, /* Set of triplets of u32's - large community list */
+ T_STRING = 0x10,
+ T_PTR = 0x11, /* Void pointer */
+
+ T_ENUM_BGP_ORIGIN = 0x13, /* BGP Origin enum */
+ T_ENUM_RA_PREFERENCE = 0x15, /* RA Preference enum */
+ T_ENUM_FLOWSPEC_VALID = 0x17, /* Flowspec validation result */
+
+#define EAF_EMBEDDED 0x01 /* Data stored in eattr.u.data (part of type spec) */
+ /* Otherwise, attribute data is adata */
+
+/* Other user visible types which fit in int */
+ T_BOOL = 0xa0,
+ T_PAIR = 0xa4, /* Notice that pair is stored as integer: first << 16 | second */
+
+/* Put enumeration types in the 0x20..0x3f range */
+ T_ENUM_LO = 0x12,
+ T_ENUM_HI = 0x3f,
+
++ T_ENUM_ASPA = 0x2f, /* ASPA validation result */
+ T_ENUM_RTS = 0x31,
+ T_ENUM_SCOPE = 0x33,
+ T_ENUM_MPLS_POLICY = 0x35,
+ T_ENUM_RTD = 0x37,
+ T_ENUM_ROA = 0x39,
+ T_ENUM_NETTYPE = 0x3b,
+ T_ENUM_AF = 0x3d,
+
+/* new enums go here */
+
+#define T_ENUM T_ENUM_LO ... T_ENUM_HI
+
+/* Bigger ones */
+ T_NET = 0xb0,
+ T_PATH_MASK = 0xb8, /* mask for BGP path */
+ T_EC = 0xbc, /* Extended community value, u64 */
+ T_LC = 0xc0, /* Large community value, lcomm */
+ T_RD = 0xc4, /* Route distinguisher for VPN addresses */
+ T_PATH_MASK_ITEM = 0xc8, /* Path mask item for path mask constructors */
+ T_BYTESTRING = 0xcc,
+ T_ROA_AGGREGATED = 0xd0, /* ASN and maxlen tuple list */
+
+
+ T_SET = 0x80,
+ T_PREFIX_SET = 0x84,
+
+/* protocol */
+ T_ENUM_STATE = 0xd1,
+ T_BTIME = 0xd4,
+} PACKED;
+
+typedef enum btype btype;
+
+STATIC_ASSERT(sizeof(btype) == sizeof(byte));
+
+
+#endif
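
Editor's note: the T_ENUM macro above relies on the GCC/Clang case-range extension. A minimal sketch of the intended use (illustration only, not part of the patch; btype_is_enum is a hypothetical helper name):

    #include "lib/type.h"

    /* Return 1 for any btype value in the T_ENUM_LO..T_ENUM_HI range. */
    static inline int
    btype_is_enum(btype t)
    {
      switch (t)
      {
        case T_ENUM:        /* expands to T_ENUM_LO ... T_ENUM_HI */
          return 1;
        default:
          return 0;
      }
    }
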
#include "lib/bitmap.h"
#include "lib/resource.h"
#include "lib/net.h"
+#include "lib/netindex.h"
+#include "lib/obstacle.h"
+#include "lib/type.h"
+#include "lib/fib.h"
+#include "lib/route.h"
+#include "lib/event.h"
+#include "lib/rcu.h"
+#include "lib/io-loop.h"
+#include "lib/settle.h"
+
+#include "filter/data.h"
+
+#include "conf/conf.h"
+
+#include <stdatomic.h>
struct ea_list;
+ struct adata;
struct protocol;
struct proto;
+struct channel;
struct rte_src;
+struct hostcache;
struct symbol;
struct timer;
-struct fib;
struct filter;
struct f_trie;
struct f_trie_walk_state;
#define ROA_VALID 1
#define ROA_INVALID 2
+ #define ASPA_UNKNOWN 0
+ #define ASPA_VALID 1
+ #define ASPA_INVALID 2
+ #define ASPA_CONTAINS_CONFED 3
+
+int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
++int aspa_check(rtable *tab, const struct adata *path);
+
#endif
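
Editor's note: to illustrate the new entry point declared above, a hypothetical caller might look like this (sketch only; the ASPA table and the AS-path adata are assumed to come from the caller's context, and example_aspa_decision is not a function this patch adds):

    /* Act on the ASPA verification result of a received AS path. */
    static void
    example_aspa_decision(rtable *aspa_tab, const struct adata *as_path)
    {
      switch (aspa_check(aspa_tab, as_path))
      {
        case ASPA_VALID:
          log(L_TRACE "ASPA: path is valid");
          break;
        case ASPA_UNKNOWN:
          log(L_TRACE "ASPA: path validity unknown");
          break;
        case ASPA_CONTAINS_CONFED:
          log(L_TRACE "ASPA: path has confederation segments, not checked");
          break;
        case ASPA_INVALID:
          log(L_WARN "ASPA: path is invalid");
          break;
      }
    }

In the filter language the same check is exposed through the aspa_check(<table>, <path>) syntax added in the grammar hunk above.
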
}
return anything ? ROA_INVALID : ROA_UNKNOWN;
-#undef ROA_PARTIAL_CHECK
#undef TW
-#undef FW
}
- struct lp_state lps;
- lp_save(tmp_linpool, &lps);
+ /**
+ * aspa_check - check validity of an AS path against an ASPA table
+ * @tab: ASPA table
+ * @path: AS path to check
+ *
+ * Implements draft-ietf-sidrops-aspa-verification-16.
+ *
+ * Result: ASPA_VALID, ASPA_UNKNOWN, ASPA_INVALID, or ASPA_CONTAINS_CONFED
+ * when the path contains confederation segments, which are not supported.
+ */
+ int
+ aspa_check(rtable *tab, const struct adata *path)
+ {
- net *n = net_find(tab, &nau.n);
- if (!n || !n->routes)
++ /* Restore tmp linpool state after this check */
++ CLEANUP(lp_saved_cleanup) struct lp_state *_lps = lp_save(tmp_linpool);
+
+ /* No support for confed paths */
+ if (as_path_contains_confed(path))
+ return ASPA_CONTAINS_CONFED;
+
+ /* Normalize the AS Path: drop path stuffing (consecutive duplicate ASNs) */
+ uint len = as_path_getlen(path);
+ u32 *asns = alloca(sizeof(u32) * len);
+ uint ppos = 0;
+ int nsz = 0;
+ while (as_path_walk(path, &ppos, &asns[nsz]))
+ if ((nsz == 0) || (asns[nsz] != asns[nsz-1]))
+ nsz++;
+
+ /* Find the provider blocks for every AS on the path
+ * and check allowed directions */
+ bool *up = alloca(sizeof(bool) * nsz);
+ bool *down = alloca(sizeof(bool) * nsz);
+ bool unknown_flag = false;
+
++ RT_READ(tab, tr);
++
+ for (int ap=0; ap<nsz; ap++)
+ {
+ net_addr_union nau = { .aspa = NET_ADDR_ASPA(asns[ap]), };
- /* No ASPA for this ASN, therefore UNKNOWN */
++ bool seen = false;
++
++ /* Find some ASPAs */
++ struct netindex *ni = net_find_index(tr->t->netindex, &nau.n);
++ net *n = ni ? net_find(tr, ni) : NULL;
++
++ if (!n)
+ {
- for (rte *e = n->routes; e; e = e->next)
+ unknown_flag = up[ap] = down[ap] = true;
+ continue;
+ }
+
+ up[ap] = down[ap] = false;
+
- if (!rte_is_valid(e))
++ /* Walk the existing records */
++ NET_READ_WALK_ROUTES(tr, n, ep, e)
+ {
- eattr *ea = ea_find(e->attrs->eattrs, EA_ASPA_PROVIDERS);
++ if (!rte_is_valid(&e->rte))
+ continue;
+
- if (down[ap] || up[ap])
- goto peering_found;
++ eattr *ea = ea_find(e->rte.attrs, &ea_gen_aspa_providers);
+ if (!ea)
+ continue;
+
++ seen = true;
++
+ for (uint i=0; i * sizeof(u32) < ea->u.ptr->length; i++)
+ {
+ if ((ap > 0) && ((u32 *) ea->u.ptr->data)[i] == asns[ap-1])
+ down[ap] = true;
+ if ((ap + 1 < nsz) && ((u32 *) ea->u.ptr->data)[i] == asns[ap+1])
+ up[ap] = true;
+
-peering_found:;
++ if (down[ap] && up[ap])
++ break;
+ }
++
++ if (down[ap] && up[ap])
++ break;
+ }
-/**
- * rte_find - find a route
- * @net: network node
- * @src: route source
- *
- * The rte_find() function returns a route for destination @net
- * which is from route source @src.
- */
-rte *
-rte_find(net *net, struct rte_src *src)
++
++ /* No ASPA for this ASN, therefore UNKNOWN */
++ if (!seen)
++ unknown_flag = up[ap] = down[ap] = true;
+ }
+
+ /* Check whether the topology is first ramp up and then ramp down. */
+ int up_end = 0;
+ while (up_end < nsz && up[up_end])
+ up_end++;
+
+ int down_end = nsz - 1;
+ while (down_end > 0 && down[down_end])
+ down_end--;
+
+ /* A significant overlap of obvious unknowns or misconfigured ASPAs. */
+ if (up_end - down_end >= 2)
+ return ASPA_UNKNOWN;
+
+ /* The path has either a single transit provider, or a peering pair on top */
+ else if (up_end - down_end >= 0)
+ return unknown_flag ? ASPA_UNKNOWN : ASPA_VALID;
+
+ /* There is a gap between valid ramp up and valid ramp down */
+ else
+ return ASPA_INVALID;
+ }
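
Editor's note: a tiny helper like the following could make the four return values above easier to trace; it is a sketch only (aspa_result_name is a hypothetical name, not added by the patch):

    /* Map aspa_check() results to human-readable names for logging. */
    static const char *
    aspa_result_name(int result)
    {
      switch (result)
      {
        case ASPA_UNKNOWN:         return "unknown";
        case ASPA_VALID:           return "valid";
        case ASPA_INVALID:         return "invalid";
        case ASPA_CONTAINS_CONFED: return "contains-confed";
        default:                   return "???";
      }
    }
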
+
+struct rte_storage *
+rte_store(const rte *r, struct netindex *i, struct rtable_private *tab)
{
- rte *e = net->routes;
+ struct rte_storage *s = sl_alloc(tab->rte_slab);
+ struct rte *e = RTES_WRITE(s);
+
+ *e = *r;
+ e->net = i->addr;
+ net_lock_index(tab->netindex, i);
- while (e && e->src != src)
- e = e->next;
- return e;
+ rt_lock_source(e->src);
+
+ e->attrs = ea_lookup(e->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED), EALS_IN_TABLE);
+
+#if 0
+ debug("(store) %N ", i->addr);
+ ea_dump(e->attrs);
+ debug("\n");
+#endif
+
+ return s;
}
+static void rte_free_deferred(struct deferred_call *dc);
+
+struct rte_free_deferred_item {
+ struct deferred_call dc;
+ struct rte_storage *e;
+ rtable *tab;
+};
+
/**
- * rte_get_temp - get a temporary &rte
- * @a: attributes to assign to the new route (a &rta; in case it's
- * un-cached, rte_update() will create a cached copy automatically)
- * @src: route source
+ * rte_free - delete a &rte (deferred)
+ * @e: &struct rte_storage to be deleted
+ * @tab: the table which the rte belongs to
*
- * Create a temporary &rte and bind it with the attributes @a.
+ * rte_free() schedules deletion of the given &rte from the routing table it is
+ * linked to; the actual cleanup runs later from a deferred call.
*/
-rte *
-rte_get_temp(rta *a, struct rte_src *src)
+
+static void
+rte_free(struct rte_storage *e, struct rtable_private *tab)
{
- rte *e = sl_alloc(rte_slab);
+ struct rte_free_deferred_item rfdi = {
+ .dc.hook = rte_free_deferred,
+ .e = e,
+ .tab = RT_PUB(tab),
+ };
+
+ if (!tab->rte_free_deferred++)
+ rt_lock_table(tab);
- e->attrs = a;
- e->id = 0;
- e->flags = 0;
- e->pflags = 0;
- rt_lock_source(e->src = src);
- return e;
+ rt_rte_trace_in(D_ROUTES, e->rte.sender->req, &e->rte, "freeing");
+ defer_call(&rfdi.dc, sizeof rfdi);
}
-rte *
-rte_do_cow(rte *r)
+static void
+rte_free_deferred(struct deferred_call *dc)
{
- rte *e = sl_alloc(rte_slab);
+ SKIP_BACK_DECLARE(struct rte_free_deferred_item, rfdi, dc, dc);
- memcpy(e, r, sizeof(rte));
+ struct rte_storage *e = rfdi->e;
+ RT_LOCK(rfdi->tab, tab);
- rt_lock_source(e->src);
- e->attrs = rta_clone(r->attrs);
- e->flags = 0;
- return e;
-}
+ /* No need for synchronize_rcu, implied by the deferred_call */
-/**
- * rte_cow_rta - get a private writable copy of &rte with writable &rta
- * @r: a route entry to be copied
- * @lp: a linpool from which to allocate &rta
- *
- * rte_cow_rta() takes a &rte and prepares it and associated &rta for
- * modification. There are three possibilities: First, both &rte and &rta are
- * private copies, in that case they are returned unchanged. Second, &rte is
- * private copy, but &rta is cached, in that case &rta is duplicated using
- * rta_do_cow(). Third, both &rte is shared and &rta is cached, in that case
- * both structures are duplicated by rte_do_cow() and rta_do_cow().
- *
- * Note that in the second case, cached &rta loses one reference, while private
- * copy created by rta_do_cow() is a shallow copy sharing indirect data (eattrs,
- * nexthops, ...) with it. To work properly, original shared &rta should have
- * another reference during the life of created private copy.
- *
- * Result: a pointer to the new writable &rte with writable &rta.
- */
-rte *
-rte_cow_rta(rte *r, linpool *lp)
-{
- if (!rta_is_cached(r->attrs))
- return r;
+ struct netindex *i = RTE_GET_NETINDEX(&e->rte);
+ net_unlock_index(tab->netindex, i);
+
+ rt_unlock_source(e->rte.src);
- r = rte_cow(r);
- rta *a = rta_do_cow(r->attrs, lp);
- rta_free(r->attrs);
- r->attrs = a;
- return r;
+ ea_free(e->rte.attrs);
+ sl_free(e);
+
+ if (!--tab->rte_free_deferred)
+ rt_unlock_table(tab);
}
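
Editor's note: rte_free() and rte_free_deferred() above follow a generic deferred-call pattern. A minimal sketch of that pattern with a hypothetical payload (illustration only; it assumes the defer_call() semantics visible above, where passing a stack-allocated item together with its size implies the item is copied into the queue):

    /* Hypothetical deferred work item: a deferred_call plus its payload. */
    struct example_deferred_item {
      struct deferred_call dc;
      int payload;
    };

    static void
    example_deferred_hook(struct deferred_call *dc)
    {
      /* Recover the enclosing item from the embedded deferred_call */
      SKIP_BACK_DECLARE(struct example_deferred_item, item, dc, dc);
      log(L_TRACE "deferred payload: %d", item->payload);
    }

    static void
    example_defer(int value)
    {
      struct example_deferred_item item = {
        .dc.hook = example_deferred_hook,
        .payload = value,
      };
      /* Queued now, executed later; the item is passed with its size */
      defer_call(&item.dc, sizeof item);
    }
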
static int /* Actually better or at least as good as */