cf_error("Too many nested method calls");
struct sym_scope *scope = f_type_method_scope(object->type);
- if (!scope)
+ if (!scope && object->type != T_ROUTE)
cf_error("No methods defined for type %s", f_type_name(object->type));
++ if (!scope)
++ scope = config->root_scope->next;
++
+ /* Replace the current symbol scope with the appropriate method scope
+ for the given type. */
FM = (struct f_method_scope) {
.object = object,
.main = new_config->current_scope,
.scope = {
- .next = NULL,
- .next = scope ? global_root_scope : NULL,
- .hash = scope ? scope->hash : global_root_scope->hash,
- .active = 1,
++ .next = scope->next,
+ .hash = scope->hash,
.block = 1,
.readonly = 1,
},
f_lval_getter(struct f_lval *lval)
{
switch (lval->type) {
+ case F_LVAL_CONSTANT: return f_new_inst(FI_CONSTANT, *(lval->sym->val));
case F_LVAL_VARIABLE: return f_new_inst(FI_VAR_GET, lval->sym);
- case F_LVAL_SA: return f_new_inst(FI_RTA_GET, lval->sa);
- case F_LVAL_EA: return f_new_inst(FI_EA_GET, lval->da);
+ case F_LVAL_SA: return f_new_inst(FI_RTA_GET, lval->rte, lval->sa);
+ case F_LVAL_EA: return f_new_inst(FI_EA_GET, lval->rte, lval->da);
+ case F_LVAL_ATTR_BIT:
+ {
+ struct f_inst *c = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << lval->fab.bit)});
- return f_new_inst(FI_EQ, c, f_new_inst(FI_BITAND, f_new_inst(FI_EA_GET, lval->fab.class), c));
++ return f_new_inst(FI_EQ, c, f_new_inst(FI_BITAND, f_new_inst(FI_EA_GET, lval->rte, lval->fab.class), c));
+ }
default: bug("Unknown lval type");
}
}
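For illustration: the F_LVAL_ATTR_BIT getter above builds an instruction tree that tests a single bit of an integer attribute. A minimal C sketch of the computed semantics, with hypothetical names (`bit`, `ea_val`):

    u32 c = 1U << bit;                 /* constant pushed by FI_CONSTANT */
    int is_set = ((ea_val & c) == c);  /* FI_EQ(c, FI_BITAND(FI_EA_GET(rte, class), c)) */
    /* e.g. bit = 2, ea_val = 6: (6 & 4) == 4 holds, so the bit reads as true */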
f_lval_setter(struct f_lval *lval, struct f_inst *expr)
{
switch (lval->type) {
+ case F_LVAL_CONSTANT: cf_error("Constant %s is read-only", lval->sym->name);
case F_LVAL_VARIABLE: return f_new_inst(FI_VAR_SET, expr, lval->sym);
- case F_LVAL_SA: return f_new_inst(FI_RTA_SET, expr, lval->sa);
+ case F_LVAL_SA:
+ if (lval->sa.readonly)
+ cf_error( "This static attribute is read-only.");
+ return f_new_inst(FI_RTA_SET, expr, lval->sa);
+
case F_LVAL_EA: return f_new_inst(FI_EA_SET, expr, lval->da);
+ case F_LVAL_ATTR_BIT: return f_new_inst(FI_CONDITION, expr,
+ f_new_inst(FI_EA_SET,
+ f_new_inst(FI_BITOR,
+ f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << lval->fab.bit)}),
- f_new_inst(FI_EA_GET, lval->fab.class)
++ f_new_inst(FI_EA_GET, lval->rte, lval->fab.class)
+ ),
+ lval->fab.class),
+ f_new_inst(FI_EA_SET,
+ f_new_inst(FI_BITAND,
+ f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = ~(1U << lval->fab.bit)}),
++ f_new_inst(FI_EA_GET, lval->rte, lval->fab.class)
+ ),
+ lval->fab.class)
+ );
default: bug("Unknown lval type");
}
}
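The attr-bit setter wraps two FI_EA_SET branches in FI_CONDITION: the expression selects whether the bit gets ORed in or masked out. A sketch of the equivalent C semantics (names hypothetical):

    if (expr)
      ea_val |= (1U << bit);    /* FI_BITOR branch */
    else
      ea_val &= ~(1U << bit);   /* FI_BITAND branch */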
%nonassoc ELSE
%type <xp> cmds_int cmd_prep
- %type <x> term term_bs cmd cmd_var cmds cmds_scoped constant constructor var var_list var_list_r function_call symbol_value bgp_path_expr bgp_path bgp_path_tail term_dot_method method_name_cont
+ %type <x> term term_bs cmd cmd_var cmds cmds_scoped constant constructor var var_list var_list_r function_call bgp_path_expr bgp_path bgp_path_tail term_dot_method method_name_cont
-%type <fda> dynamic_attr
%type <fsa> static_attr
%type <f> filter where_filter
%type <fl> filter_body function_body
}
;
- symbol_value: CF_SYM_KNOWN
- {
- switch ($1->class) {
- case SYM_CONSTANT_RANGE:
- $$ = f_new_inst(FI_CONSTANT, *($1->val));
- break;
- case SYM_VARIABLE_RANGE:
- $$ = f_new_inst(FI_VAR_GET, $1);
- break;
- case SYM_ATTRIBUTE:
- $$ = f_new_inst(FI_EA_GET, $1->attribute);
- break;
- default:
- cf_error("Can't get value of symbol %s", $1->name);
- }
- }
- ;
static_attr:
- FROM { $$ = f_new_static_attr(T_IP, SA_FROM, 0); }
- | GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
+ GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
| NET { $$ = f_new_static_attr(T_NET, SA_NET, 1); }
| PROTO { $$ = f_new_static_attr(T_STRING, SA_PROTO, 1); }
- | SOURCE { $$ = f_new_static_attr(T_ENUM_RTS, SA_SOURCE, 1); }
- | SCOPE { $$ = f_new_static_attr(T_ENUM_SCOPE, SA_SCOPE, 0); }
| DEST { $$ = f_new_static_attr(T_ENUM_RTD, SA_DEST, 0); }
| IFNAME { $$ = f_new_static_attr(T_STRING, SA_IFNAME, 0); }
| IFINDEX { $$ = f_new_static_attr(T_INT, SA_IFINDEX, 1); }
} '(' var_list ')' {
$$ = f_dispatch_method($1, FM.object, $4, 1);
}
- | dynamic_attr {
+ | static_attr {
+ if (FM.object->type != T_ROUTE)
+ cf_error("Getting a route attribute from %s, need a route", f_type_name(FM.object->type));
+ $$ = f_new_inst(FI_RTA_GET, FM.object, $1);
+ }
- $$ = f_new_inst(FI_EA_GET, FM.object, $1);
++ | CF_SYM_KNOWN {
++ if ($1->class != SYM_ATTRIBUTE)
++ cf_error("Not a method of %s: %s", f_type_name(FM.object->type), $1->name);
+ if (FM.object->type != T_ROUTE)
+ cf_error("Getting a route attribute from %s, need a route", f_type_name(FM.object->type));
++ $$ = f_new_inst(FI_EA_GET, FM.object, $1->attribute);
+ }
;
term:
$$ = f_new_inst(FI_RETURN, $2);
}
- | static_attr '=' term ';' {
- if ($1.readonly)
- cf_error( "This static attribute is read-only.");
- $$ = f_new_inst(FI_RTA_SET, $3, $1);
- }
- | UNSET '(' dynamic_attr ')' ';' {
- $$ = f_new_inst(FI_EA_UNSET, $3);
+ | UNSET '(' CF_SYM_KNOWN ')' ';' {
+ if ($3->class != SYM_ATTRIBUTE)
+ cf_error("Can't unset %s", $3->name);
+ if ($3->attribute->readonly)
+ cf_error("Attribute %s is read-only", $3->attribute->name);
+ $$ = f_new_inst(FI_EA_UNSET, $3->attribute);
}
- | UNSET '(' symbol_known ')' ';' {
- switch ($3->class) {
- case SYM_ATTRIBUTE:
- $$ = f_new_inst(FI_EA_UNSET, *$3->attribute);
- break;
- default:
- cf_error("Can't unset symbol %s", $3->name);
- }
- }
| break_command var_list_r ';' {
$$ = f_print($2, !!$2, $1);
}
CF_SYM_KNOWN {
switch ($1->class)
{
+ case SYM_CONSTANT_RANGE:
+ $$ = (struct f_lval) { .type = F_LVAL_CONSTANT, .sym = $1, };
+ break;
case SYM_VARIABLE_RANGE:
- $$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1 };
+ $$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1, };
break;
case SYM_ATTRIBUTE:
- $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1->attribute };
- $$ = (struct f_lval) { .type = F_LVAL_EA, .da = *($1->attribute), .rte = f_new_inst(FI_CURRENT_ROUTE), };
++ $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1->attribute, .rte = f_new_inst(FI_CURRENT_ROUTE), };
break;
default:
- cf_error("Variable name or custom attribute name required");
+ cf_error("Variable name or attribute name required");
}
}
- | static_attr { $$ = (struct f_lval) { .type = F_LVAL_SA, .sa = $1 }; }
+ | static_attr { $$ = (struct f_lval) { .type = F_LVAL_SA, .sa = $1, .rte = f_new_inst(FI_CURRENT_ROUTE), }; }
- | dynamic_attr { $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1, .rte = f_new_inst(FI_CURRENT_ROUTE), }; }
;
CF_END
[T_LCLIST] = "lclist",
[T_RD] = "rd",
+ [T_ROUTE] = "route",
++
+ [T_SET] = "set",
+ [T_PREFIX_SET] = "prefix set",
};
+STATIC_ASSERT((1 << (8 * sizeof(btype))) == ARRAY_SIZE(f_type_str));
+
const char *
-f_type_name(enum f_type t)
+f_type_name(btype t)
{
- if (t < ARRAY_SIZE(f_type_str))
- return f_type_str[t] ?: "?";
-
- if ((t == T_SET) || (t == T_PREFIX_SET))
- return "set";
-
- return "?";
+ return f_type_str[t] ?: "?";
}
-enum f_type
-f_type_element_type(enum f_type t)
+btype
+f_type_element_type(btype t)
{
switch(t) {
case T_PATH: return T_INT;
return F_CMP_ERROR;
}
- buffer_print(buf, "Route [%d] to %N from %s.%s via %s",
- rte->src->global_id, rte->net->n.addr,
- rte->sender->proto->name, rte->sender->name,
- rte->src->proto->name);
+ /*
+ * rte_format - format route information
+ */
+ static void
+ rte_format(const struct rte *rte, buffer *buf)
+ {
+ if (rte)
++ buffer_print(buf, "Route [%d] to %N from %s via %s",
++ rte->src->global_id, rte->net,
++ rte->sender->req->name,
++ rte->src->owner->name);
+ else
+ buffer_puts(buf, "[No route]");
+ }
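With the reworked fields, the formatted output looks roughly like this (sample values hypothetical):

    Route [42] to 192.0.2.0/24 from bgp1.ipv4 via bgp1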
+
/*
* val_format - format filter value
*/
#define _BIRD_FILTER_DATA_H_
#include "nest/bird.h"
-
-/* Type numbers must be in 0..0xff range */
-#define T_MASK 0xff
-
-/* Internal types */
-enum f_type {
-/* Nothing. Simply nothing. */
- T_VOID = 0,
-
- T_NONE = 1, /* Special hack to represent missing arguments */
-
-/* User visible types, which fit in int */
- T_INT = 0x10,
- T_BOOL = 0x11,
- T_PAIR = 0x12, /* Notice that pair is stored as integer: first << 16 | second */
- T_QUAD = 0x13,
-
-/* Put enumerational types in 0x30..0x3f range */
- T_ENUM_LO = 0x30,
- T_ENUM_HI = 0x3f,
-
- T_ENUM_RTS = 0x30,
- T_ENUM_BGP_ORIGIN = 0x31,
- T_ENUM_SCOPE = 0x32,
- T_ENUM_RTC = 0x33,
- T_ENUM_RTD = 0x34,
- T_ENUM_ROA = 0x35,
- T_ENUM_NETTYPE = 0x36,
- T_ENUM_RA_PREFERENCE = 0x37,
- T_ENUM_AF = 0x38,
-
-/* new enums go here */
- T_ENUM_EMPTY = 0x3f, /* Special hack for atomic_aggr */
-
-#define T_ENUM T_ENUM_LO ... T_ENUM_HI
-
-/* Bigger ones */
- T_IP = 0x20,
- T_NET = 0x21,
- T_STRING = 0x22,
- T_PATH_MASK = 0x23, /* mask for BGP path */
- T_PATH = 0x24, /* BGP path */
- T_CLIST = 0x25, /* Community list */
- T_EC = 0x26, /* Extended community value, u64 */
- T_ECLIST = 0x27, /* Extended community list */
- T_LC = 0x28, /* Large community value, lcomm */
- T_LCLIST = 0x29, /* Large community list */
- T_RD = 0x2a, /* Route distinguisher for VPN addresses */
- T_PATH_MASK_ITEM = 0x2b, /* Path mask item for path mask constructors */
- T_BYTESTRING = 0x2c,
-
- T_ROUTE = 0x78,
- T_SET = 0x80,
- T_PREFIX_SET = 0x81,
-} PACKED;
+#include "lib/type.h"
++#include "nest/iface.h"
struct f_method {
struct symbol *sym;
/* Static attribute definition (members of struct rta) */
struct f_static_attr {
- enum f_type f_type; /* Filter type */
+ btype type; /* Data type */
enum f_sa_code sa_code; /* Static attribute id */
- int readonly:1; /* Don't allow writing */
+ int readonly:1; /* Don't allow writing */
};
+struct f_attr_bit {
+ const struct ea_class *class;
+ uint bit;
+};
+
+#define f_new_dynamic_attr_bit(_bit, _name) ((struct f_attr_bit) { .bit = _bit, .class = ea_class_find(_name) })
+
/* Filter l-value type */
enum f_lval_type {
+ F_LVAL_CONSTANT,
F_LVAL_VARIABLE,
- F_LVAL_PREFERENCE,
F_LVAL_SA,
F_LVAL_EA,
+ F_LVAL_ATTR_BIT,
};
/* Filter l-value */
struct f_lval {
enum f_lval_type type;
+ struct f_inst *rte;
union {
struct symbol *sym;
- struct f_dynamic_attr da;
+ const struct ea_class *da;
struct f_static_attr sa;
+ struct f_attr_bit fab;
};
};
}
}
- INST(FI_RTA_GET, 0, 1) {
+ INST(FI_CURRENT_ROUTE, 0, 1) {
+ NEVER_CONSTANT;
- ACCESS_EATTRS;
++ ACCESS_RTE;
+ RESULT_TYPE(T_ROUTE);
- RESULT_VAL([[(struct f_val) { .type = T_ROUTE, .val.rte = *fs->rte, .val.eattrs = *fs->eattrs, }]]);
++ RESULT_VAL([[(struct f_val) { .type = T_ROUTE, .val.rte = fs->rte, }]]);
+ }
+
+ INST(FI_RTA_GET, 1, 1) {
{
- ACCESS_RTE;
+ ARG(1, T_ROUTE);
STATIC_ATTR;
- ACCESS_RTE;
+
- struct rta *rta = v1.val.rte->attrs;
++ struct rte *rte = v1.val.rte;
switch (sa.sa_code)
{
- case SA_NET: RESULT(sa.type, net, fs->rte->net); break;
- case SA_PROTO: RESULT(sa.type, s, fs->rte->src->owner->name); break;
- case SA_FROM: RESULT(sa.f_type, ip, rta->from); break;
- case SA_GW: RESULT(sa.f_type, ip, rta->nh.gw); break;
- case SA_NET: RESULT(sa.f_type, net, (*fs->rte)->net->n.addr); break;
- case SA_PROTO: RESULT(sa.f_type, s, (*fs->rte)->src->proto->name); break;
- case SA_SOURCE: RESULT(sa.f_type, i, rta->source); break;
- case SA_SCOPE: RESULT(sa.f_type, i, rta->scope); break;
- case SA_DEST: RESULT(sa.f_type, i, rta->dest); break;
- case SA_IFNAME: RESULT(sa.f_type, s, rta->nh.iface ? rta->nh.iface->name : ""); break;
- case SA_IFINDEX: RESULT(sa.f_type, i, rta->nh.iface ? rta->nh.iface->index : 0); break;
- case SA_WEIGHT: RESULT(sa.f_type, i, rta->nh.weight + 1); break;
- case SA_PREF: RESULT(sa.f_type, i, rta->pref); break;
- case SA_GW_MPLS: RESULT(sa.f_type, i, rta->nh.labels ? rta->nh.label[0] : MPLS_NULL); break;
- case SA_ONLINK: RESULT(sa.f_type, i, rta->nh.flags & RNF_ONLINK ? 1 : 0); break;
-
++ case SA_NET: RESULT(sa.type, net, rte->net); break;
++ case SA_PROTO: RESULT(sa.type, s, rte->src->owner->name); break;
default:
- bug("Invalid static attribute access (%u/%u)", sa.f_type, sa.sa_code);
+ {
- struct eattr *nhea = ea_find(fs->rte->attrs, &ea_gen_nexthop);
++ struct eattr *nhea = ea_find(rte->attrs, &ea_gen_nexthop);
+ struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
+ struct nexthop *nh = nhad ? &nhad->nh : NULL;
+
+ switch (sa.sa_code)
+ {
+ case SA_DEST:
+ RESULT(sa.type, i, nhad ?
+ (NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest)
+ : RTD_NONE);
+ break;
+ case SA_GW:
+ RESULT(sa.type, ip, nh ? nh->gw : IPA_NONE);
+ break;
+ case SA_IFNAME:
+ RESULT(sa.type, s, (nh && nh->iface) ? nh->iface->name : "");
+ break;
+ case SA_IFINDEX:
+ RESULT(sa.type, i, (nh && nh->iface) ? nh->iface->index : 0);
+ break;
+ case SA_WEIGHT:
+ RESULT(sa.type, i, (nh ? nh->weight : 0) + 1);
+ break;
+ case SA_GW_MPLS:
+ RESULT(sa.type, i, (nh && nh->labels) ? nh->label[0] : MPLS_NULL);
+ break;
+ default:
+ bug("Invalid static attribute access (%u/%u)", sa.type, sa.sa_code);
+ }
+ }
}
}
}
}
}
- INST(FI_EA_GET, 0, 1) { /* Access to extended attributes */
+ INST(FI_EA_GET, 1, 1) { /* Access to extended attributes */
- ACCESS_RTE;
- ACCESS_EATTRS;
+ ARG(1, T_ROUTE);
DYNAMIC_ATTR;
- ACCESS_RTE;
- RESULT_TYPE(da.f_type);
+ RESULT_TYPE(da->type);
{
- struct ea_list *eal = v1.val.eattrs;
- eattr *e = ea_find(eal, da.ea_code);
+ struct f_val empty;
- const eattr *e = ea_find(fs->rte->attrs, da->id);
++ const eattr *e = ea_find(v1.val.rte->attrs, da->id);
- if (!e) {
- RESULT_VAL(val_empty(da.f_type));
- break;
- }
+ if (e)
+ {
+ ASSERT_DIE(e->type == da->type);
- switch (e->type & EAF_TYPE_MASK) {
- case EAF_TYPE_INT:
- RESULT_(da.f_type, i, e->u.data);
- break;
- case EAF_TYPE_ROUTER_ID:
- RESULT_(T_QUAD, i, e->u.data);
- break;
- case EAF_TYPE_OPAQUE:
- RESULT_(T_ENUM_EMPTY, i, 0);
- break;
- case EAF_TYPE_IP_ADDRESS:
- RESULT_(T_IP, ip, *((ip_addr *) e->u.ptr->data));
- break;
- case EAF_TYPE_AS_PATH:
- RESULT_(T_PATH, ad, e->u.ptr);
- break;
- case EAF_TYPE_BITFIELD:
- RESULT_(T_BOOL, i, !!(e->u.data & (1u << da.bit)));
- break;
- case EAF_TYPE_INT_SET:
- RESULT_(T_CLIST, ad, e->u.ptr);
- break;
- case EAF_TYPE_EC_SET:
- RESULT_(T_ECLIST, ad, e->u.ptr);
- break;
- case EAF_TYPE_LC_SET:
- RESULT_(T_LCLIST, ad, e->u.ptr);
- break;
- default:
- bug("Unknown dynamic attribute type");
+ switch (e->type) {
+ case T_IP:
+ RESULT_(T_IP, ip, *((const ip_addr *) e->u.ptr->data));
+ break;
+ default:
+ RESULT_VAL([[(struct f_val) {
+ .type = e->type,
+ .val.bval = e->u,
+ }]]);
+ }
}
+ else if ((empty = f_get_empty(da->type)).type != T_VOID)
+ RESULT_VAL(empty);
+ else
+ RESULT_VOID;
}
}
return ms->method->new_inst(term, loop_start);
}
- struct f_inst *path_getter = f_new_inst(FI_EA_GET, def);
+struct f_inst *
+f_implicit_roa_check(struct rtable_config *tab)
+{
+ const struct ea_class *def = ea_class_find("bgp_path");
+ if (!def)
+ bug("Couldn't find BGP AS Path attribute definition.");
+
- f_new_inst(FI_RTA_GET, fsa),
++ struct f_inst *path_getter = f_new_inst(FI_EA_GET, f_new_inst(FI_CURRENT_ROUTE), def);
+ struct sym_scope *scope = f_type_method_scope(path_getter->type);
+ struct symbol *ms = scope ? cf_find_symbol_scope(scope, "last") : NULL;
+
+ if (!ms)
+ bug("Couldn't find the \"last\" method for AS Path.");
+
+ struct f_static_attr fsa = f_new_static_attr(T_NET, SA_NET, 1);
+
+ return f_new_inst(FI_ROA_CHECK,
++ f_new_inst(FI_RTA_GET, f_new_inst(FI_CURRENT_ROUTE), fsa),
+ ms->method->new_inst(path_getter, NULL),
+ tab);
+}
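In filter terms, assuming this helper backs the bare one-argument form of roa_check, the call expands roughly to the explicit variant (filter-language sketch):

    roa_check(tab)   ==>   roa_check(tab, net, bgp_path.last)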
+
struct f_inst *
f_print(struct f_inst *vars, int flush, enum filter_return fret)
{
--- /dev/null
+/*
+ * BIRD Internet Routing Daemon -- Routing data structures
+ *
+ * (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_LIB_ROUTE_H_
+#define _BIRD_LIB_ROUTE_H_
+
+#undef RT_SOURCE_DEBUG
+
+#include "lib/type.h"
+#include "lib/rcu.h"
+#include "lib/hash.h"
+#include "lib/event.h"
+
+struct network;
+struct proto;
+struct cli;
+struct rtable_private;
+struct rte_storage;
+
+#define RTE_IN_TABLE_WRITABLE \
+ byte pflags; /* Protocol-specific flags; may change in-table (!) */ \
+ u8 stale_cycle; /* Auxiliary value for route refresh; may change in-table (!) */ \
+
+typedef struct rte {
+ RTE_IN_TABLE_WRITABLE;
+ byte flags; /* Table-specific flags */
+ u8 generation; /* If this route import is based on other previously exported route,
+ this value should be 1 + MAX(generation of the parent routes).
+ Otherwise the route is independent and this value is zero. */
+ u32 id; /* Table specific route id */
+ struct ea_list *attrs; /* Attributes of this route */
+ const net_addr *net; /* Network this RTE belongs to */
+ struct rte_src *src; /* Route source that created the route */
+ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
+ btime lastmod; /* Last modified (set by table) */
+} rte;
+
+#define REF_FILTERED 2 /* Route is rejected by import filter */
+#define REF_PENDING 32 /* Route has not propagated completely yet */
+
+/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
+static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
+
+/* Route just has REF_FILTERED flag */
+static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
+
+/* Strip the route of the table-specific values */
+static inline rte rte_init_from(const rte *r)
+{
+ return (rte) {
+ .attrs = r->attrs,
+ .net = r->net,
+ .src = r->src,
+ };
+}
+
++int rte_same(const rte *, const rte *);
++
+struct rte_src {
+ struct rte_src *next; /* Hash chain */
+ struct rte_owner *owner; /* Route source owner */
+ u32 private_id; /* Private ID, assigned by the protocol */
+ u32 global_id; /* Globally unique ID of the source */
+ _Atomic u64 uc; /* Use count */
+};
+
+struct rte_owner_class {
+ void (*get_route_info)(const rte *, byte *buf); /* Get route information (for `show route' command) */
+ int (*rte_better)(const rte *, const rte *);
+ int (*rte_mergable)(const rte *, const rte *);
+ u32 (*rte_igp_metric)(const rte *);
+};
+
+struct rte_owner {
+ struct rte_owner_class *class;
+ int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte_storage *new, struct rte_storage *, struct rte_storage *);
+ HASH(struct rte_src) hash;
+ const char *name;
+ u32 hash_key;
+ u32 uc;
+ event_list *list;
+ event *prune;
+ event *stop;
+};
+
+DEFINE_DOMAIN(attrs);
+extern DOMAIN(attrs) attrs_domain;
+
+#define RTA_LOCK LOCK_DOMAIN(attrs, attrs_domain)
+#define RTA_UNLOCK UNLOCK_DOMAIN(attrs, attrs_domain)
+
+#define RTE_SRC_PU_SHIFT 44
+#define RTE_SRC_IN_PROGRESS (1ULL << RTE_SRC_PU_SHIFT)
+
+/* Get a route source. This also locks the source, therefore the caller has to
+ * unlock the source after the route has been propagated. */
+struct rte_src *rt_get_source_o(struct rte_owner *o, u32 id);
+#define rt_get_source(p, id) rt_get_source_o(&(p)->sources, (id))
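A minimal usage sketch of the lock/unlock contract stated above (`p` and `id` are placeholders):

    struct rte_src *src = rt_get_source(p, id);  /* returned source is already locked */
    /* ... attach src to a route and propagate it ... */
    rt_unlock_source(src);                       /* release once propagation is done */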
+
+struct rte_src *rt_find_source_global(u32 id);
+
+#ifdef RT_SOURCE_DEBUG
+#define rt_lock_source _rt_lock_source_internal
+#define rt_unlock_source _rt_unlock_source_internal
+#endif
+
+static inline void rt_lock_source(struct rte_src *src)
+{
+ /* Locking a source is trivial; somebody already holds it so we just increase
+ * the use count. Nothing can be freed underneath our hands. */
+ u64 uc = atomic_fetch_add_explicit(&src->uc, 1, memory_order_acq_rel);
+ ASSERT_DIE(uc > 0);
+}
+
+static inline void rt_unlock_source(struct rte_src *src)
+{
+ /* Unlocking is tricky. We do it lockless so at the same time, the prune
+ * event may be running, therefore if the unlock gets us to zero, it must be
+ * the last thing in this routine, otherwise the prune routine may find the
+ * source's usecount zeroed, freeing it prematurely.
+ *
+ * The usecount is split into two parts:
+ * the top 20 bits are an in-progress indicator
+ * the bottom 44 bits keep the actual usecount.
+ *
+ * Therefore at most ~1M writers can simultaneously unlock the same
+ * source, while at most ~17T different routes can reference it. Both limits
+ * are insanely high from the 2022 point of view. Let's suppose that when 17T
+ * routes or 1M writers become real, we will also have 128-bit atomic variables
+ * in the C standard. */
+
+ /* First, we push the in-progress indicator */
+ u64 uc = atomic_fetch_add_explicit(&src->uc, RTE_SRC_IN_PROGRESS, memory_order_acq_rel);
+
+ /* Then we split the indicator to its parts. Remember, we got the value before the operation happened. */
+ u64 pending = (uc >> RTE_SRC_PU_SHIFT) + 1;
+ uc &= RTE_SRC_IN_PROGRESS - 1;
+
+ /* We reuse the RCU critical section indicator to make the prune event wait
+ * until we finish here in the rare case we get preempted. */
+ rcu_read_lock();
+
+ /* Obviously, there can't be more pending unlocks than the usecount itself */
+ if (uc == pending)
+ /* If we're the last unlocker, schedule the owner's prune event */
+ ev_send(src->owner->list, src->owner->prune);
+ else
+ ASSERT_DIE(uc > pending);
+
+ /* And now, finally, simultaneously pop the in-progress indicator and the
+ * usecount, possibly allowing the source pruning routine to free this structure */
+ atomic_fetch_sub_explicit(&src->uc, RTE_SRC_IN_PROGRESS + 1, memory_order_acq_rel);
+
+ /* ... and to reduce the load a bit, the source pruning routine prefers to
+ * wait for RCU synchronization instead of busy-looping. */
+ rcu_read_unlock();
+}
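A worked example of the arithmetic above: with a single holder (usecount 1) and no concurrent unlockers, fetch_add returns uc = 1; then pending = (1 >> 44) + 1 = 1 and the masked usecount is also 1, so uc == pending and the prune event is scheduled. The final fetch_sub removes both the in-progress bit and the last use, letting the prune routine free the source after RCU synchronization.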
+
+#ifdef RT_SOURCE_DEBUG
+#undef rt_lock_source
+#undef rt_unlock_source
+
+#define rt_lock_source(x) ( log(L_INFO "Lock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_lock_source_internal(x) )
+#define rt_unlock_source(x) ( log(L_INFO "Unlock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_unlock_source_internal(x) )
+#endif
+
+void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
+void rt_destroy_sources(struct rte_owner *, event *);
+
+/*
+ * Route Attributes
+ *
+ * Beware: All standard BGP attributes must be represented here instead
+ * of making them local to the route. This is needed to ensure proper
+ * construction of BGP route attribute lists.
+ */
+
+/* Nexthop structure */
+struct nexthop {
+ ip_addr gw; /* Next hop */
+ struct iface *iface; /* Outgoing interface */
+ byte flags;
+ byte weight;
+ byte labels; /* Number of all labels */
+ u32 label[0];
+};
+
+/* For packing one into eattrs */
+struct nexthop_adata {
+ struct adata ad;
+ /* There is either a set of nexthops or a special destination (RTD_*) */
+ union {
+ struct nexthop nh;
+ uint dest;
+ };
+};
+
+#define NEXTHOP_DEST_SIZE (OFFSETOF(struct nexthop_adata, dest) + sizeof(uint) - OFFSETOF(struct adata, data))
+#define NEXTHOP_DEST_LITERAL(x) ((struct nexthop_adata) { \
+ .ad.length = NEXTHOP_DEST_SIZE, .dest = (x), })
+
+#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
+
+
+#define RTS_STATIC 1 /* Normal static route */
+#define RTS_INHERIT 2 /* Route inherited from kernel */
+#define RTS_DEVICE 3 /* Device route */
+#define RTS_STATIC_DEVICE 4 /* Static device route */
+#define RTS_REDIRECT 5 /* Learned via redirect */
+#define RTS_RIP 6 /* RIP route */
+#define RTS_OSPF 7 /* OSPF route */
+#define RTS_OSPF_IA 8 /* OSPF inter-area route */
+#define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
+#define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
+#define RTS_BGP 11 /* BGP route */
+#define RTS_PIPE 12 /* Inter-table wormhole */
+#define RTS_BABEL 13 /* Babel route */
+#define RTS_RPKI 14 /* Route Origin Authorization */
+#define RTS_PERF 15 /* Perf checker */
+#define RTS_MAX 16
+
+#define RTD_NONE 0 /* Undefined next hop */
+#define RTD_UNICAST 1 /* A standard next hop */
+#define RTD_BLACKHOLE 2 /* Silently drop packets */
+#define RTD_UNREACHABLE 3 /* Reject as unreachable */
+#define RTD_PROHIBIT 4 /* Administratively prohibited */
+#define RTD_MAX 5
+
+extern const char * rta_dest_names[RTD_MAX];
+
+static inline const char *rta_dest_name(uint n)
+{ return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
+
+
+/*
+ * Extended Route Attributes
+ */
+
+typedef struct eattr {
+ word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
+ byte flags; /* Protocol-dependent flags */
+ byte type; /* Attribute type */
+ byte rfu:5;
+ byte originated:1; /* The attribute has originated locally */
+ byte fresh:1; /* An uncached attribute (e.g. modified in export filter) */
+ byte undef:1; /* Explicitly undefined */
+
+ PADDING(unused, 3, 3);
+
+ union bval u;
+} eattr;
+
+
+#define EA_CODE_MASK 0xffff
+#define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
+#define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
+#define EA_BIT_GET(ea) ((ea) >> 24)
+
+typedef struct ea_list {
+ struct ea_list *next; /* In case we have an override list */
+ byte flags; /* Flags: EALF_... */
+ byte rfu;
+ word count; /* Number of attributes */
+ eattr attrs[0]; /* Attribute definitions themselves */
+} ea_list;
+
+struct ea_storage {
+ struct ea_storage *next_hash; /* Next in hash chain */
+ struct ea_storage **pprev_hash; /* Previous in hash chain */
+ _Atomic u32 uc; /* Use count */
+ u32 hash_key; /* List hash */
+ ea_list l[0]; /* The list itself */
+};
+
+#define EALF_SORTED 1 /* Attributes are sorted by code */
+#define EALF_BISECT 2 /* Use interval bisection for searching */
+#define EALF_CACHED 4 /* List is cached */
+#define EALF_HUGE 8 /* List is too big to fit into slab */
+
+struct ea_class {
+#define EA_CLASS_INSIDE \
+ const char *name; /* Name (both print and filter) */ \
+ struct symbol *sym; /* Symbol to export to configs */ \
+ uint id; /* Autoassigned attribute ID */ \
+ uint uc; /* Reference count */ \
+ btype type; /* Data type ID */ \
+ uint readonly:1; /* This attribute can't be changed by filters */ \
+ uint conf:1; /* Requested by config */ \
+ uint hidden:1; /* Technical attribute, do not show, do not expose to filters */ \
+ void (*format)(const eattr *ea, byte *buf, uint size); \
+ void (*stored)(const eattr *ea); /* When stored into global hash */ \
+ void (*freed)(const eattr *ea); /* When released from global hash */ \
+
+ EA_CLASS_INSIDE;
+};
+
+struct ea_class_ref {
+ resource r;
+ struct ea_class *class;
+};
+
+void ea_register_init(struct ea_class *);
+struct ea_class_ref *ea_register_alloc(pool *, struct ea_class);
+struct ea_class_ref *ea_ref_class(pool *, struct ea_class *); /* Reference for an attribute alias */
+
+#define EA_REGISTER_ALL_HELPER(x) ea_register_init(x);
+#define EA_REGISTER_ALL(...) MACRO_FOREACH(EA_REGISTER_ALL_HELPER, __VA_ARGS__)
+
+struct ea_class *ea_class_find_by_id(uint id);
+struct ea_class *ea_class_find_by_name(const char *name);
+static inline struct ea_class *ea_class_self(struct ea_class *self) { return self; }
+#define ea_class_find(_arg) _Generic((_arg), \
+ uint: ea_class_find_by_id, \
+ word: ea_class_find_by_id, \
+ char *: ea_class_find_by_name, \
+ const char *: ea_class_find_by_name, \
+ struct ea_class *: ea_class_self)(_arg)
+
+struct ea_walk_state {
+ ea_list *eattrs; /* Current ea_list, initially set by caller */
+ eattr *ea; /* Current eattr, initially NULL */
+ u32 visited[4]; /* Bitfield, limiting max to 128 */
+};
+
+#define ea_find(_l, _arg) _Generic((_arg), uint: ea_find_by_id, struct ea_class *: ea_find_by_class, char *: ea_find_by_name)(_l, _arg)
+eattr *ea_find_by_id(ea_list *, unsigned ea);
+static inline eattr *ea_find_by_class(ea_list *l, const struct ea_class *def)
+{ return ea_find_by_id(l, def->id); }
+static inline eattr *ea_find_by_name(ea_list *l, const char *name)
+{
+ const struct ea_class *def = ea_class_find_by_name(name);
+ return def ? ea_find_by_class(l, def) : NULL;
+}
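Thanks to the _Generic dispatch, ea_find accepts a numeric id, a class pointer, or an attribute name; a usage sketch (`r` is a hypothetical rte pointer):

    eattr *a = ea_find(r->attrs, &ea_gen_nexthop);  /* lookup by class */
    eattr *b = ea_find(r->attrs, "bgp_path");       /* lookup by name; NULL if the class is unknown */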
+
+#define ea_get_int(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type & EAF_EMBEDDED); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? ea->u.data : (_def)); \
+ })
+
+#define ea_get_ip(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type == T_IP); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? *((const ip_addr *) ea->u.ptr->data) : (_def)); \
+ })
+
+eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
+void ea_dump(ea_list *);
+int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
+uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
+ea_list *ea_append(ea_list *to, ea_list *what);
+void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
+
+/* Normalize ea_list; allocates the result from tmp_linpool */
+ea_list *ea_normalize(ea_list *e, int overlay);
+
+uint ea_list_size(ea_list *);
+void ea_list_copy(ea_list *dest, ea_list *src, uint size);
+
+#define EA_LOCAL_LIST(N) struct { ea_list l; eattr a[N]; }
+
+#define EA_LITERAL_EMBEDDED(_class, _flags, _val) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(_type & EAF_EMBEDDED); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.i = _val); \
+ })
+
+#define EA_LITERAL_STORE_ADATA(_class, _flags, _buf, _len) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = tmp_store_adata((_buf), (_len))); \
+ })
+
+#define EA_LITERAL_DIRECT_ADATA(_class, _flags, _adata) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = _adata); \
+ })
+
+#define EA_LITERAL_GENERIC(_id, _type, _flags, ...) \
+ ((eattr) { .id = _id, .type = _type, .flags = _flags, __VA_ARGS__ })
+
+static inline eattr *
+ea_set_attr(ea_list **to, eattr a)
+{
+ EA_LOCAL_LIST(1) *ea = tmp_alloc(sizeof(*ea));
+ *ea = (typeof(*ea)) {
+ .l.flags = EALF_SORTED,
+ .l.count = 1,
+ .l.next = *to,
+ .a[0] = a,
+ };
+
+ *to = &ea->l;
+ return &ea->a[0];
+}
+
+static inline void
+ea_unset_attr(ea_list **to, _Bool local, const struct ea_class *def)
+{
+ ea_set_attr(to, EA_LITERAL_GENERIC(def->id, 0, 0,
+ .fresh = local, .originated = local, .undef = 1));
+}
+
+static inline void
+ea_set_attr_u32(ea_list **to, const struct ea_class *def, uint flags, u64 data)
+{ ea_set_attr(to, EA_LITERAL_EMBEDDED(def, flags, data)); }
+
+static inline void
+ea_set_attr_data(ea_list **to, const struct ea_class *def, uint flags, const void *data, uint len)
+{ ea_set_attr(to, EA_LITERAL_STORE_ADATA(def, flags, data, len)); }
+
+static inline void
+ea_copy_attr(ea_list **to, ea_list *from, const struct ea_class *def)
+{
+ eattr *e = ea_find_by_class(from, def);
+ if (e)
+ if (e->type & EAF_EMBEDDED)
+ ea_set_attr_u32(to, def, e->flags, e->u.data);
+ else
+ ea_set_attr_data(to, def, e->flags, e->u.ptr->data, e->u.ptr->length);
+ else
+ ea_unset_attr(to, 0, def);
+}
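A usage sketch combining the setters above, overriding one embedded attribute on an overlay list (ea_gen_preference is declared later in this header; `r` is a hypothetical rte pointer):

    ea_list *eal = r->attrs;                            /* use the current list as the base */
    ea_set_attr_u32(&eal, &ea_gen_preference, 0, 100);  /* prepend preference = 100 */
    /* eal now shadows the old value; normalize/lookup before storing it */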
+
+/*
+ * Common route attributes
+ */
+
+/* Preference: first-order comparison */
+extern struct ea_class ea_gen_preference;
+static inline u32 rt_get_preference(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_preference, 0); }
+
+/* IGP metric: second-order comparison */
+extern struct ea_class ea_gen_igp_metric;
+u32 rt_get_igp_metric(const rte *rt);
+#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
+ protocol-specific metric is available */
+
+/* From: Advertising router */
+extern struct ea_class ea_gen_from;
+
+/* Source: An old method to determine the route source protocol and kind.
+ * To be superseded in the near future by something more informative. */
+extern struct ea_class ea_gen_source;
+static inline u32 rt_get_source_attr(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_source, 0); }
+
+/* Flowspec validation result */
+enum flowspec_valid {
+ FLOWSPEC_UNKNOWN = 0,
+ FLOWSPEC_VALID = 1,
+ FLOWSPEC_INVALID = 2,
+ FLOWSPEC__MAX,
+};
+
+extern const char * flowspec_valid_names[FLOWSPEC__MAX];
+static inline const char *flowspec_valid_name(enum flowspec_valid v)
+{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
+
+extern struct ea_class ea_gen_flowspec_valid;
+static inline enum flowspec_valid rt_get_flowspec_valid(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
+
+/* Next hop: For now, stored as adata */
+extern struct ea_class ea_gen_nexthop;
+
+static inline void ea_set_dest(struct ea_list **to, uint flags, uint dest)
+{
+ struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(dest);
+ ea_set_attr_data(to, &ea_gen_nexthop, flags, &nhad.ad.data, nhad.ad.length);
+}
+
+/* Next hop structures */
+
+#define NEXTHOP_ALIGNMENT (_Alignof(struct nexthop))
+#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
+#define NEXTHOP_SIZE(_nh) NEXTHOP_SIZE_CNT(((_nh)->labels))
+#define NEXTHOP_SIZE_CNT(cnt) BIRD_ALIGN((sizeof(struct nexthop) + sizeof(u32) * (cnt)), NEXTHOP_ALIGNMENT)
+#define nexthop_size(nh) NEXTHOP_SIZE((nh))
+
+#define NEXTHOP_NEXT(_nh) ((void *) (_nh) + NEXTHOP_SIZE(_nh))
+#define NEXTHOP_END(_nhad) ((_nhad)->ad.data + (_nhad)->ad.length)
+#define NEXTHOP_VALID(_nh, _nhad) ((void *) (_nh) < (void *) NEXTHOP_END(_nhad))
+#define NEXTHOP_ONE(_nhad) (NEXTHOP_NEXT(&(_nhad)->nh) == NEXTHOP_END(_nhad))
+
+#define NEXTHOP_WALK(_iter, _nhad) for ( \
+ struct nexthop *_iter = &(_nhad)->nh; \
+ (void *) _iter < (void *) NEXTHOP_END(_nhad); \
+ _iter = NEXTHOP_NEXT(_iter))
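A usage sketch of the walker, counting nexthops that carry a gateway (`nhad` is assumed to be a reachable nexthop_adata):

    uint n = 0;
    NEXTHOP_WALK(nh, nhad)
      if (ipa_nonzero(nh->gw))
        n++;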
+
+
+static inline int nexthop_same(struct nexthop_adata *x, struct nexthop_adata *y)
+{ return adata_same(&x->ad, &y->ad); }
+struct nexthop_adata *nexthop_merge(struct nexthop_adata *x, struct nexthop_adata *y, int max, linpool *lp);
+struct nexthop_adata *nexthop_sort(struct nexthop_adata *x, linpool *lp);
+int nexthop_is_sorted(struct nexthop_adata *x);
+
+#define NEXTHOP_IS_REACHABLE(nhad) ((nhad)->ad.length > NEXTHOP_DEST_SIZE)
+
+/* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
+static inline int rte_is_reachable(rte *r)
+{
+ eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
+ if (!nhea)
+ return 0;
+
+ struct nexthop_adata *nhad = (void *) nhea->u.ptr;
+ return NEXTHOP_IS_REACHABLE(nhad);
+}
+
+static inline int nhea_dest(eattr *nhea)
+{
+ if (!nhea)
+ return RTD_NONE;
+
+ struct nexthop_adata *nhad = (struct nexthop_adata *) nhea->u.ptr;
+ if (NEXTHOP_IS_REACHABLE(nhad))
+ return RTD_UNICAST;
+ else
+ return nhad->dest;
+}
+
+static inline int rte_dest(const rte *r)
+{
+ return nhea_dest(ea_find(r->attrs, &ea_gen_nexthop));
+}
+
+void rta_init(void);
+ea_list *ea_lookup(ea_list *, int overlay); /* Get a cached (and normalized) variant of this attribute list */
+static inline int ea_is_cached(const ea_list *r) { return r->flags & EALF_CACHED; }
+static inline struct ea_storage *ea_get_storage(ea_list *r)
+{
+ ASSERT_DIE(ea_is_cached(r));
+ return SKIP_BACK(struct ea_storage, l[0], r);
+}
+
+static inline ea_list *ea_clone(ea_list *r) {
+ ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
+ return r;
+}
+void ea__free(struct ea_storage *r);
+static inline void ea_free(ea_list *l) {
+ if (!l) return;
+ struct ea_storage *r = ea_get_storage(l);
+ if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel)) ea__free(r);
+}
+
+void ea_dump(ea_list *);
+void ea_dump_all(void);
+void ea_show_list(struct cli *, ea_list *);
+
+#define rta_lookup ea_lookup
+#define rta_is_cached ea_is_cached
+#define rta_clone ea_clone
+#define rta_free ea_free
+
+#endif
--- /dev/null
+/*
+ * BIRD Internet Routing Daemon -- Internal Data Types
+ *
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_TYPE_H_
+#define _BIRD_TYPE_H_
+
+#include "lib/birdlib.h"
+#include "lib/attrs.h"
+
+union bval {
+#define BVAL_ITEMS \
+ struct { \
+ u32 data; /* Integer type inherited from eattrs */ \
+ PADDING(data, 0, 4); /* Must be padded on 64-bits */ \
+ }; \
+ struct { \
+ u32 i; /* Integer type inherited from filters */ \
+ PADDING(i, 0, 4); /* Must be padded on 64-bits */ \
+ }; \
+ const struct adata *ptr; /* Generic attribute data inherited from eattrs */ \
+ const struct adata *ad; /* Generic attribute data inherited from filters */ \
+
+ BVAL_ITEMS;
+};
+
+union bval_long {
+ union bval bval; /* For direct assignments */
+ BVAL_ITEMS; /* For item-wise access */
+
+ u64 ec;
+ lcomm lc;
+ ip_addr ip;
+ const net_addr *net;
+ const char *s;
+ const struct adata *bs;
+ const struct f_tree *t;
+ const struct f_trie *ti;
+ const struct f_path_mask *path_mask;
+ struct f_path_mask_item pmi;
++ struct rte *rte;
+};
+
+
+/* Internal types */
+enum btype {
+/* Nothing. Simply nothing. */
+ T_VOID = 0,
+ T_NONE = 0xff,
+
+/* Something but inaccessible. */
+ T_OPAQUE = 0x02, /* Opaque byte string (not filterable) */
+ T_IFACE = 0x0c, /* Pointer to an interface (inside adata) */
- T_NEXTHOP_LIST = 0x2c, /* The whole nexthop block */
- T_HOSTENTRY = 0x2e, /* Hostentry with possible MPLS labels */
++ T_ROUTE = 0x6a, /* One route pointer */
++ T_NEXTHOP_LIST = 0x6c, /* The whole nexthop block */
++ T_HOSTENTRY = 0x6e, /* Hostentry with possible MPLS labels */
+
+/* Types shared with eattrs */
+ T_INT = 0x01, /* 32-bit unsigned integer number */
+ T_IP = 0x04, /* IP address */
+ T_QUAD = 0x05, /* Router ID (IPv4 address) */
+ T_PATH = 0x06, /* BGP AS path (encoding per RFC 1771:4.3) */
+ T_CLIST = 0x0a, /* Set of u32's (e.g., a community list) */
+ T_ECLIST = 0x0e, /* Set of pairs of u32's - ext. community list */
+ T_LCLIST = 0x08, /* Set of triplets of u32's - large community list */
+
+ T_ENUM_BGP_ORIGIN = 0x11, /* BGP Origin enum */
+ T_ENUM_RA_PREFERENCE = 0x13, /* RA Preference enum */
+ T_ENUM_FLOWSPEC_VALID = 0x15, /* Flowspec validation result */
+
+#define EAF_TYPE__MAX 0x1f
+#define EAF_EMBEDDED 0x01 /* Data stored in eattr.u.data (part of type spec) */
+ /* Otherwise, attribute data is adata */
+
+/* Other user visible types which fit in int */
+ T_BOOL = 0xa0,
+ T_PAIR = 0xa4, /* Notice that pair is stored as integer: first << 16 | second */
+
+/* Put enumerational types in 0x10..0x3f range */
+ T_ENUM_LO = 0x10,
+ T_ENUM_HI = 0x3f,
+
+ T_ENUM_RTS = 0x31,
+ T_ENUM_SCOPE = 0x33,
+ T_ENUM_RTD = 0x37,
+ T_ENUM_ROA = 0x39,
+ T_ENUM_NETTYPE = 0x3b,
+ T_ENUM_AF = 0x3d,
+
+/* new enums go here */
+
+#define T_ENUM T_ENUM_LO ... T_ENUM_HI
+
+/* Bigger ones */
+ T_NET = 0xb0,
+ T_STRING = 0xb4,
+ T_PATH_MASK = 0xb8, /* mask for BGP path */
+ T_EC = 0xbc, /* Extended community value, u64 */
+ T_LC = 0xc0, /* Large community value, lcomm */
+ T_RD = 0xc4, /* Route distinguisher for VPN addresses */
+ T_PATH_MASK_ITEM = 0xc8, /* Path mask item for path mask constructors */
+ T_BYTESTRING = 0xcc,
+
+ T_SET = 0x80,
+ T_PREFIX_SET = 0x84,
+} PACKED;
+
+typedef enum btype btype;
+
+STATIC_ASSERT(sizeof(btype) == sizeof(byte));
+
+
+#endif
#include "proto/bgp/bgp.h"
#endif
-pool *rt_table_pool;
+#include <stdatomic.h>
-static slab *rte_slab;
-static linpool *rte_update_pool;
+pool *rt_table_pool;
list routing_tables;
- static int rte_same(const rte *x, const rte *y);
+list deleted_routing_tables;
+
+struct rt_cork rt_cork;
+
+/* Data structures for export journal */
+#define RT_PENDING_EXPORT_ITEMS ((page_size - sizeof(struct rt_export_block)) / sizeof(struct rt_pending_export))
+
+struct rt_export_block {
+ node n;
+ _Atomic u32 end;
+ _Atomic _Bool not_last;
+ struct rt_pending_export export[];
+};
+
+static void rt_free_hostcache(struct rtable_private *tab);
+static void rt_update_hostcache(void *tab);
+static void rt_next_hop_update(struct rtable_private *tab);
+static void rt_nhu_uncork(void *_tab);
+static inline void rt_next_hop_resolve_rte(rte *r);
+static inline void rt_flowspec_resolve_rte(rte *r, struct channel *c);
+static void rt_refresh_trace(struct rtable_private *tab, struct rt_import_hook *ih, const char *msg);
+static inline void rt_prune_table(struct rtable_private *tab);
+static void rt_kick_prune_timer(struct rtable_private *tab);
+static void rt_feed_by_fib(void *);
+static void rt_feed_by_trie(void *);
+static void rt_feed_equal(void *);
+static void rt_feed_for(void *);
+static void rt_check_cork_low(struct rtable_private *tab);
+static void rt_check_cork_high(struct rtable_private *tab);
+static void rt_cork_release_hook(void *);
+static void rt_shutdown(void *);
+static void rt_delete(void *);
+
+static void rt_export_used(struct rt_table_exporter *, const char *, const char *);
+static void rt_export_cleanup(struct rtable_private *tab);
+
++int rte_same(const rte *x, const rte *y);
+
+const char *rt_import_state_name_array[TIS_MAX] = {
+ [TIS_DOWN] = "DOWN",
+ [TIS_UP] = "UP",
+ [TIS_STOP] = "STOP",
+ [TIS_FLUSHING] = "FLUSHING",
+ [TIS_WAITING] = "WAITING",
+ [TIS_CLEARED] = "CLEARED",
+};
+
+const char *rt_export_state_name_array[TES_MAX] = {
+ [TES_DOWN] = "DOWN",
+ [TES_HUNGRY] = "HUNGRY",
+ [TES_FEEDING] = "FEEDING",
+ [TES_READY] = "READY",
+ [TES_STOP] = "STOP"
+};
+
+const char *rt_import_state_name(u8 state)
+{
+ if (state >= TIS_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_import_state_name_array[state];
+}
+
+const char *rt_export_state_name(u8 state)
+{
+ if (state >= TES_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_export_state_name_array[state];
+}
+
+static struct hostentry *rt_get_hostentry(struct rtable_private *tab, ip_addr a, ip_addr ll, rtable *dep);
+
+static inline rtable *rt_priv_to_pub(struct rtable_private *tab) { return RT_PUB(tab); }
+static inline rtable *rt_pub_to_pub(rtable *tab) { return tab; }
+#define RT_ANY_TO_PUB(tab) _Generic((tab),rtable*:rt_pub_to_pub,struct rtable_private*:rt_priv_to_pub)((tab))
-static void rt_free_hostcache(rtable *tab);
-static void rt_notify_hostcache(rtable *tab, net *net);
-static void rt_update_hostcache(rtable *tab);
-static void rt_next_hop_update(rtable *tab);
-static inline void rt_prune_table(rtable *tab);
-static inline void rt_schedule_notify(rtable *tab);
-static void rt_flowspec_notify(rtable *tab, net *net);
-static void rt_kick_prune_timer(rtable *tab);
+#define rt_trace(tab, level, fmt, args...) do {\
+ rtable *t = RT_ANY_TO_PUB((tab)); \
+ if (t->config->debug & (level)) \
+ log(L_TRACE "%s: " fmt, t->name, ##args); \
+} while (0)
+#define req_trace(r, level, fmt, args...) do { \
+ if (r->trace_routes & (level)) \
+ log(L_TRACE "%s: " fmt, r->name, ##args); \
+} while (0)
+
+#define channel_trace(c, level, fmt, args...) do {\
+ if ((c->debug & (level)) || (c->proto->debug & (level))) \
+ log(L_TRACE "%s.%s: " fmt, c->proto->name, c->name, ##args);\
+} while (0)
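A usage sketch of the table-level tracing macro (message and debug flag chosen for illustration):

    rt_trace(tab, D_EVENTS, "Prune done");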
static void
net_init_with_trie(struct fib *f, void *N)
return;
}
- int new_ok = rte_is_ok(new);
- int old_ok = rte_is_ok(old);
+ RT_UNLOCK(tab);
+ }
+
+ int used = 0;
+ int no_next = 0;
+
+ /* Process the export */
+ for (uint i=0; i<RT_EXPORT_BULK; i++)
+ {
+ used += rte_export(c, c->rpe_next);
- struct channel_limit *l = &c->rx_limit;
- if (l->action && !old && new && !c->in_table)
+ if (!c->rpe_next)
{
- u32 all_routes = stats->imp_routes + stats->filt_routes;
+ no_next = 1;
+ break;
+ }
+ }
- if (all_routes >= l->limit)
- channel_notify_limit(c, l, PLD_RX, all_routes);
+ if (used)
+ RT_LOCKED(tab, t)
+ if (no_next || t->cork_active)
+ rt_export_used(c->table, c->h.req->name, no_next ? "finished export bulk" : "cork active");
- if (l->state == PLS_BLOCKED)
- {
- /* In receive limit the situation is simple, old is NULL so
- we just free new and exit like nothing happened */
+ rt_send_export_event(&c->h);
+}
- stats->imp_updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
- rte_free_quick(new);
- return;
- }
+
+static inline int
+rte_validate(struct channel *ch, rte *e)
+{
+ int c;
+ const net_addr *n = e->net;
+
+ if (!net_validate(n))
+ {
+ log(L_WARN "Ignoring bogus prefix %N received via %s",
+ n, ch->proto->name);
+ return 0;
+ }
+
+ /* FIXME: better handling of different nettypes */
+ c = !net_is_flow(n) ?
+ net_classify(n): (IADDR_HOST | SCOPE_UNIVERSE);
+ if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
+ {
+ log(L_WARN "Ignoring bogus route %N received via %s",
+ n, ch->proto->name);
+ return 0;
+ }
+
+ if (net_type_match(n, NB_DEST))
+ {
+ eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
+ int dest = nhea_dest(nhea);
+
+ if (dest == RTD_NONE)
+ {
+ log(L_WARN "Ignoring route %N with no destination received via %s",
+ n, ch->proto->name);
+ return 0;
}
- l = &c->in_limit;
- if (l->action && !old_ok && new_ok)
+ if ((dest == RTD_UNICAST) &&
+ !nexthop_is_sorted((struct nexthop_adata *) nhea->u.ptr))
{
- if (stats->imp_routes >= l->limit)
- channel_notify_limit(c, l, PLD_IN, stats->imp_routes);
+ log(L_WARN "Ignoring unsorted multipath route %N received via %s",
+ n, ch->proto->name);
+ return 0;
+ }
+ }
+ else if (ea_find(e->attrs, &ea_gen_nexthop))
+ {
+ log(L_WARN "Ignoring route %N having a nexthop attribute received via %s",
+ n, ch->proto->name);
+ return 0;
+ }
- if (l->state == PLS_BLOCKED)
- {
- /* In import limit the situation is more complicated. We
- shouldn't just drop the route, we should handle it like
- it was filtered. We also have to continue the route
- processing if old or new is non-NULL, but we should exit
- if both are NULL as this case is probably assumed to be
- already handled. */
+ return 1;
+}
- static int
- stats->imp_updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
++int
+rte_same(const rte *x, const rte *y)
+{
+ /* rte.flags / rte.pflags are not checked, as they are internal to rtable */
+ return
- x->attrs == y->attrs &&
++ (
++ (x->attrs == y->attrs) ||
++ ((!(x->attrs->flags & EALF_CACHED) || !(y->attrs->flags & EALF_CACHED)) && ea_same(x->attrs, y->attrs))
++ ) &&
+ x->src == y->src &&
+ rte_is_filtered(x) == rte_is_filtered(y);
+}
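For illustration: two routes sharing one cached ea_list compare equal by the pointer test alone, while a freshly modified (uncached) attribute list falls back to the deep ea_same() walk, so semantically identical but not-yet-interned attribute lists still count as the same route.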
- if (c->in_keep_filtered)
- new->flags |= REF_FILTERED;
- else
- { rte_free_quick(new); new = NULL; }
+static inline int rte_is_ok(const rte *e) { return e && !rte_is_filtered(e); }
- /* Note that old && !new could be possible when
- c->in_keep_filtered changed in the recent past. */
+static int
+rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net, rte *new, struct rte_src *src)
+{
+ struct rt_import_request *req = c->req;
+ struct rt_import_stats *stats = &c->stats;
+ struct rte_storage *old_best_stored = net->routes, *old_stored = NULL;
+ const rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
+ const rte *old = NULL;
+
+ /* If the new route is identical to the old one, we find the attributes in
+ * cache and clone these with no performance drop. OTOH, if we were to lookup
+ * the attributes, such a route definitely hasn't been anywhere yet,
+ * therefore it's definitely worth the time. */
+ struct rte_storage *new_stored = NULL;
+ if (new)
+ {
+ new_stored = rte_store(new, net, table);
+ new = RTES_WRITE(new_stored);
+ }
+
+ /* Find and remove original route from the same protocol */
+ struct rte_storage **before_old = rte_find(net, src);
- if (!old && !new)
- return;
+ if (*before_old)
+ {
+ old = &(old_stored = (*before_old))->rte;
+
+ /* If there is the same route in the routing table but from
+ * a different sender, then there are two paths from the
+ * source protocol to this routing table through transparent
+ * pipes, which is not allowed.
+ * We log that and ignore the route. */
+ if (old->sender != c)
+ {
+ if (!old->generation && !new->generation)
+ bug("Two protocols claim to author a route with the same rte_src in table %s: %N %s/%u:%u",
+ c->table->name, net->n.addr, old->src->owner->name, old->src->private_id, old->src->global_id);
- new_ok = 0;
- goto skip_stats1;
+ log_rl(&table->rl_pipe, L_ERR "Route source collision in table %s: %N %s/%u:%u",
+ c->table->name, net->n.addr, old->src->owner->name, old->src->private_id, old->src->global_id);
}
+
+ if (new && rte_same(old, &new_stored->rte))
+ {
+ /* No changes, ignore the new route and refresh the old one */
+ old_stored->stale_cycle = new->stale_cycle;
+
+ if (!rte_is_filtered(new))
+ {
+ stats->updates_ignored++;
+ rt_rte_trace_in(D_ROUTES, req, new, "ignored");
+ }
+
+ /* We need to free the already stored route here before returning */
+ rte_free(new_stored);
+ return 0;
+ }
+
+ *before_old = (*before_old)->next;
+ table->rt_count--;
}
+ if (!old && !new)
+ {
+ stats->withdraws_ignored++;
+ return 0;
+ }
+
+ /* If rejected by import limit, we need to pretend there is no route */
+ if (req->preimport && (req->preimport(req, new, old) == 0))
+ {
+ rte_free(new_stored);
+ new_stored = NULL;
+ new = NULL;
+ }
+
+ int new_ok = rte_is_ok(new);
+ int old_ok = rte_is_ok(old);
+
if (new_ok)
- stats->imp_updates_accepted++;
+ stats->updates_accepted++;
else if (old_ok)
- stats->imp_withdraws_accepted++;
+ stats->withdraws_accepted++;
else
- stats->imp_withdraws_ignored++;
+ stats->withdraws_ignored++;
if (old_ok || new_ok)
table->last_rt_change = current_time();