HASH_DEFINE_REHASH_FN(SYM, struct symbol)
-struct sym_scope *global_root_scope;
+/* Global symbol scopes */
pool *global_root_scope_pool;
linpool *global_root_scope_linpool;
- static struct sym_scope
++struct sym_scope
+ global_root_scope = {
+ },
+ global_filter_scope = {
+ .next = &global_root_scope,
+ };
+
+/* Local symbol scope: TODO this isn't thread-safe */
+struct sym_scope *conf_this_scope;
linpool *cfg_mem;
struct proto_spec ps;
struct channel_limit cl;
struct timeformat *tf;
- mpls_label_stack *mls;
+ struct settle_config settle;
+ struct adata *ad;
const struct adata *bs;
+ struct aggr_item_node *ai;
}
%token END CLI_MARKER INVALID_TOKEN ELSECOL DDOT
fi
fi
-all_protocols="aggregator $proto_bfd babel bgp mrt ospf perf pipe radv rip rpki static"
-
+# temporarily removed "mrt" from all_protocols to speed up 3.0-alpha1 release
- all_protocols="bfd babel bgp ospf perf pipe radv rip rpki static"
++all_protocols="aggregator bfd babel bgp ospf perf pipe radv rip rpki static"
all_protocols=`echo $all_protocols | sed 's/ /,/g'`
if test "$with_protocols" = all ; then
cf_error("Too many nested method calls");
struct sym_scope *scope = f_type_method_scope(object->type);
-- if (!scope && object->type != T_ROUTE)
++ if (!scope->hash.count && !scope->next)
cf_error("No methods defined for type %s", f_type_name(object->type));
- if (!scope)
- scope = config->root_scope->next;
-
+ /* Replace the current symbol scope with the appropriate method scope
+    for the given type. */
FM = (struct f_method_scope) {
.object = object,
.main = new_config->current_scope,
sym->method = dsc;
}
++extern struct sym_scope global_filter_scope;
++
void f_type_methods_register(void)
{
struct f_method *method;
for (uint i = 0; i < ARRAY_SIZE(f_type_method_scopes); i++)
f_type_method_scopes[i].readonly = 1;
++
++ f_type_method_scopes[T_ROUTE].next = &global_filter_scope;
}
/* Line dumpers */
--- /dev/null
- #define RTS_MAX 16
+/*
+ * BIRD Internet Routing Daemon -- Routing data structures
+ *
+ * (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_LIB_ROUTE_H_
+#define _BIRD_LIB_ROUTE_H_
+
+#undef RT_SOURCE_DEBUG
+
+#include "lib/type.h"
+#include "lib/rcu.h"
+#include "lib/hash.h"
+#include "lib/event.h"
+
+struct network;
+struct proto;
+struct cli;
+struct rtable_private;
+struct rte_storage;
+
+#define RTE_IN_TABLE_WRITABLE \
+ byte pflags; /* Protocol-specific flags; may change in-table (!) */ \
+ u8 stale_cycle; /* Auxiliary value for route refresh; may change in-table (!) */ \
+
+typedef struct rte {
+ RTE_IN_TABLE_WRITABLE;
+ byte flags; /* Table-specific flags */
+ u8 generation; /* If this route import is based on another previously exported route,
+ this value should be 1 + MAX(generation of the parent routes).
+ Otherwise the route is independent and this value is zero. */
+ u32 id; /* Table specific route id */
+ struct ea_list *attrs; /* Attributes of this route */
+ const net_addr *net; /* Network this RTE belongs to */
+ struct rte_src *src; /* Route source that created the route */
+ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
+ btime lastmod; /* Last modified (set by table) */
+} rte;
+
+#define REF_FILTERED 2 /* Route is rejected by import filter */
+#define REF_PENDING 32 /* Route has not propagated completely yet */
+
+/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
+static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
+
+/* Route just has REF_FILTERED flag */
+static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
+
+/* Strip the route of the table-specific values */
+static inline rte rte_init_from(const rte *r)
+{
+ return (rte) {
+ .attrs = r->attrs,
+ .net = r->net,
+ .src = r->src,
+ };
+}
+
+int rte_same(const rte *, const rte *);
+
+struct rte_src {
+ struct rte_src *next; /* Hash chain */
+ struct rte_owner *owner; /* Route source owner */
+ u32 private_id; /* Private ID, assigned by the protocol */
+ u32 global_id; /* Globally unique ID of the source */
+ _Atomic u64 uc; /* Use count */
+};
+
+struct rte_owner_class {
+ void (*get_route_info)(const rte *, byte *buf); /* Get route information (for `show route' command) */
+ int (*rte_better)(const rte *, const rte *);
+ int (*rte_mergable)(const rte *, const rte *);
+ u32 (*rte_igp_metric)(const rte *);
+};
+
+struct rte_owner {
+ struct rte_owner_class *class;
+ int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte_storage *new, struct rte_storage *, struct rte_storage *);
+ HASH(struct rte_src) hash;
+ const char *name;
+ u32 hash_key;
+ u32 uc;
+ event_list *list;
+ event *prune;
+ event *stop;
+};
+
+DEFINE_DOMAIN(attrs);
+extern DOMAIN(attrs) attrs_domain;
+
+#define RTA_LOCK LOCK_DOMAIN(attrs, attrs_domain)
+#define RTA_UNLOCK UNLOCK_DOMAIN(attrs, attrs_domain)
+
+#define RTE_SRC_PU_SHIFT 44
+#define RTE_SRC_IN_PROGRESS (1ULL << RTE_SRC_PU_SHIFT)
+
+/* Get a route source. This also locks the source, therefore the caller has to
+ * unlock the source after the route has been propagated. */
+struct rte_src *rt_get_source_o(struct rte_owner *o, u32 id);
+#define rt_get_source(p, id) rt_get_source_o(&(p)->sources, (id))
+
+struct rte_src *rt_find_source_global(u32 id);
+
+#ifdef RT_SOURCE_DEBUG
+#define rt_lock_source _rt_lock_source_internal
+#define rt_unlock_source _rt_unlock_source_internal
+#endif
+
+static inline void rt_lock_source(struct rte_src *src)
+{
+ /* Locking a source is trivial; somebody already holds it so we just increase
+ * the use count. Nothing can be freed underneath our hands. */
+ u64 uc = atomic_fetch_add_explicit(&src->uc, 1, memory_order_acq_rel);
+ ASSERT_DIE(uc > 0);
+}
+
+static inline void rt_unlock_source(struct rte_src *src)
+{
+ /* Unlocking is tricky. We do it lockless so at the same time, the prune
+ * event may be running, therefore if the unlock gets us to zero, it must be
+ * the last thing in this routine, otherwise the prune routine may find the
+ * source's usecount zeroed, freeing it prematurely.
+ *
+ * The usecount is split into two parts:
+ * the top 20 bits are an in-progress indicator
+ * the bottom 44 bits keep the actual usecount.
+ *
+ * Therefore at most 1 million writers can simultaneously unlock the same
+ * source, while at most ~17T different routes can reference it. Both limits
+ * are absurdly high from a 2022 point of view. Let's assume that by the time
+ * 17T routes or 1M writers become real, we'll also have 128-bit atomic
+ * variables in the C standard. */
+
+ /* First, we push the in-progress indicator */
+ u64 uc = atomic_fetch_add_explicit(&src->uc, RTE_SRC_IN_PROGRESS, memory_order_acq_rel);
+
+ /* Then we split the indicator to its parts. Remember, we got the value before the operation happened. */
+ u64 pending = (uc >> RTE_SRC_PU_SHIFT) + 1;
+ uc &= RTE_SRC_IN_PROGRESS - 1;
+
+ /* We reuse the RCU critical section indicator to make the prune event wait
+ * until we finish here in the rare case we get preempted. */
+ rcu_read_lock();
+
+ /* Obviously, there can't be more pending unlocks than the usecount itself */
+ if (uc == pending)
+ /* If we're the last unlocker, schedule the owner's prune event */
+ ev_send(src->owner->list, src->owner->prune);
+ else
+ ASSERT_DIE(uc > pending);
+
+ /* And now, finally, simultaneously pop the in-progress indicator and the
+ * usecount, possibly allowing the source pruning routine to free this structure */
+ atomic_fetch_sub_explicit(&src->uc, RTE_SRC_IN_PROGRESS + 1, memory_order_acq_rel);
+
+ /* ... and to reduce the load a bit, the source pruning routine would rather wait
+ * for RCU synchronization than spin in a busy loop. */
+ rcu_read_unlock();
+}
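A minimal sketch, not part of the patch, of how the single 64-bit counter described above decomposes, assuming only the RTE_SRC_PU_SHIFT and RTE_SRC_IN_PROGRESS constants defined in this header; the helper name is made up for illustration:

static inline void rte_src_uc_split_sketch(u64 uc, u64 *pending, u64 *usecount)
{
  /* Illustrative only: split the packed counter into its two parts */
  *pending = uc >> RTE_SRC_PU_SHIFT;            /* top 20 bits: unlocks in progress */
  *usecount = uc & (RTE_SRC_IN_PROGRESS - 1);   /* bottom 44 bits: actual use count */
}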
+
+#ifdef RT_SOURCE_DEBUG
+#undef rt_lock_source
+#undef rt_unlock_source
+
+#define rt_lock_source(x) ( log(L_INFO "Lock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_lock_source_internal(x) )
+#define rt_unlock_source(x) ( log(L_INFO "Unlock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_unlock_source_internal(x) )
+#endif
+
+void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
+void rt_destroy_sources(struct rte_owner *, event *);
+
+/*
+ * Route Attributes
+ *
+ * Beware: All standard BGP attributes must be represented here instead
+ * of making them local to the route. This is needed to ensure proper
+ * construction of BGP route attribute lists.
+ */
+
+/* Nexthop structure */
+struct nexthop {
+ ip_addr gw; /* Next hop */
+ struct iface *iface; /* Outgoing interface */
+ byte flags;
+ byte weight;
+ byte labels; /* Number of all labels */
+ u32 label[0];
+};
+
+/* For packing one into eattrs */
+struct nexthop_adata {
+ struct adata ad;
+ /* There is either a set of nexthops or a special destination (RTD_*) */
+ union {
+ struct nexthop nh;
+ uint dest;
+ };
+};
+
+#define NEXTHOP_DEST_SIZE (OFFSETOF(struct nexthop_adata, dest) + sizeof(uint) - OFFSETOF(struct adata, data))
+#define NEXTHOP_DEST_LITERAL(x) ((struct nexthop_adata) { \
+ .ad.length = NEXTHOP_DEST_SIZE, .dest = (x), })
+
+#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
+
+
+#define RTS_STATIC 1 /* Normal static route */
+#define RTS_INHERIT 2 /* Route inherited from kernel */
+#define RTS_DEVICE 3 /* Device route */
+#define RTS_STATIC_DEVICE 4 /* Static device route */
+#define RTS_REDIRECT 5 /* Learned via redirect */
+#define RTS_RIP 6 /* RIP route */
+#define RTS_OSPF 7 /* OSPF route */
+#define RTS_OSPF_IA 8 /* OSPF inter-area route */
+#define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
+#define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
+#define RTS_BGP 11 /* BGP route */
+#define RTS_PIPE 12 /* Inter-table wormhole */
+#define RTS_BABEL 13 /* Babel route */
+#define RTS_RPKI 14 /* Route Origin Authorization */
+#define RTS_PERF 15 /* Perf checker */
++#define RTS_AGGREGATED 16 /* Aggregated route */
++#define RTS_MAX 17
+
+#define RTD_NONE 0 /* Undefined next hop */
+#define RTD_UNICAST 1 /* A standard next hop */
+#define RTD_BLACKHOLE 2 /* Silently drop packets */
+#define RTD_UNREACHABLE 3 /* Reject as unreachable */
+#define RTD_PROHIBIT 4 /* Administratively prohibited */
+#define RTD_MAX 5
+
+extern const char * rta_dest_names[RTD_MAX];
+
+static inline const char *rta_dest_name(uint n)
+{ return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
+
+
+/*
+ * Extended Route Attributes
+ */
+
+typedef struct eattr {
+ word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
+ byte flags; /* Protocol-dependent flags */
+ byte type; /* Attribute type */
+ byte rfu:5;
+ byte originated:1; /* The attribute has originated locally */
+ byte fresh:1; /* An uncached attribute (e.g. modified in export filter) */
+ byte undef:1; /* Explicitly undefined */
+
+ PADDING(unused, 3, 3);
+
+ union bval u;
+} eattr;
+
+
+#define EA_CODE_MASK 0xffff
+#define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
+#define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
+#define EA_BIT_GET(ea) ((ea) >> 24)
+
+typedef struct ea_list {
+ struct ea_list *next; /* In case we have an override list */
+ byte flags; /* Flags: EALF_... */
+ byte rfu;
+ word count; /* Number of attributes */
+ eattr attrs[0]; /* Attribute definitions themselves */
+} ea_list;
+
+struct ea_storage {
+ struct ea_storage *next_hash; /* Next in hash chain */
+ struct ea_storage **pprev_hash; /* Previous in hash chain */
+ _Atomic u32 uc; /* Use count */
+ u32 hash_key; /* List hash */
+ ea_list l[0]; /* The list itself */
+};
+
+#define EALF_SORTED 1 /* Attributes are sorted by code */
+#define EALF_BISECT 2 /* Use interval bisection for searching */
+#define EALF_CACHED 4 /* List is cached */
+#define EALF_HUGE 8 /* List is too big to fit into slab */
+
+struct ea_class {
+#define EA_CLASS_INSIDE \
+ const char *name; /* Name (both print and filter) */ \
+ struct symbol *sym; /* Symbol to export to configs */ \
+ uint id; /* Autoassigned attribute ID */ \
+ uint uc; /* Reference count */ \
+ btype type; /* Data type ID */ \
+ uint readonly:1; /* This attribute can't be changed by filters */ \
+ uint conf:1; /* Requested by config */ \
+ uint hidden:1; /* Technical attribute, do not show, do not expose to filters */ \
+ void (*format)(const eattr *ea, byte *buf, uint size); \
+ void (*stored)(const eattr *ea); /* When stored into global hash */ \
+ void (*freed)(const eattr *ea); /* When released from global hash */ \
+
+ EA_CLASS_INSIDE;
+};
+
+struct ea_class_ref {
+ resource r;
+ struct ea_class *class;
+};
+
+void ea_register_init(struct ea_class *);
+struct ea_class_ref *ea_register_alloc(pool *, struct ea_class);
+struct ea_class_ref *ea_ref_class(pool *, struct ea_class *); /* Reference for an attribute alias */
+
+#define EA_REGISTER_ALL_HELPER(x) ea_register_init(x);
+#define EA_REGISTER_ALL(...) MACRO_FOREACH(EA_REGISTER_ALL_HELPER, __VA_ARGS__)
+
+struct ea_class *ea_class_find_by_id(uint id);
+struct ea_class *ea_class_find_by_name(const char *name);
+static inline struct ea_class *ea_class_self(struct ea_class *self) { return self; }
+#define ea_class_find(_arg) _Generic((_arg), \
+ uint: ea_class_find_by_id, \
+ word: ea_class_find_by_id, \
+ char *: ea_class_find_by_name, \
+ const char *: ea_class_find_by_name, \
+ struct ea_class *: ea_class_self)(_arg)
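A brief usage sketch (assumption, not from the patch) of the _Generic dispatch above; the wrapper exists only for illustration:

static inline struct ea_class *ea_class_find_sketch(const char *name, uint id)
{
  struct ea_class *by_name = ea_class_find(name);   /* dispatches to ea_class_find_by_name() */
  struct ea_class *by_id = ea_class_find(id);       /* dispatches to ea_class_find_by_id() */
  return by_name ?: by_id;                          /* illustrative only */
}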
+
+struct ea_walk_state {
+ ea_list *eattrs; /* Current ea_list, initially set by the caller */
+ eattr *ea; /* Current eattr, initially NULL */
+ u32 visited[4]; /* Bitfield, limiting max to 128 */
+};
+
+#define ea_find(_l, _arg) _Generic((_arg), uint: ea_find_by_id, struct ea_class *: ea_find_by_class, char *: ea_find_by_name)(_l, _arg)
+eattr *ea_find_by_id(ea_list *, unsigned ea);
+static inline eattr *ea_find_by_class(ea_list *l, const struct ea_class *def)
+{ return ea_find_by_id(l, def->id); }
+static inline eattr *ea_find_by_name(ea_list *l, const char *name)
+{
+ const struct ea_class *def = ea_class_find_by_name(name);
+ return def ? ea_find_by_class(l, def) : NULL;
+}
+
+#define ea_get_int(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type & EAF_EMBEDDED); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? ea->u.data : (_def)); \
+ })
+
+#define ea_get_ip(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type == T_IP); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? *((const ip_addr *) ea->u.ptr->data) : (_def)); \
+ })
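A small sketch (assumption) of the typed getter above: fetching an IP-valued attribute and falling back to IPA_NONE when it is absent; the wrapper is illustrative only and expects a T_IP class such as ea_gen_from declared later in this header:

static inline ip_addr ea_get_ip_sketch(ea_list *l, struct ea_class *ip_class)
{
  /* Illustrative only: ip_class must be a T_IP class, otherwise the ASSERT_DIE fires */
  return ea_get_ip(l, ip_class, IPA_NONE);
}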
+
+eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
+void ea_dump(ea_list *);
+int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
+uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
+ea_list *ea_append(ea_list *to, ea_list *what);
+void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
+
+/* Normalize ea_list; allocates the result from tmp_linpool */
+ea_list *ea_normalize(ea_list *e, int overlay);
+
+uint ea_list_size(ea_list *);
+void ea_list_copy(ea_list *dest, ea_list *src, uint size);
+
+#define EA_LOCAL_LIST(N) struct { ea_list l; eattr a[N]; }
+
+#define EA_LITERAL_EMBEDDED(_class, _flags, _val) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(_type & EAF_EMBEDDED); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.i = _val); \
+ })
+
+#define EA_LITERAL_STORE_ADATA(_class, _flags, _buf, _len) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = tmp_store_adata((_buf), (_len))); \
+ })
+
+#define EA_LITERAL_DIRECT_ADATA(_class, _flags, _adata) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = _adata); \
+ })
+
+#define EA_LITERAL_GENERIC(_id, _type, _flags, ...) \
+ ((eattr) { .id = _id, .type = _type, .flags = _flags, __VA_ARGS__ })
+
+static inline eattr *
+ea_set_attr(ea_list **to, eattr a)
+{
+ EA_LOCAL_LIST(1) *ea = tmp_alloc(sizeof(*ea));
+ *ea = (typeof(*ea)) {
+ .l.flags = EALF_SORTED,
+ .l.count = 1,
+ .l.next = *to,
+ .a[0] = a,
+ };
+
+ *to = &ea->l;
+ return &ea->a[0];
+}
+
+static inline void
+ea_unset_attr(ea_list **to, _Bool local, const struct ea_class *def)
+{
+ ea_set_attr(to, EA_LITERAL_GENERIC(def->id, 0, 0,
+ .fresh = local, .originated = local, .undef = 1));
+}
+
+static inline void
+ea_set_attr_u32(ea_list **to, const struct ea_class *def, uint flags, u64 data)
+{ ea_set_attr(to, EA_LITERAL_EMBEDDED(def, flags, data)); }
+
+static inline void
+ea_set_attr_data(ea_list **to, const struct ea_class *def, uint flags, const void *data, uint len)
+{ ea_set_attr(to, EA_LITERAL_STORE_ADATA(def, flags, data, len)); }
+
+static inline void
+ea_copy_attr(ea_list **to, ea_list *from, const struct ea_class *def)
+{
+ eattr *e = ea_find_by_class(from, def);
+ if (e)
+ if (e->type & EAF_EMBEDDED)
+ ea_set_attr_u32(to, def, e->flags, e->u.data);
+ else
+ ea_set_attr_data(to, def, e->flags, e->u.ptr->data, e->u.ptr->length);
+ else
+ ea_unset_attr(to, 0, def);
+}
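A short sketch (assumption) of the overlay helpers above: prepending an embedded value onto a temporary ea_list and then explicitly undefining it again; the function and the value 42 are illustrative only:

static inline void ea_overlay_sketch(ea_list **to, const struct ea_class *cls)
{
  /* Illustrative only: cls must be an embedded (integer-valued) class */
  ea_set_attr_u32(to, cls, 0, 42);   /* prepend a one-attribute overlay carrying the value 42 */
  ea_unset_attr(to, 1, cls);         /* prepend another overlay marking the attribute undefined */
}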
+
+/*
+ * Common route attributes
+ */
+
+/* Preference: first-order comparison */
+extern struct ea_class ea_gen_preference;
+static inline u32 rt_get_preference(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_preference, 0); }
+
+/* IGP metric: second-order comparison */
+extern struct ea_class ea_gen_igp_metric;
+u32 rt_get_igp_metric(const rte *rt);
+#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
+ protocol-specific metric is available */
+
+/* From: Advertising router */
+extern struct ea_class ea_gen_from;
+
+/* Source: An old way to identify the route source protocol and kind.
+ * To be superseded in the near future by something more informative. */
+extern struct ea_class ea_gen_source;
+static inline u32 rt_get_source_attr(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_source, 0); }
+
+/* Flowspec validation result */
+enum flowspec_valid {
+ FLOWSPEC_UNKNOWN = 0,
+ FLOWSPEC_VALID = 1,
+ FLOWSPEC_INVALID = 2,
+ FLOWSPEC__MAX,
+};
+
+extern const char * flowspec_valid_names[FLOWSPEC__MAX];
+static inline const char *flowspec_valid_name(enum flowspec_valid v)
+{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
+
+extern struct ea_class ea_gen_flowspec_valid;
+static inline enum flowspec_valid rt_get_flowspec_valid(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
+
+/* Next hop: For now, stored as adata */
+extern struct ea_class ea_gen_nexthop;
+
+static inline void ea_set_dest(struct ea_list **to, uint flags, uint dest)
+{
+ struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(dest);
+ ea_set_attr_data(to, &ea_gen_nexthop, flags, &nhad.ad.data, nhad.ad.length);
+}
+
+/* Next hop structures */
+
+#define NEXTHOP_ALIGNMENT (_Alignof(struct nexthop))
+#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
+#define NEXTHOP_SIZE(_nh) NEXTHOP_SIZE_CNT(((_nh)->labels))
+#define NEXTHOP_SIZE_CNT(cnt) BIRD_ALIGN((sizeof(struct nexthop) + sizeof(u32) * (cnt)), NEXTHOP_ALIGNMENT)
+#define nexthop_size(nh) NEXTHOP_SIZE((nh))
+
+#define NEXTHOP_NEXT(_nh) ((void *) (_nh) + NEXTHOP_SIZE(_nh))
+#define NEXTHOP_END(_nhad) ((_nhad)->ad.data + (_nhad)->ad.length)
+#define NEXTHOP_VALID(_nh, _nhad) ((void *) (_nh) < (void *) NEXTHOP_END(_nhad))
+#define NEXTHOP_ONE(_nhad) (NEXTHOP_NEXT(&(_nhad)->nh) == NEXTHOP_END(_nhad))
+
+#define NEXTHOP_WALK(_iter, _nhad) for ( \
+ struct nexthop *_iter = &(_nhad)->nh; \
+ (void *) _iter < (void *) NEXTHOP_END(_nhad); \
+ _iter = NEXTHOP_NEXT(_iter))
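A minimal sketch (assumption) showing the iteration macros above in use; the counting helper is made up for illustration and expects an adata that actually carries nexthops (see NEXTHOP_IS_REACHABLE below) rather than a bare RTD_* destination:

static inline uint nexthop_count_sketch(struct nexthop_adata *nhad)
{
  uint cnt = 0;
  NEXTHOP_WALK(nh, nhad)   /* visits every nexthop packed in the adata */
    cnt++;
  return cnt;
}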
+
+
+static inline int nexthop_same(struct nexthop_adata *x, struct nexthop_adata *y)
+{ return adata_same(&x->ad, &y->ad); }
+struct nexthop_adata *nexthop_merge(struct nexthop_adata *x, struct nexthop_adata *y, int max, linpool *lp);
+struct nexthop_adata *nexthop_sort(struct nexthop_adata *x, linpool *lp);
+int nexthop_is_sorted(struct nexthop_adata *x);
+
+#define NEXTHOP_IS_REACHABLE(nhad) ((nhad)->ad.length > NEXTHOP_DEST_SIZE)
+
+/* Route has a regular, reachable nexthop (i.e. not RTD_UNREACHABLE and the like) */
+static inline int rte_is_reachable(rte *r)
+{
+ eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
+ if (!nhea)
+ return 0;
+
+ struct nexthop_adata *nhad = (void *) nhea->u.ptr;
+ return NEXTHOP_IS_REACHABLE(nhad);
+}
+
+static inline int nhea_dest(eattr *nhea)
+{
+ if (!nhea)
+ return RTD_NONE;
+
+ struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
+ if (NEXTHOP_IS_REACHABLE(nhad))
+ return RTD_UNICAST;
+ else
+ return nhad->dest;
+}
+
+static inline int rte_dest(const rte *r)
+{
+ return nhea_dest(ea_find(r->attrs, &ea_gen_nexthop));
+}
+
+void rta_init(void);
+ea_list *ea_lookup(ea_list *, int overlay); /* Get a cached (and normalized) variant of this attribute list */
+static inline int ea_is_cached(const ea_list *r) { return r->flags & EALF_CACHED; }
+static inline struct ea_storage *ea_get_storage(ea_list *r)
+{
+ ASSERT_DIE(ea_is_cached(r));
+ return SKIP_BACK(struct ea_storage, l[0], r);
+}
+
+static inline ea_list *ea_clone(ea_list *r) {
+ ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
+ return r;
+}
+void ea__free(struct ea_storage *r);
+static inline void ea_free(ea_list *l) {
+ if (!l) return;
+ struct ea_storage *r = ea_get_storage(l);
+ if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel)) ea__free(r);
+}
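A short sketch (assumption) of the reference-counting pattern above: taking an extra reference to a cached list and releasing it afterwards; the helper is illustrative only:

static inline void ea_refcount_sketch(ea_list *cached)
{
  ea_list *mine = ea_clone(cached);   /* take another reference; asserts the list is cached */
  /* ... use mine ... */
  ea_free(mine);                      /* drop the reference; storage is freed when it hits zero */
}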
+
+void ea_dump(ea_list *);
+void ea_dump_all(void);
+void ea_show_list(struct cli *, ea_list *);
+
+#define rta_lookup ea_lookup
+#define rta_is_cached ea_is_cached
+#define rta_clone ea_clone
+#define rta_free ea_free
+
+#endif
[RTS_PIPE] = "pipe",
[RTS_BABEL] = "Babel",
[RTS_RPKI] = "RPKI",
+ [RTS_PERF] = "Perf",
+ [RTS_AGGREGATED] = "aggregated",
};
+static void
+ea_gen_source_format(const eattr *a, byte *buf, uint size)
+{
+ if ((a->u.data >= RTS_MAX) || !rta_src_names[a->u.data])
+ bsnprintf(buf, size, "unknown");
+ else
+ bsnprintf(buf, size, "%s", rta_src_names[a->u.data]);
+}
+
+struct ea_class ea_gen_source = {
+ .name = "source",
+ .type = T_ENUM_RTS,
+ .readonly = 1,
+ .format = ea_gen_source_format,
+};
+
+struct ea_class ea_gen_nexthop = {
+ .name = "nexthop",
+ .type = T_NEXTHOP_LIST,
+};
+
+/*
+ * ea_set_hostentry() acquires a hostentry from the hostcache.
+ * A new hostentry has a zero use count. A cached rta locks its
+ * hostentry (increases its use count); an uncached rta does not lock it.
+ * A hostentry with a zero use count is removed asynchronously
+ * during the host cache update, therefore it is safe to hold
+ * such a hostentry temporarily as long as you hold the table lock.
+ *
+ * There is no need to hold a lock for the hostentry->dep table, because that
+ * table contains the routes responsible for that hostentry and is therefore
+ * non-empty if the given hostentry has a non-zero use count. If the hostentry
+ * has a zero use count, the entry is removed before dep is referenced.
+ *
+ * The protocol responsible for routes with recursive next hops should hold a
+ * lock for the 'source' table governing those routes (argument tab),
+ * because its routes reference hostentries related to the governing table.
+ * When all such routes are removed, the rtas are removed immediately,
+ * dropping the use count to zero. The 'source' table lock can then be
+ * released immediately, although hostentries may still exist; they will be
+ * freed together with the 'source' table.
+ */
+
+static void
+ea_gen_hostentry_stored(const eattr *ea)
+{
+ struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
+ had->he->uc++;
+}
+
+static void
+ea_gen_hostentry_freed(const eattr *ea)
+{
+ struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
+ had->he->uc--;
+}
+
+struct ea_class ea_gen_hostentry = {
+ .name = "hostentry",
+ .type = T_HOSTENTRY,
+ .readonly = 1,
+ .stored = ea_gen_hostentry_stored,
+ .freed = ea_gen_hostentry_freed,
+};
+
const char * rta_dest_names[RTD_MAX] = {
[RTD_NONE] = "",
[RTD_UNICAST] = "unicast",
return best;
}
-static void
-rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed,
- rte *new_best, rte *old_best, int refeed)
+void
+rt_notify_merged(struct rt_export_request *req, const net_addr *n,
+ struct rt_pending_export *first, struct rt_pending_export *last,
+ const rte **feed, uint count)
{
-
+ struct channel *c = channel_from_export_request(req);
// struct proto *p = c->proto;
- rte *new_free = NULL;
-
- /* We assume that all rte arguments are either NULL or rte_is_valid() */
-
- /* This check should be done by the caller */
- if (!new_best && !old_best)
- return;
+#if 0 /* TODO: Find whether this check is possible when processing multiple changes at once. */
/* Check whether the change is relevant to the merged route */
if ((new_best == old_best) &&
(new_changed != old_changed) &&
--- /dev/null
-aggregator_bucket_update(struct aggregator_proto *p, struct aggregator_bucket *bucket, struct network *net)
+ /*
+ * BIRD Internet Routing Daemon -- Route aggregation
+ *
+ * (c) 2023--2023 Igor Putovny <igor.putovny@nic.cz>
+ * (c) 2023 CZ.NIC, z.s.p.o.
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+ /**
+ * DOC: Route aggregation
+ *
+ * This is an implementation of route aggregation functionality. It enables
+ * the user to specify a set of route attributes in the configuration file
+ * and then, for a given destination (net), aggregate routes with the same
+ * values of these attributes into a single multi-path route.
+ *
+ * The &channel structure contains a pointer to the aggregation list, which
+ * is represented by &aggr_list_linearized. In rt_notify_aggregated(), the
+ * attributes from this list are evaluated for every route of a given net
+ * and the results are stored in &rte_val_list, which contains a pointer to
+ * this route and an array of &f_val. The array of pointers to &rte_val_list
+ * entries is sorted using sort_rte_val_list(). For comparison of &f_val
+ * structures, val_compare() is used. The comparator is written so that the
+ * sort is stable. If all attributes have the same values, routes are
+ * compared by their global IDs.
+ *
+ * After sorting, &rte_val_list entries containing equivalent routes are
+ * adjacent to each other. The function process_rte_list() iterates through
+ * these entries to identify sequences of equivalent routes. A new route is
+ * created for each such sequence, even if it contains only a single route.
+ * Only attributes from the aggregation list are set for the new route.
+ * A new &rta is created and prepare_rta() copies the static and dynamic
+ * attributes into it from the &rta of the original route. The new route is
+ * created by create_merged_rte() from the new &rta and exported to the
+ * routing table.
+ */
+
+ #undef LOCAL_DEBUG
+
+ #ifndef _GNU_SOURCE
+ #define _GNU_SOURCE
+ #endif
+
+ #include "nest/bird.h"
+ #include "nest/iface.h"
+ #include "filter/filter.h"
+ #include "aggregator.h"
+
+ #include <stdlib.h>
+
+ /*
+ * Compare two lists of &f_val entries for equality.
+ * @len: number of &f_val entries in each list
+ */
+ static int
+ same_val_list(const struct f_val *v1, const struct f_val *v2, uint len)
+ {
+ for (uint i = 0; i < len; i++)
+ if (!val_same(&v1[i], &v2[i]))
+ return 0;
+
+ return 1;
+ }
+
+ /*
+ * Create and export a new merged route for a bucket (or withdraw it if the bucket is empty).
+ * @p: aggregator protocol instance
+ * @bucket: bucket whose routes (at least one) are to be merged
+ * @net: destination network of the merged route
+ */
+ static void
- rte_update2(p->dst, net->n.addr, NULL, bucket->last_src);
++aggregator_bucket_update(struct aggregator_proto *p, struct aggregator_bucket *bucket, const net_addr *net)
+ {
+ /* Empty bucket */
+ if (!bucket->rte)
+ {
- struct lp_state tmp_state;
- lp_save(tmp_linpool, &tmp_state);
-
- /* Allocate RTA */
- struct rta *rta = allocz(rta_size(bucket->rte->attrs));
- rta->dest = RTD_UNREACHABLE;
- rta->source = RTS_AGGREGATED;
- rta->scope = SCOPE_UNIVERSE;
++ rte_update(p->dst, net, NULL, bucket->last_src);
+ bucket->last_src = NULL;
+ return;
+ }
+
+ /* Store TMP linpool state */
- struct rte *new = rte_get_temp(rta, bucket->rte->src);
- new->net = net;
++ struct lp_state *tmp_state = lp_save(tmp_linpool);
+
+ /* Allocate route */
- f_eval_rte(p->premerge, &new, tmp_linpool, p->aggr_on_count, bucket->aggr_data, 0, NULL);
++ struct rte new = { .net = net, .src = bucket->rte->rte.src };
++ ea_set_attr(&new.attrs, EA_LITERAL_EMBEDDED(&ea_gen_source, 0, RTS_AGGREGATED));
++
++ if (net_type_match(net, NB_DEST))
++ ea_set_dest(&new.attrs, 0, RTD_UNREACHABLE);
+
+ /* Seed the attributes from aggregator rule */
- .val.rte = bucket->rte,
++ f_eval_rte(p->premerge, &new, p->aggr_on_count, bucket->aggr_data, 0, NULL);
+
+ /*
+ log("=============== CREATE MERGED ROUTE ===============");
+ log("New route created: id = %d, protocol: %s", new->src->global_id, new->src->proto->name);
+ log("===================================================");
+ */
+
+ /* merge filter needs one argument called "routes" */
+ struct f_val val = {
+ .type = T_ROUTES_BLOCK,
- /* Actually run the merge rule */
- enum filter_return fret = f_eval_rte(p->merge_by, &new, tmp_linpool, 1, &val, 0, NULL);
++ .val.rte_block = {},
+ };
+
- /* Src must be stored now, rte_update2() may return new */
- struct rte_src *new_src = new ? new->src : NULL;
++ for (struct aggregator_route *rte = bucket->rte; rte; rte = rte->next_rte)
++ val.val.rte_block.len++;
+
- rte_update2(p->dst, net->n.addr, new, bucket->last_src ?: new->src);
++ val.val.rte_block.rte = tmp_alloc(sizeof(struct rte *) * val.val.rte_block.len);
++ {
++ uint i = 0;
++ for (struct aggregator_route *rte = bucket->rte; rte; rte = rte->next_rte)
++ val.val.rte_block.rte[i++] = &rte->rte;
++ ASSERT_DIE(i == val.val.rte_block.len);
++ }
++
++ /* Actually run the merge rule */
++ enum filter_return fret = f_eval_rte(p->merge_by, &new, 1, &val, 0, NULL);
+
+ /* Finally import the route */
+ switch (fret)
+ {
+ /* Pass the route to the protocol */
+ case F_ACCEPT:
- rte_update2(p->dst, net->n.addr, NULL, bucket->last_src);
++ rte_update(p->dst, net, &new, bucket->last_src ?: new.src);
+ break;
+
+ /* Something bad happened */
+ default:
+ ASSERT_DIE(fret == F_ERROR);
+ /* fall through */
+
+ /* We actually don't want this route */
+ case F_REJECT:
+ if (bucket->last_src)
- if (bucket->last_src != new_src)
++ rte_update(p->dst, net, NULL, bucket->last_src);
+ break;
+ }
+
+ /* Switch source lock for bucket->last_src */
- if (new_src)
- rt_lock_source(new_src);
++ if (bucket->last_src != new.src)
+ {
- bucket->last_src = new_src;
++ if (new.src)
++ rt_lock_source(new.src);
+ if (bucket->last_src)
+ rt_unlock_source(bucket->last_src);
+
- lp_restore(tmp_linpool, &tmp_state);
++ bucket->last_src = new.src;
+ }
+
- aggregator_bucket_update(p, b, b->rte->net);
++ lp_restore(tmp_linpool, tmp_state);
+ }
+
+ /*
+ * Reload all the buckets on reconfiguration if the merge filter has changed.
+ * TODO: split this work into smaller chunks
+ */
+ static void
+ aggregator_reload_buckets(void *data)
+ {
+ struct aggregator_proto *p = data;
+
+ HASH_WALK(p->buckets, next_hash, b)
+ if (b->rte)
- net *net;
++ aggregator_bucket_update(p, b, b->rte->rte.net);
+ HASH_WALK_END;
+ }
+
+ static inline u32 aggr_route_hash(const rte *e)
+ {
+ struct {
-aggregator_rt_notify(struct proto *P, struct channel *src_ch, net *net, rte *new, rte *old)
++ const net_addr *net; /* the net_addr pointer is stable as long as any route exists for it in the source table */
+ struct rte_src *src;
+ } obj = {
+ .net = e->net,
+ .src = e->src,
+ };
+
+ return mem_hash(&obj, sizeof obj);
+ }
+
+ #define AGGR_RTE_KEY(n) (&(n)->rte)
+ #define AGGR_RTE_NEXT(n) ((n)->next_hash)
+ #define AGGR_RTE_EQ(a,b) (((a)->src == (b)->src) && ((a)->net == (b)->net))
+ #define AGGR_RTE_FN(_n) aggr_route_hash(_n)
+ #define AGGR_RTE_ORDER 4 /* Initial */
+
+ #define AGGR_RTE_REHASH aggr_rte_rehash
+ #define AGGR_RTE_PARAMS /8, *2, 2, 2, 4, 24
+
+ HASH_DEFINE_REHASH_FN(AGGR_RTE, struct aggregator_route);
+
+
+ #define AGGR_BUCK_KEY(n) (n)
+ #define AGGR_BUCK_NEXT(n) ((n)->next_hash)
+ #define AGGR_BUCK_EQ(a,b) (((a)->hash == (b)->hash) && (same_val_list((a)->aggr_data, (b)->aggr_data, p->aggr_on_count)))
+ #define AGGR_BUCK_FN(n) ((n)->hash)
+ #define AGGR_BUCK_ORDER 4 /* Initial */
+
+ #define AGGR_BUCK_REHASH aggr_buck_rehash
+ #define AGGR_BUCK_PARAMS /8, *2, 2, 2, 4, 24
+
+ HASH_DEFINE_REHASH_FN(AGGR_BUCK, struct aggregator_bucket);
+
+
+ #define AGGR_DATA_MEMSIZE (sizeof(struct f_val) * p->aggr_on_count)
+
+ static void
- struct lp_state tmp_state;
- lp_save(tmp_linpool, &tmp_state);
++aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *net, rte *new, const rte *old)
+ {
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ ASSERT_DIE(src_ch == p->src);
+ struct aggregator_bucket *new_bucket = NULL, *old_bucket = NULL;
+ struct aggregator_route *old_route = NULL;
+
+ /* Find the objects for the old route */
+ if (old)
+ old_route = HASH_FIND(p->routes, AGGR_RTE, old);
+
+ if (old_route)
+ old_bucket = old_route->bucket;
+
+ /* Find the bucket for the new route */
+ if (new)
+ {
+ /* Routes are identical, do nothing */
+ if (old_route && rte_same(&old_route->rte, new))
+ return;
+
+ /* Evaluate route attributes. */
+ struct aggregator_bucket *tmp_bucket = sl_allocz(p->bucket_slab);
- struct rte *rt1 = new;
- enum filter_return fret = f_eval_rte(p->aggr_on, &new, tmp_linpool, 0, NULL, p->aggr_on_count, tmp_bucket->aggr_data);
++ struct lp_state *tmp_state = lp_save(tmp_linpool);
+
- if (rt1 != new)
- {
- rte_free(rt1);
- log(L_WARN "Aggregator rule modifies the route, reverting");
- }
++ struct ea_list *oa = new->attrs;
++ enum filter_return fret = f_eval_rte(p->aggr_on, new, 0, NULL, p->aggr_on_count, tmp_bucket->aggr_data);
+
- lp_restore(tmp_linpool, &tmp_state);
++ if (new->attrs != oa)
++ log(L_WARN "Aggregator rule modifies the route");
+
+ /* Check filter return value */
+ if (fret > F_RETURN)
+ {
+ sl_free(tmp_bucket);
- new->attrs = rta_lookup(new->attrs);
++ lp_restore(tmp_linpool, tmp_state);
+
+ return;
+ }
+
+ /* Compute the hash */
+ u64 haux;
+ mem_hash_init(&haux);
+ for (uint i = 0; i < p->aggr_on_count; i++)
+ mem_hash_mix_f_val(&haux, &tmp_bucket->aggr_data[i]);
+ tmp_bucket->hash = mem_hash_value(&haux);
+
+ /* Find the existing bucket */
+ if (new_bucket = HASH_FIND(p->buckets, AGGR_BUCK, tmp_bucket))
+ sl_free(tmp_bucket);
+ else
+ {
+ new_bucket = tmp_bucket;
+ HASH_INSERT2(p->buckets, AGGR_BUCK, p->p.pool, new_bucket);
+ }
+
+ /* Store the route attributes */
+ if (rta_is_cached(new->attrs))
+ rta_clone(new->attrs);
+ else
- arte->rte.next = new_bucket->rte,
- new_bucket->rte = &arte->rte;
++ new->attrs = rta_lookup(new->attrs, 0);
+
+ /* Insert the new route into the bucket */
+ struct aggregator_route *arte = sl_alloc(p->route_slab);
+ *arte = (struct aggregator_route) {
+ .bucket = new_bucket,
+ .rte = *new,
++ .next_rte = new_bucket->rte,
+ };
- lp_restore(tmp_linpool, &tmp_state);
++ new_bucket->rte = arte;
+ new_bucket->count++;
+ HASH_INSERT2(p->routes, AGGR_RTE, p->p.pool, arte);
+
- for (struct rte **k = &old_bucket->rte; *k; k = &(*k)->next)
- if (*k == &old_route->rte)
++ lp_restore(tmp_linpool, tmp_state);
+ }
+
+ /* Remove the old route from its bucket */
+ if (old_bucket)
+ {
- *k = (*k)->next;
++ for (struct aggregator_route **k = &old_bucket->rte; *k; k = &(*k)->next_rte)
++ if (*k == old_route)
+ {
- if (new->sender == p->dst)
++ *k = (*k)->next_rte;
+ break;
+ }
+
+ old_bucket->count--;
+ HASH_REMOVE2(p->routes, AGGR_RTE, p->p.pool, old_route);
+ rta_free(old_route->rte.attrs);
+ sl_free(old_route);
+ }
+
+ /* Announce changes */
+ if (old_bucket)
+ aggregator_bucket_update(p, old_bucket, net);
+
+ if (new_bucket && (new_bucket != old_bucket))
+ aggregator_bucket_update(p, new_bucket, net);
+
+ /* Cleanup the old bucket if empty */
+ if (old_bucket && (!old_bucket->rte || !old_bucket->count))
+ {
+ ASSERT_DIE(!old_bucket->rte && !old_bucket->count);
+ HASH_REMOVE2(p->buckets, AGGR_BUCK, p->p.pool, old_bucket);
+ sl_free(old_bucket);
+ }
+ }
+
+ static int
+ aggregator_preexport(struct channel *C, struct rte *new)
+ {
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, C->proto);
+ /* Reject our own routes */
- if (new->attrs->source == RTS_AGGREGATED)
++ if (new->sender == p->dst->in_req.hook)
+ return -1;
+
+ /* Disallow aggregating already aggregated routes */
- log(L_ERR "Multiple aggregations of the same route not supported in BIRD 2.");
++ if (ea_get_int(new->attrs, &ea_gen_source, 0) == RTS_AGGREGATED)
+ {
- while (b->rte)
++ log(L_ERR "Multiple aggregations of the same route not supported.");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ static void
+ aggregator_postconfig(struct proto_config *CF)
+ {
+ struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+
+ if (!cf->src->table)
+ cf_error("Source table not specified");
+
+ if (!cf->dst->table)
+ cf_error("Destination table not specified");
+
+ if (cf->dst->table->addr_type != cf->src->table->addr_type)
+ cf_error("Both tables must be of the same type");
+
+ cf->dst->in_filter = cf->src->in_filter;
+
+ cf->src->in_filter = FILTER_REJECT;
+ cf->dst->out_filter = FILTER_REJECT;
+
+ cf->dst->debug = cf->src->debug;
+ }
+
+ static struct proto *
+ aggregator_init(struct proto_config *CF)
+ {
+ struct proto *P = proto_new(CF);
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+
+ proto_configure_channel(P, &p->src, cf->src);
+ proto_configure_channel(P, &p->dst, cf->dst);
+
+ p->aggr_on_count = cf->aggr_on_count;
+ p->aggr_on = cf->aggr_on;
+ p->premerge = cf->premerge;
+ p->merge_by = cf->merge_by;
+
+ P->rt_notify = aggregator_rt_notify;
+ P->preexport = aggregator_preexport;
+
+ return P;
+ }
+
+ static int
+ aggregator_start(struct proto *P)
+ {
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+
+ p->bucket_slab = sl_new(P->pool, sizeof(struct aggregator_bucket) + AGGR_DATA_MEMSIZE);
+ HASH_INIT(p->buckets, P->pool, AGGR_BUCK_ORDER);
+
+ p->route_slab = sl_new(P->pool, sizeof(struct aggregator_route));
+ HASH_INIT(p->routes, P->pool, AGGR_RTE_ORDER);
+
+ p->reload_buckets = (event) {
+ .hook = aggregator_reload_buckets,
+ .data = p,
+ };
+
+ return PS_UP;
+ }
+
+ static int
+ aggregator_shutdown(struct proto *P)
+ {
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+
+ HASH_WALK_DELSAFE(p->buckets, next_hash, b)
+ {
- struct aggregator_route *arte = SKIP_BACK(struct aggregator_route, rte, b->rte);
- b->rte = arte->rte.next;
++ for (struct aggregator_route *arte; arte = b->rte; )
+ {
- .class = PROTOCOL_AGGREGATOR,
++ b->rte = arte->next_rte;
+ b->count--;
+ HASH_REMOVE(p->routes, AGGR_RTE, arte);
+ rta_free(arte->rte.attrs);
+ sl_free(arte);
+ }
+
+ ASSERT_DIE(b->count == 0);
+ HASH_REMOVE(p->buckets, AGGR_BUCK, b);
+ sl_free(b);
+ }
+ HASH_WALK_END;
+
+ return PS_DOWN;
+ }
+
+ static int
+ aggregator_reconfigure(struct proto *P, struct proto_config *CF)
+ {
+ struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+
+ TRACE(D_EVENTS, "Reconfiguring");
+
+ /* Compare numeric values (shortcut) */
+ if (cf->aggr_on_count != p->aggr_on_count)
+ return 0;
+
+ /* Compare aggregator rule */
+ if (!f_same(cf->aggr_on, p->aggr_on) || !f_same(cf->premerge, p->premerge))
+ return 0;
+
+ /* Compare merge filter */
+ if (!f_same(cf->merge_by, p->merge_by))
+ ev_schedule(&p->reload_buckets);
+
+ p->aggr_on = cf->aggr_on;
+ p->premerge = cf->premerge;
+ p->merge_by = cf->merge_by;
+
+ return 1;
+ }
+
+ struct protocol proto_aggregator = {
+ .name = "Aggregator",
+ .template = "aggregator%d",
+ .preference = 1,
+ .channel_mask = NB_ANY,
+ .proto_size = sizeof(struct aggregator_proto),
+ .config_size = sizeof(struct aggregator_config),
++ .startup = PROTOCOL_STARTUP_CONNECTOR,
+ .postconfig = aggregator_postconfig,
+ .init = aggregator_init,
+ .start = aggregator_start,
+ .shutdown = aggregator_shutdown,
+ .reconfigure = aggregator_reconfigure,
+ };
+
+ void
+ aggregator_build(void)
+ {
+ proto_build(&proto_aggregator);
+ }
--- /dev/null
- struct rte *rte; /* Pointer to struct aggregator_route.rte */
+ /*
+ * BIRD -- Aggregator Pseudoprotocol
+ *
+ * (c) 2023 Igor Putovny <igor.putovny@nic.cz>
+ * (c) 2023 Maria Matejka <mq@ucw.cz>
+ * (c) 2023 CZ.NIC z.s.p.o.
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ *
+ * This file contains the data structures used by the aggregator protocol.
+ */
+
+ #ifndef _BIRD_AGGREGATOR_H_
+ #define _BIRD_AGGREGATOR_H_
+
+ #include "nest/bird.h"
+ #include "nest/protocol.h"
+ #include "lib/hash.h"
+
+ struct aggregator_config {
+ struct proto_config c;
+ struct channel_config *src, *dst;
+ const struct f_line *aggr_on;
+ const struct f_line *premerge;
+ const struct f_line *merge_by;
+ uint aggr_on_count;
+ u8 aggr_on_net;
+ };
+
+ struct aggregator_route {
+ struct aggregator_route *next_hash;
++ struct aggregator_route *next_rte;
+ struct aggregator_bucket *bucket;
+ struct rte rte;
+ };
+
+ struct aggregator_bucket {
+ struct aggregator_bucket *next_hash;
++ struct aggregator_route *rte; /* List of routes in this bucket, linked via next_rte */
+ struct rte_src *last_src; /* Which src we announced the bucket last with */
+ u32 count;
+ u32 hash;
+ struct f_val aggr_data[0];
+ };
+
+ struct aggregator_proto {
+ struct proto p;
+ struct channel *src, *dst;
+
+ /* Buckets by aggregator rule */
+ HASH(struct aggregator_bucket) buckets;
+ slab *bucket_slab;
+
+ /* Routes by net and src */
+ HASH(struct aggregator_route) routes;
+ slab *route_slab;
+
+ /* Aggregator rule */
+ const struct f_line *aggr_on;
+ uint aggr_on_count;
+ u8 aggr_on_net;
+
+ /* Merge filter */
+ const struct f_line *premerge;
+ const struct f_line *merge_by;
+ event reload_buckets;
+ };
+
+ #endif
--- /dev/null
+ /*
+ * BIRD -- Aggregator configuration
+ *
+ * (c) 2023 Igor Putovny <igor.putovny@nic.cz>
+ * (c) 2023 Maria Matejka <mq@ucw.cz>
+ * (c) 2023 CZ.NIC z.s.p.o.
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+ CF_HDR
+
+ #include "proto/aggregator/aggregator.h"
+
+ CF_DEFINES
+
+ #define AGGREGATOR_CFG ((struct aggregator_config *) this_proto)
+ #define AGGR_ITEM_ALLOC ((struct aggr_item_node *) cfg_allocz(sizeof(struct aggr_item_node)))
+
+ CF_DECLS
+
+ CF_KEYWORDS(AGGREGATOR, AGGREGATE, ON, MERGE, BY)
+
+ %type <xp> aggr_item aggr_list
+
+ CF_GRAMMAR
+
+ proto: aggregator_proto ;
+
+ aggregator_proto_start: proto_start AGGREGATOR
+ {
+ this_proto = proto_config_new(&proto_aggregator, $1);
+ this_channel = AGGREGATOR_CFG->src = channel_config_new(NULL, "source", 0, this_proto);
+ AGGREGATOR_CFG->dst = channel_config_new(NULL, "destination", 0, this_proto);
+
+ AGGREGATOR_CFG->src->ra_mode = AGGREGATOR_CFG->dst->ra_mode = RA_ANY;
+ };
+
+ aggregator_proto_item:
+ proto_item
+ | channel_item_
+ | PEER TABLE rtable { AGGREGATOR_CFG->dst->table = $3; }
+ | AGGREGATE ON {
+ if (AGGREGATOR_CFG->aggr_on)
+ cf_error("Only one aggregate on clause allowed");
+
++ cf_enter_filters();
+ cf_push_block_scope(new_config);
+ } aggr_list {
+ int count = new_config->current_scope->slots;
+ cf_pop_block_scope(new_config);
++ cf_exit_filters();
+
+ if (!AGGREGATOR_CFG->aggr_on_net)
+ cf_error("aggregate on must be always include 'net'.");
+
+ AGGREGATOR_CFG->aggr_on_count = count;
+ AGGREGATOR_CFG->aggr_on = f_linearize($4.begin, count);
+
+ struct f_line *premerge = f_linearize($4.end, 0);
+ premerge->args = count;
+ AGGREGATOR_CFG->premerge = premerge;
+ }
+ | MERGE BY {
++ cf_enter_filters();
+ cf_push_block_scope(new_config);
+ f_predefined_variable(new_config, "routes", T_ROUTES_BLOCK);
+ } function_body {
+ cf_pop_block_scope(new_config);
++ cf_exit_filters();
+ $4->args++;
+ AGGREGATOR_CFG->merge_by = $4;
+ }
+ ;
+
+ aggregator_proto_opts: /* empty */ | aggregator_proto_opts aggregator_proto_item ';' ;
+ aggregator_proto: aggregator_proto_start proto_name '{' aggregator_proto_opts '}' ;
+
+
+ aggr_list:
+ aggr_item
+ | aggr_list ',' aggr_item {
+ if ($$.begin = $3.begin)
+ $$.begin->next = $1.begin;
+ else
+ $$.begin = $1.begin;
+
+ if ($$.end = $3.end)
+ $$.end->next = $1.end;
+ else
+ $$.end = $1.end;
+ }
+ ;
+
+ aggr_item:
+ '(' term ')' {
+ switch ($2->type) {
+ case T_INT:
+ case T_BOOL:
+ case T_PAIR:
+ case T_QUAD:
+ case T_ENUM:
+ case T_IP:
+ case T_EC:
+ case T_LC:
+ case T_RD:
+ /* Fits, OK */
+ break;
+
+ default:
+ cf_error("Expression evaluated to type %s unsupported by aggregator. Store this value as a custom attribute instead", f_type_name($2->type));
+ }
+
+ $$.begin = $2;
+ $$.end = NULL;
+ f_new_var(new_config->current_scope);
+ }
+ | lvalue {
+ $$.begin = f_lval_getter(&$1);
+ int vari = f_new_var(new_config->current_scope);
+
+ if ($1.type == F_LVAL_SA && $1.sa.sa_code == SA_NET)
+ AGGREGATOR_CFG->aggr_on_net = 1;
+ if (($1.type == F_LVAL_CONSTANT) ||
+ ($1.type == F_LVAL_SA && $1.sa.readonly))
+ $$.end = NULL;
+ else
+ {
+ char varname[12];
+ bsnprintf(varname, 12, "!aggr%d", vari);
+ $$.end = f_lval_setter(&$1,
+ f_new_inst(FI_VAR_GET, cf_define_symbol(
+ new_config, cf_get_symbol(new_config, varname),
+ SYM_VARIABLE | $$.begin->type, offset, vari
+ )));
+ }
+ }
+ ;
+
+ CF_CODE
+
+ CF_END