--- /dev/null
- struct rte *next;
- struct network *net; /* Network this RTE belongs to */
- struct rte_src *src; /* Route source that created the route */
- struct channel *sender; /* Channel used to send the route to the routing table */
+ /*
+ * BIRD Internet Routing Daemon -- Routing data structures
+ *
+ * (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+ #ifndef _BIRD_LIB_ROUTE_H_
+ #define _BIRD_LIB_ROUTE_H_
+
+ struct network;
+ struct proto;
+ struct cli;
+
++
+ typedef struct rte {
- byte flags; /* Flags (REF_...) */
+ struct rta *attrs; /* Attributes of this route */
++ const net_addr *net; /* Network this RTE belongs to */
++ struct rte_src *src; /* Route source that created the route */
++ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
++ btime lastmod; /* Last modified (set by table) */
+ u32 id; /* Table specific route id */
- btime lastmod; /* Last modified */
++ byte flags; /* Table-specific flags */
+ byte pflags; /* Protocol-specific flags */
-#define REF_COW 1 /* Copy this rte on write */
++ u8 generation; /* If this route import is based on other previously exported route,
++ this value should be 1 + MAX(generation of the parent routes).
++ Otherwise the route is independent and this value is zero. */
+ } rte;
+
+ #define REF_FILTERED 2 /* Route is rejected by import filter */
+ #define REF_STALE 4 /* Route is stale in a refresh cycle */
+ #define REF_DISCARD 8 /* Route is scheduled for discard */
+ #define REF_MODIFY 16 /* Route is scheduled for modify */
+
+ /* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
+ static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }
+
+ /* Route just has REF_FILTERED flag */
+ static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }
+
+ struct rte_src {
+ struct rte_src *next; /* Hash chain */
+ struct proto *proto; /* Protocol the source is based on */
+ u32 private_id; /* Private ID, assigned by the protocol */
+ u32 global_id; /* Globally unique ID of the source */
+ unsigned uc; /* Use count */
+ };
+
+
+ struct rte_src *rt_find_source(struct proto *p, u32 id);
+ struct rte_src *rt_get_source(struct proto *p, u32 id);
+ static inline void rt_lock_source(struct rte_src *src) { src->uc++; }
+ static inline void rt_unlock_source(struct rte_src *src) { src->uc--; }
+ void rt_prune_sources(void);
+
+ /*
+ * Route Attributes
+ *
+ * Beware: All standard BGP attributes must be represented here instead
+ * of making them local to the route. This is needed to ensure proper
+ * construction of BGP route attribute lists.
+ */
+
+ /* Nexthop structure */
+ struct nexthop {
+ ip_addr gw; /* Next hop */
+ struct iface *iface; /* Outgoing interface */
+ struct nexthop *next;
+ byte flags;
+ byte weight;
+ byte labels_orig; /* Number of labels before hostentry was applied */
+ byte labels; /* Number of all labels */
+ u32 label[0];
+ };
+
+ #define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
+
+
+ typedef struct rta {
+ struct rta *next, **pprev; /* Hash chain */
+ u32 uc; /* Use count */
+ u32 hash_key; /* Hash over important fields */
+ struct ea_list *eattrs; /* Extended Attribute chain */
+ struct hostentry *hostentry; /* Hostentry for recursive next-hops */
+ ip_addr from; /* Advertising router */
+ u32 igp_metric; /* IGP metric to next hop (for iBGP routes) */
+ u16 cached:1; /* Are attributes cached? */
+ u16 source:7; /* Route source (RTS_...) */
+ u16 scope:4; /* Route scope (SCOPE_... -- see ip.h) */
+ u16 dest:4; /* Route destination type (RTD_...) */
+ word pref;
+ struct nexthop nh; /* Next hop */
+ } rta;
+
+ #define RTS_STATIC 1 /* Normal static route */
+ #define RTS_INHERIT 2 /* Route inherited from kernel */
+ #define RTS_DEVICE 3 /* Device route */
+ #define RTS_STATIC_DEVICE 4 /* Static device route */
+ #define RTS_REDIRECT 5 /* Learned via redirect */
+ #define RTS_RIP 6 /* RIP route */
+ #define RTS_OSPF 7 /* OSPF route */
+ #define RTS_OSPF_IA 8 /* OSPF inter-area route */
+ #define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
+ #define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
+ #define RTS_BGP 11 /* BGP route */
+ #define RTS_PIPE 12 /* Inter-table wormhole */
+ #define RTS_BABEL 13 /* Babel route */
+ #define RTS_RPKI 14 /* Route Origin Authorization */
+ #define RTS_PERF 15 /* Perf checker */
+ #define RTS_MAX 16
+
+ #define RTD_NONE 0 /* Undefined next hop */
+ #define RTD_UNICAST 1 /* Next hop is neighbor router */
+ #define RTD_BLACKHOLE 2 /* Silently drop packets */
+ #define RTD_UNREACHABLE 3 /* Reject as unreachable */
+ #define RTD_PROHIBIT 4 /* Administratively prohibited */
+ #define RTD_MAX 5
+
+ #define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
+ 					   protocol-specific metric is available */
+
+
+ extern const char * rta_dest_names[RTD_MAX];
+
+ static inline const char *rta_dest_name(uint n)
+ { return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
+
+ /* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
+ static inline int rte_is_reachable(rte *r)
+ { return r->attrs->dest == RTD_UNICAST; }
+
+
+ /*
+ * Extended Route Attributes
+ */
+
+ typedef struct eattr {
+ word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
+ byte flags; /* Protocol-dependent flags */
+ byte type:5; /* Attribute type */
+ byte originated:1; /* The attribute has originated locally */
+ byte fresh:1; /* An uncached attribute (e.g. modified in export filter) */
+ byte undef:1; /* Explicitly undefined */
+
+ union bval u;
+ } eattr;
+
+
+ #define EA_CODE(proto,id) (((proto) << 8) | (id))
+ #define EA_ID(ea) ((ea) & 0xff)
+ #define EA_PROTO(ea) ((ea) >> 8)
+ #define EA_CUSTOM(id) ((id) | EA_CUSTOM_BIT)
+ #define EA_IS_CUSTOM(ea) ((ea) & EA_CUSTOM_BIT)
+ #define EA_CUSTOM_ID(ea) ((ea) & ~EA_CUSTOM_BIT)
+
+ const char *ea_custom_name(uint ea);
+
+ #define EA_GEN_IGP_METRIC EA_CODE(PROTOCOL_NONE, 0)
+
+ #define EA_CODE_MASK 0xffff
+ #define EA_CUSTOM_BIT 0x8000
+ #define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
+ #define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
+ #define EA_BIT_GET(ea) ((ea) >> 24)
+
+ typedef struct adata {
+ uint length; /* Length of data */
+ byte data[0];
+ } adata;
+
+ extern const adata null_adata; /* adata of length 0 */
+
+ static inline struct adata *
+ lp_alloc_adata(struct linpool *pool, uint len)
+ {
+ struct adata *ad = lp_alloc(pool, sizeof(struct adata) + len);
+ ad->length = len;
+ return ad;
+ }
+
+ static inline int adata_same(const struct adata *a, const struct adata *b)
+ { return (a->length == b->length && !memcmp(a->data, b->data, a->length)); }
+
+
+ typedef struct ea_list {
+ struct ea_list *next; /* In case we have an override list */
+ byte flags; /* Flags: EALF_... */
+ byte rfu;
+ word count; /* Number of attributes */
+ eattr attrs[0]; /* Attribute definitions themselves */
+ } ea_list;
+
+ #define EALF_SORTED 1 /* Attributes are sorted by code */
+ #define EALF_BISECT 2 /* Use interval bisection for searching */
+ #define EALF_CACHED 4 /* Attributes belonging to cached rta */
+
+ struct ea_walk_state {
+ ea_list *eattrs; /* Current ea_list, initially set by caller */
+ eattr *ea; /* Current eattr, initially NULL */
+ u32 visited[4]; /* Bitfield, limiting max to 128 */
+ };
+
+ eattr *ea_find(ea_list *, unsigned ea);
+ eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
+
+ /**
+ * ea_get_int - fetch an integer attribute
+ * @e: attribute list
+ * @id: attribute ID
+ * @def: default value
+ *
+ * This function is a shortcut for retrieving a value of an integer attribute
+ * by calling ea_find() to find the attribute, extracting its value or returning
+ * a provided default if no such attribute is present.
+ */
+ static inline u32
+ ea_get_int(ea_list *e, unsigned id, u32 def)
+ {
+ eattr *a = ea_find(e, id);
+ return a ? a->u.data : def;
+ }
+
+ void ea_dump(ea_list *);
+ void ea_sort(ea_list *); /* Sort entries in all sub-lists */
+ unsigned ea_scan(ea_list *); /* How many bytes do we need for merged ea_list */
+ void ea_merge(ea_list *from, ea_list *to); /* Merge sub-lists to allocated buffer */
+ int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
+ uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
+ ea_list *ea_append(ea_list *to, ea_list *what);
+ void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
+
+ #define ea_normalize(ea) do { \
+ if (ea->next) { \
+ ea_list *t = alloca(ea_scan(ea)); \
+ ea_merge(ea, t); \
+ ea = t; \
+ } \
+ ea_sort(ea); \
+ if (ea->count == 0) \
+ ea = NULL; \
+ } while(0) \
+
+ struct ea_one_attr_list {
+ ea_list l;
+ eattr a;
+ };
+
+ static inline eattr *
+ ea_set_attr(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, union bval val)
+ {
+ struct ea_one_attr_list *ea = lp_alloc(pool, sizeof(*ea));
+ *ea = (struct ea_one_attr_list) {
+ .l.flags = EALF_SORTED,
+ .l.count = 1,
+ .l.next = *to,
+
+ .a.id = id,
+ .a.type = type,
+ .a.flags = flags,
+ };
+
+ ea->a.u = val;
+ *to = &ea->l;
+
+ return &ea->a;
+ }
+
+ static inline void
+ ea_unset_attr(ea_list **to, struct linpool *pool, _Bool local, uint code)
+ {
+ struct ea_one_attr_list *ea = lp_alloc(pool, sizeof(*ea));
+ *ea = (struct ea_one_attr_list) {
+ .l.flags = EALF_SORTED,
+ .l.count = 1,
+ .l.next = *to,
+ .a.id = code,
+ .a.fresh = local,
+ .a.originated = local,
+ .a.undef = 1,
+ };
+
+ *to = &ea->l;
+ }
+
+ static inline void
+ ea_set_attr_u32(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, u32 data)
+ {
+ union bval bv = { .data = data };
+ ea_set_attr(to, pool, id, flags, type, bv);
+ }
+
+ static inline void
+ ea_set_attr_data(ea_list **to, struct linpool *pool, uint id, uint flags, uint type, void *data, uint len)
+ {
+ struct adata *a = lp_alloc_adata(pool, len);
+ memcpy(a->data, data, len);
+ union bval bv = { .ptr = a, };
+ ea_set_attr(to, pool, id, flags, type, bv);
+ }
+
+
+ #define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
+
+ static inline size_t nexthop_size(const struct nexthop *nh)
+ { return sizeof(struct nexthop) + sizeof(u32)*nh->labels; }
+ int nexthop__same(struct nexthop *x, struct nexthop *y); /* Compare multipath nexthops */
+ static inline int nexthop_same(struct nexthop *x, struct nexthop *y)
+ { return (x == y) || nexthop__same(x, y); }
+ struct nexthop *nexthop_merge(struct nexthop *x, struct nexthop *y, int rx, int ry, int max, linpool *lp);
+ struct nexthop *nexthop_sort(struct nexthop *x);
+ static inline void nexthop_link(struct rta *a, struct nexthop *from)
+ { memcpy(&a->nh, from, nexthop_size(from)); }
+ void nexthop_insert(struct nexthop **n, struct nexthop *y);
+ int nexthop_is_sorted(struct nexthop *x);
+
+ void rta_init(void);
+ static inline size_t rta_size(const rta *a) { return sizeof(rta) + sizeof(u32)*a->nh.labels; }
+ #define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
+ rta *rta_lookup(rta *); /* Get rta equivalent to this one, uc++ */
+ static inline int rta_is_cached(rta *r) { return r->cached; }
+ static inline rta *rta_clone(rta *r) { r->uc++; return r; }
+ void rta__free(rta *r);
+ static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
+ rta *rta_do_cow(rta *o, linpool *lp);
+ static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
+ void rta_dump(rta *);
+ void rta_dump_all(void);
+ void rta_show(struct cli *, rta *);
+
+ u32 rt_get_igp_metric(rte *rt);
+
+ #endif
#include "lib/lists.h"
#include "lib/resource.h"
#include "lib/event.h"
- #include "nest/route.h"
+ #include "nest/rt.h"
+#include "nest/limit.h"
#include "conf/conf.h"
struct iface;
net_addr_roa4 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
-- if (net_equal_prefix_roa4(roa, &roa0) && rte_is_valid(r->routes))
++ if (net_equal_prefix_roa4(roa, &roa0) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
net_addr_roa4 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
-- if (net_equal_prefix_roa4(roa, &n) && rte_is_valid(r->routes))
++ if (net_equal_prefix_roa4(roa, &n) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
net_addr_roa6 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
-- if (net_equal_prefix_roa6(roa, &roa0) && rte_is_valid(r->routes))
++ if (net_equal_prefix_roa6(roa, &roa0) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
net_addr_roa6 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
-- if (net_equal_prefix_roa6(roa, &n) && rte_is_valid(r->routes))
++ if (net_equal_prefix_roa6(roa, &n) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
* done outside of scope of rte_announce().
*/
static void
-rte_announce(rtable *tab, uint type, net *net, rte *new, rte *old,
- rte *new_best, rte *old_best)
+rte_announce(rtable *tab, net *net, struct rte_storage *new, struct rte_storage *old,
+ struct rte_storage *new_best, struct rte_storage *old_best)
{
-- if (!rte_is_valid(new))
++ if (!rte_is_valid(RTE_OR_NULL(new)))
new = NULL;
-- if (!rte_is_valid(old))
++ if (!rte_is_valid(RTE_OR_NULL(old)))
old = NULL;
-- if (!rte_is_valid(new_best))
++ if (!rte_is_valid(RTE_OR_NULL(new_best)))
new_best = NULL;
-- if (!rte_is_valid(old_best))
++ if (!rte_is_valid(RTE_OR_NULL(old_best)))
old_best = NULL;
if (!new && !old && !new_best && !old_best)
/* Check rtable for best route to given net whether it would be exported do p */
int
-rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter)
+rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter)
{
net *n = net_find(t, a);
- rte *rt = n ? n->routes : NULL;
- if (!n || !rte_is_valid(n->routes))
- if (!rte_is_valid(rt))
++ if (!n || !rte_is_valid(RTE_OR_NULL(n->routes)))
return 0;
+ rte rt = n->routes->rte;
+
rte_update_lock();
/* Rest is stripped down export_filter() */
u32 igp_metric; /* Chosen route IGP metric */
};
- typedef struct rte {
- struct rta *attrs; /* Attributes of this route */
- const net_addr *net; /* Network this RTE belongs to */
- struct rte_src *src; /* Route source that created the route */
- struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
- btime lastmod; /* Last modified (set by table) */
- u32 id; /* Table specific route id */
- byte flags; /* Table-specific flags */
- byte pflags; /* Protocol-specific flags */
- u8 generation; /* If this route import is based on other previously exported route,
- this value should be 1 + MAX(generation of the parent routes).
- Otherwise the route is independent and this value is zero. */
- } rte;
-
+struct rte_storage {
+ struct rte_storage *next; /* Next in chain */
+ struct rte rte; /* Route data */
+};
+
- #define RTE_COPY(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL)
- #define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
-
- #define REF_FILTERED 2 /* Route is rejected by import filter */
- #define REF_STALE 4 /* Route is stale in a refresh cycle */
- #define REF_DISCARD 8 /* Route is scheduled for discard */
- #define REF_MODIFY 16 /* Route is scheduled for modify */
-
- /* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
- static inline int rte_is_valid_rte(const rte *r) { return r && !(r->flags & REF_FILTERED); }
- static inline int rte_is_valid_storage(const struct rte_storage *r) { return r && rte_is_valid_rte(&r->rte); }
-
- #define rte_is_valid(r) _Generic((*r), rte: rte_is_valid_rte, struct rte_storage: rte_is_valid_storage)(r)
-
- /* Route just has REF_FILTERED flag */
- static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
-
++#define RTE_COPY(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL)
++#define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
+
+/* Table-channel connections */
+
+struct rt_import_request {
+ struct rt_import_hook *hook; /* The table part of importer */
+ char *name;
+ u8 trace_routes;
+
+ void (*dump_req)(struct rt_import_request *req);
+ void (*log_state_change)(struct rt_import_request *req, u8 state);
+ /* Preimport is called when the @new route is just-to-be inserted, replacing @old.
+ * Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
+ struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
+ struct rte *(*rte_modify)(struct rte *, struct linpool *);
+};
+
+struct rt_import_hook {
+ node n;
+ rtable *table; /* The connected table */
+ struct rt_import_request *req; /* The requestor */
+
+ struct rt_import_stats {
+ /* Import - from protocol to core */
+ u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
+ u32 updates_ignored; /* Number of route updates rejected as already in route table */
+ u32 updates_accepted; /* Number of route updates accepted and imported */
+ u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
+ u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
+ } stats;
+
+ btime last_state_change; /* Time of last state transition */
+
+ u8 import_state; /* IS_* */
+
+ void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */
+};
+
+struct rt_pending_export {
+ struct rte_storage *new, *new_best, *old, *old_best;
+};
+
+struct rt_export_request {
+ struct rt_export_hook *hook; /* Table part of the export */
+ char *name;
+ u8 trace_routes;
+
+ /* There are two methods of export. You can either request feeding every single change
+ * or feeding the whole route feed. In case of regular export, &export_one is preferred.
+ * Anyway, when feeding, &export_bulk is preferred, falling back to &export_one.
+ * Thus, for RA_OPTIMAL, &export_one is only set,
+ * for RA_MERGED and RA_ACCEPTED, &export_bulk is only set
+ * and for RA_ANY, both are set to accommodate for feeding all routes but receiving single changes
+ */
+ void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+ void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
+ void (*dump_req)(struct rt_export_request *req);
+ void (*log_state_change)(struct rt_export_request *req, u8);
+};
+
+struct rt_export_hook {
+ node n;
+ rtable *table; /* The connected table */
+
+ pool *pool;
+ linpool *lp;
+
+ struct rt_export_request *req; /* The requestor */
+
+ struct rt_export_stats {
+ /* Export - from core to protocol */
+ u32 updates_received; /* Number of route updates received */
+ u32 withdraws_received; /* Number of route withdraws received */
+ } stats;
+
+ struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
+
+ btime last_state_change; /* Time of last state transition */
+
+ u8 refeed_pending; /* Refeeding and another refeed is scheduled */
+ u8 export_state; /* Route export state (TES_*, see below) */
+
+ struct event *event; /* Event running all the export operations */
+
+ void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
+};
+
+#define TIS_DOWN 0
+#define TIS_UP 1
+#define TIS_STOP 2
+#define TIS_FLUSHING 3
+#define TIS_WAITING 4
+#define TIS_CLEARED 5
+#define TIS_MAX 6
+
+#define TES_DOWN 0
+#define TES_HUNGRY 1
+#define TES_FEEDING 2
+#define TES_READY 3
+#define TES_STOP 4
+#define TES_MAX 5
+
+void rt_request_import(rtable *tab, struct rt_import_request *req);
+void rt_request_export(rtable *tab, struct rt_export_request *req);
+
+void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
+void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
+
+const char *rt_import_state_name(u8 state);
+const char *rt_export_state_name(u8 state);
+
+static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
+static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
+
+void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
+
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
#define RA_OPTIMAL 1 /* Announcement of optimal route change */
static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
net *net_get(rtable *tab, const net_addr *addr);
net *net_route(rtable *tab, const net_addr *n);
- int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
-rte *rte_find(net *net, struct rte_src *src);
-rte *rte_get_temp(struct rta *, struct rte_src *src);
-void rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
-/* rte_update() moved to protocol.h to avoid dependency conflicts */
-int rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter);
-rte *rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent);
-void rt_refresh_begin(rtable *t, struct channel *c);
-void rt_refresh_end(rtable *t, struct channel *c);
-void rt_modify_stale(rtable *t, struct channel *c);
+int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
+rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
+void rt_refresh_begin(rtable *t, struct rt_import_request *);
+void rt_refresh_end(rtable *t, struct rt_import_request *);
+void rt_modify_stale(rtable *t, struct rt_import_request *);
void rt_schedule_prune(rtable *t);
-void rte_dump(rte *);
-void rte_free(rte *);
-rte *rte_do_cow(rte *);
-static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
-rte *rte_cow_rta(rte *r, linpool *lp);
+void rte_dump(struct rte_storage *);
+void rte_free(struct rte_storage *);
+struct rte_storage *rte_store(const rte *, net *net, rtable *);
void rt_dump(rtable *);
void rt_dump_all(void);
-int rt_feed_channel(struct channel *c);
-void rt_feed_channel_abort(struct channel *c);
-int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
+void rt_dump_hooks(rtable *);
+void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
+void rt_refeed_channel(struct channel *c);
void rt_prune_sync(rtable *t, int all);
-int rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old, rte **old_exported, int refeed);
+int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
+int rte_update_out(struct channel *c, const net_addr *n, rte *new, const rte *old, struct rte_storage **old_exported);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
static inline int rt_is_ip(rtable *tab)
}
/* The default case - find a new best-in-group route */
- r = new; /* new may not be in the list */
- for (s=net->routes; rte_is_valid(s); s=s->next)
- if (use_deterministic_med(s) && same_group(s, lpref, lasn))
+ rte *r = new; /* new may not be in the list */
- for (struct rte_storage *s = net->routes; rte_is_valid(s); s = s->next)
++ for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
+ if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
{
- s->pflags |= BGP_REF_SUPPRESSED;
- if (!r || bgp_rte_better(s, r))
- r = s;
+ s->rte.pflags |= BGP_REF_SUPPRESSED;
+ if (!r || bgp_rte_better(&s->rte, r))
+ r = &s->rte;
}
/* Simple case - the last route in group disappears */
new->pflags &= ~BGP_REF_SUPPRESSED;
/* Found all existing routes mergable with best-in-group */
- for (struct rte_storage *s = net->routes; rte_is_valid(s); s = s->next)
- for (s=net->routes; rte_is_valid(s); s=s->next)
- if (use_deterministic_med(s) && same_group(s, lpref, lasn))
- if ((s != r) && bgp_rte_mergable(r, s))
- s->pflags &= ~BGP_REF_SUPPRESSED;
++ for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
+ if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
+ if ((&s->rte != r) && bgp_rte_mergable(r, &s->rte))
+ s->rte.pflags &= ~BGP_REF_SUPPRESSED;
/* Found best-in-group */
r->pflags &= ~BGP_REF_SUPPRESSED;