]])
m4_ifelse($4,,,[[
FID_DUMP_BODY()m4_dnl
- debug("%s" $4 "\n", INDENT, $5);
+ RDUMP("%s" $4 "\n", INDENT, $5);
]])
FID_INTERPRET_EXEC()m4_dnl
-const $1 $2 = whati->$2
+$1 $2 = whati->$2
FID_INTERPRET_BODY')
# Instruction arguments are needed only until linearization is done.
}
}
- void channel_filter_dump(const struct filter *f)
++void channel_filter_dump(struct dump_request *dreq, const struct filter *f)
+{
+ if (f == FILTER_ACCEPT)
- debug(" ALL");
++ RDUMP(" ALL");
+ else if (f == FILTER_REJECT)
- debug(" NONE");
++ RDUMP(" NONE");
+ else if (f == FILTER_UNDEF)
- debug(" UNDEF");
++ RDUMP(" UNDEF");
+ else if (f->sym) {
+ ASSERT(f->sym->filter == f);
- debug(" named filter %s", f->sym->name);
++ RDUMP(" named filter %s", f->sym->name);
+ } else {
- debug("\n");
- f_dump_line(f->root, 2);
++ RDUMP("\n");
++ f_dump_line(dreq, f->root, 2);
+ }
+}
+
- void filters_dump_all(void)
+ void filters_dump_all(struct dump_request *dreq)
{
struct symbol *sym;
- WALK_LIST(sym, config->symbols) {
+ WALK_LIST(sym, OBSREF_GET(config)->symbols) {
switch (sym->class) {
case SYM_FILTER:
- debug("Named filter %s:\n", sym->name);
- f_dump_line(sym->filter->root, 1);
+ RDUMP("Named filter %s:\n", sym->name);
+ f_dump_line(dreq, sym->filter->root, 1);
break;
case SYM_FUNCTION:
- debug("Function %s:\n", sym->name);
- f_dump_line(sym->function, 1);
+ RDUMP("Function %s:\n", sym->name);
+ f_dump_line(dreq, sym->function, 1);
break;
case SYM_PROTO:
{
- debug("Protocol %s:\n", sym->name);
+ RDUMP("Protocol %s:\n", sym->name);
struct channel *c;
WALK_LIST(c, sym->proto->proto->channels) {
- debug(" Channel %s (%s) IMPORT", c->name, net_label[c->net_type]);
- channel_filter_dump(c->in_filter);
- debug(" EXPORT", c->name, net_label[c->net_type]);
- channel_filter_dump(c->out_filter);
- debug("\n");
+ RDUMP(" Channel %s (%s) IMPORT", c->name, net_label[c->net_type]);
- if (c->in_filter == FILTER_ACCEPT)
- RDUMP(" ALL\n");
- else if (c->in_filter == FILTER_REJECT)
- RDUMP(" NONE\n");
- else if (c->in_filter == FILTER_UNDEF)
- RDUMP(" UNDEF\n");
- else if (c->in_filter->sym) {
- ASSERT(c->in_filter->sym->filter == c->in_filter);
- RDUMP(" named filter %s\n", c->in_filter->sym->name);
- } else {
- RDUMP("\n");
- f_dump_line(dreq, c->in_filter->root, 2);
- }
++ channel_filter_dump(dreq, c->in_filter);
++ RDUMP(" EXPORT", c->name, net_label[c->net_type]);
++ channel_filter_dump(dreq, c->out_filter);
++ RDUMP("\n");
}
}
}
int filter_same(const struct filter *new, const struct filter *old);
int f_same(const struct f_line *f1, const struct f_line *f2);
+void filter_preconfig(struct config *new);
void filter_commit(struct config *new, struct config *old);
- void filters_dump_all(void);
+ void filters_dump_all(struct dump_request *);
#define FILTER_ACCEPT NULL
#define FILTER_REJECT ((struct filter *) 1)
static inline u32 u64_hash(u64 v)
{ return hash_value(u64_hash0(v, HASH_PARAM, 0)); }
+
+/* Yield for a little while. Use only in special cases. */
+void birdloop_yield(void);
+
+
+ /* Dumping */
+ struct dump_request {
+ u64 size;
+ btime begin;
+ uint indent, offset;
+ void (*write)(struct dump_request *, const char *fmt, ...);
+ void (*report)(struct dump_request *, int state, const char *fmt, ...);
+ };
+
+ #define RDUMP(...) dreq->write(dreq, __VA_ARGS__)
+
#endif
{
event *e = (event *) r;
- debug("(code %p, data %p, %s)\n",
+ RDUMP("(code %p, data %p, %s)\n",
e->hook,
e->data,
- e->n.next ? "scheduled" : "inactive");
+ atomic_load_explicit(&e->next, memory_order_relaxed) ? "scheduled" : "inactive");
}
static struct resclass ev_class = {
uint total, total_large;
};
-_Thread_local linpool *tmp_linpool;
-
+static void *lp_alloc_slow(struct linpool *, uint);
static void lp_free(resource *);
- static void lp_dump(resource *, unsigned);
+ static void lp_dump(struct dump_request *, resource *);
static resource *lp_lookup(resource *, unsigned long);
static struct resmem lp_memsize(resource *r);
* is freed upon shutdown of the module.
*/
- static void pool_dump(resource *, unsigned);
-struct pool {
- resource r;
- list inside;
- const char *name;
-};
-
+ static void pool_dump(struct dump_request *, resource *);
static void pool_free(resource *);
static resource *pool_lookup(resource *, unsigned long);
static struct resmem pool_memsize(resource *P);
{
r->class->free(r);
xfree(r);
- r = rr;
}
+ POOL_UNLOCK;
}
-
static void
- pool_dump(resource *P, unsigned indent)
+ pool_dump(struct dump_request *dreq, resource *P)
{
pool *p = (pool *) P;
- resource *r;
+
+ POOL_LOCK;
- debug("%s\n", p->name);
+ RDUMP("%s\n", p->name);
+ dreq->indent += 3;
- WALK_LIST(r, p->inside)
+ WALK_TLIST_DELSAFE(resource, r, &p->inside)
- rdump(r, indent + 3);
+ rdump(dreq, r);
+ dreq->indent -= 3;
+
+ POOL_UNLOCK;
}
static struct resmem
/* Generic resource manipulation */
-typedef struct pool pool;
+typedef struct pool {
+ resource r;
+ TLIST_LIST(resource) inside;
+ struct domain_generic *domain;
+ const char *name;
+} pool;
+
void resource_init(void);
-pool *rp_new(pool *, const char *); /* Create new pool */
-pool *rp_newf(pool *, const char *, ...); /* Create a new pool with a formatted string as its name */
void rfree(void *); /* Free single resource */
- void rdump(void *, unsigned indent); /* Dump to debug output */
+
+ struct dump_request;
+ void rdump(struct dump_request *, void *); /* Dump to debug output */
+ void resource_dump(struct dump_request *); /* Dump the root pool */
struct resmem rmemsize(void *res); /* Return size of memory used by the resource */
void rlookup(unsigned long); /* Look up address (only for debugging) */
void rmove(void *, pool *); /* Move to a different pool */
--- /dev/null
- void rt_dump_sources(struct rte_owner *);
+/*
+ * BIRD Internet Routing Daemon -- Routing data structures
+ *
+ * (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_LIB_ROUTE_H_
+#define _BIRD_LIB_ROUTE_H_
+
+#undef RT_SOURCE_DEBUG
+#undef EA_FREE_DEBUG
+
+#include "lib/type.h"
+#include "lib/rcu.h"
+#include "lib/hash.h"
+#include "lib/event.h"
+#include "lib/lockfree.h"
+
+struct network;
+struct proto;
+struct cli;
+struct rtable_private;
+struct rte_storage;
+
+#define RTE_IN_TABLE_WRITABLE \
+ byte pflags; /* Protocol-specific flags; may change in-table (!) */ \
+ byte flags; /* Table-specific flags */ \
+ u8 stale_cycle; /* Auxiliary value for route refresh; may change in-table (!) */ \
+
+typedef struct rte {
+ RTE_IN_TABLE_WRITABLE;
+ u8 generation; /* If this route import is based on other previously exported route,
+ this value should be 1 + MAX(generation of the parent routes).
+ Otherwise the route is independent and this value is zero. */
+ u32 id; /* Table specific route id */
+ struct ea_list *attrs; /* Attributes of this route */
+ const net_addr *net; /* Network this RTE belongs to */
+ struct rte_src *src; /* Route source that created the route */
+ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
+ btime lastmod; /* Last modified (set by table) */
+} rte;
+
+#define REF_FILTERED 2 /* Route is rejected by import filter */
+#define REF_OBSOLETE 16 /* Route is obsolete, pending propagation */
+#define REF_PENDING 32 /* Route has not propagated completely yet */
+
+/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
+static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
+
+/* Route just has REF_FILTERED flag */
+static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
+
+/* Strip the route of the table-specific values */
+static inline rte rte_init_from(const rte *r)
+{
+ return (rte) {
+ .attrs = r->attrs,
+ .net = r->net,
+ .src = r->src,
+ };
+}
+
+int rte_same(const rte *, const rte *);
+
+struct rte_src {
+ struct rte_src *next; /* Hash chain */
+ struct rte_owner *owner; /* Route source owner */
+ u64 private_id; /* Private ID, assigned by the protocol */
+ u32 global_id; /* Globally unique ID of the source */
+ struct lfuc uc; /* Use count */
+};
+
+struct rte_owner_class {
+ void (*get_route_info)(const rte *, byte *buf); /* Get route information (for `show route' command) */
+ int (*rte_better)(const rte *, const rte *);
+ int (*rte_mergable)(const rte *, const rte *);
+ u32 (*rte_igp_metric)(const rte *);
+};
+
+struct rte_owner {
+ struct rte_owner_class *class;
+ int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte_storage *new, struct rte_storage *, struct rte_storage *);
+ HASH(struct rte_src) hash;
+ const char *name;
+ u32 hash_key;
+ u32 uc;
+ u32 debug;
+ event_list *list;
+ event *prune;
+ event *stop;
+};
+
+extern DOMAIN(attrs) attrs_domain;
+
+#define RTA_LOCK LOCK_DOMAIN(attrs, attrs_domain)
+#define RTA_UNLOCK UNLOCK_DOMAIN(attrs, attrs_domain)
+
+#define RTE_SRC_PU_SHIFT 44
+#define RTE_SRC_IN_PROGRESS (1ULL << RTE_SRC_PU_SHIFT)
+
+/* Get a route source. This also locks the source, therefore the caller has to
+ * unlock the source after the route has been propagated. */
+struct rte_src *rt_get_source_o(struct rte_owner *o, u32 id);
+#define rt_get_source(p, id) rt_get_source_o(&(p)->sources, (id))
+
+struct rte_src *rt_find_source_global(u32 id);
+
+#ifdef RT_SOURCE_DEBUG
+#define rt_lock_source _rt_lock_source_internal
+#define rt_unlock_source _rt_unlock_source_internal
+#endif
+
+static inline void rt_lock_source(struct rte_src *src)
+{
+ lfuc_lock(&src->uc);
+}
+
+static inline void rt_unlock_source(struct rte_src *src)
+{
+ lfuc_unlock(&src->uc, src->owner->list, src->owner->prune);
+}
+
+#ifdef RT_SOURCE_DEBUG
+#undef rt_lock_source
+#undef rt_unlock_source
+
+#define rt_lock_source(x) ( log(L_INFO "Lock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_lock_source_internal(x) )
+#define rt_unlock_source(x) ( log(L_INFO "Unlock source %uG at %s:%d", (x)->global_id, __FILE__, __LINE__), _rt_unlock_source_internal(x) )
+#endif
+
+void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
+void rt_destroy_sources(struct rte_owner *, event *);
+
- void ea_dump(ea_list *);
++void rt_dump_sources(struct dump_request *, struct rte_owner *);
+
+/*
+ * Route Attributes
+ *
+ * Beware: All standard BGP attributes must be represented here instead
+ * of making them local to the route. This is needed to ensure proper
+ * construction of BGP route attribute lists.
+ */
+
+/* Nexthop structure */
+struct nexthop {
+ ip_addr gw; /* Next hop */
+ struct iface *iface; /* Outgoing interface */
+ byte flags;
+ byte weight;
+ byte labels; /* Number of all labels */
+ u32 label[0];
+};
+
+/* For packing one into eattrs */
+struct nexthop_adata {
+ struct adata ad;
+ /* There is either a set of nexthops or a special destination (RTD_*) */
+ union {
+ struct nexthop nh;
+ uint dest;
+ };
+};
+
+/* For MPLS label stack generation */
+struct nexthop_adata_mpls {
+ struct nexthop_adata nhad;
+ u32 label_space[MPLS_MAX_LABEL_STACK];
+};
+
+#define NEXTHOP_DEST_SIZE (OFFSETOF(struct nexthop_adata, dest) + sizeof(uint) - OFFSETOF(struct adata, data))
+#define NEXTHOP_DEST_LITERAL(x) ((struct nexthop_adata) { \
+ .ad.length = NEXTHOP_DEST_SIZE, .dest = (x), })
+
+#define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
+
+
+#define RTS_STATIC 1 /* Normal static route */
+#define RTS_INHERIT 2 /* Route inherited from kernel */
+#define RTS_DEVICE 3 /* Device route */
+#define RTS_STATIC_DEVICE 4 /* Static device route */
+#define RTS_REDIRECT 5 /* Learned via redirect */
+#define RTS_RIP 6 /* RIP route */
+#define RTS_OSPF 7 /* OSPF route */
+#define RTS_OSPF_IA 8 /* OSPF inter-area route */
+#define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
+#define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
+#define RTS_BGP 11 /* BGP route */
+#define RTS_PIPE 12 /* Inter-table wormhole */
+#define RTS_BABEL 13 /* Babel route */
+#define RTS_RPKI 14 /* Route Origin Authorization */
+#define RTS_PERF 15 /* Perf checker */
+#define RTS_L3VPN 16 /* MPLS L3VPN */
+#define RTS_AGGREGATED 17 /* Aggregated route */
+#define RTS_MAX 18
+
+#define RTD_NONE 0 /* Undefined next hop */
+#define RTD_UNICAST 1 /* A standard next hop */
+#define RTD_BLACKHOLE 2 /* Silently drop packets */
+#define RTD_UNREACHABLE 3 /* Reject as unreachable */
+#define RTD_PROHIBIT 4 /* Administratively prohibited */
+#define RTD_MAX 5
+
+extern const char * rta_dest_names[RTD_MAX];
+
+static inline const char *rta_dest_name(uint n)
+{ return (n < RTD_MAX) ? rta_dest_names[n] : "???"; }
+
+
+/*
+ * Extended Route Attributes
+ */
+
+typedef struct eattr {
+ word id; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
+ byte flags; /* Protocol-dependent flags */
+ byte type; /* Attribute type */
+ byte rfu:5;
+ byte originated:1; /* The attribute has originated locally */
+ byte fresh:1; /* An uncached attribute (e.g. modified in export filter) */
+ byte undef:1; /* Explicitly undefined */
+
+ PADDING(unused, 3, 3);
+
+ union bval u;
+} eattr;
+
+
+#define EA_CODE_MASK 0xffff
+#define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
+#define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
+#define EA_BIT_GET(ea) ((ea) >> 24)
+
+typedef struct ea_list {
+ struct ea_list *next; /* In case we have an override list */
+ byte flags; /* Flags: EALF_... */
+ byte stored:5; /* enum ea_stored */
+ byte rfu:3;
+ word count; /* Number of attributes */
+ eattr attrs[0]; /* Attribute definitions themselves */
+} ea_list;
+
+enum ea_stored {
+ EALS_NONE = 0, /* This is a temporary ea_list */
+ EALS_PREIMPORT = 1, /* State when route entered rte_update() */
+ EALS_FILTERED = 2, /* State after filters */
+ EALS_IN_TABLE = 3, /* State in table */
+ EALS_KEY = 4, /* EA list used as key */
+ EALS_CUSTOM = 0x10, /* OR this with custom values */
+ EALS_MAX = 0x20,
+};
+
+struct ea_storage {
+ struct ea_storage *next_hash; /* Next in hash chain */
+ _Atomic u64 uc; /* Use count */
+ u32 hash_key; /* List hash */
+ PADDING(unused, 0, 4); /* Sorry, we need u64 for the usecount */
+ ea_list l[0]; /* The list itself */
+};
+
+#define EALF_SORTED 1 /* Attributes are sorted by code */
+#define EALF_BISECT 2 /* Use interval bisection for searching */
+#define EALF_HUGE 8 /* List is too big to fit into slab */
+
+struct ea_class {
+#define EA_CLASS_INSIDE \
+ const char *name; /* Name (both print and filter) */ \
+ struct symbol *sym; /* Symbol to export to configs */ \
+ uint id; /* Autoassigned attribute ID */ \
+ uint uc; /* Reference count */ \
+ btype type; /* Data type ID */ \
+ u16 flags; /* Protocol-dependent flags */ \
+ uint readonly:1; /* This attribute can't be changed by filters */ \
+ uint conf:1; /* Requested by config */ \
+ uint hidden:1; /* Technical attribute, do not show, do not expose to filters */ \
+ void (*format)(const eattr *ea, byte *buf, uint size); \
+ void (*stored)(const eattr *ea); /* When stored into global hash */ \
+ void (*freed)(const eattr *ea); /* When released from global hash */ \
+ struct f_val (*empty)(const struct ea_class *); /* Return this instead of T_VOID as default value for filters */ \
+
+ EA_CLASS_INSIDE;
+};
+
+struct ea_class_ref {
+ resource r;
+ struct ea_class *class;
+};
+
+void ea_register_init(struct ea_class *);
+struct ea_class_ref *ea_register_alloc(pool *, struct ea_class);
+struct ea_class_ref *ea_ref_class(pool *, struct ea_class *); /* Reference for an attribute alias */
+
+#define EA_REGISTER_ALL_HELPER(x) ea_register_init(x);
+#define EA_REGISTER_ALL(...) MACRO_FOREACH(EA_REGISTER_ALL_HELPER, __VA_ARGS__)
+
+struct ea_class *ea_class_find_by_id(uint id);
+struct ea_class *ea_class_find_by_name(const char *name);
+static inline struct ea_class *ea_class_self(struct ea_class *self) { return self; }
+#define ea_class_find(_arg) _Generic((_arg), \
+ uint: ea_class_find_by_id, \
+ word: ea_class_find_by_id, \
+ char *: ea_class_find_by_name, \
+ const char *: ea_class_find_by_name, \
+ struct ea_class *: ea_class_self)(_arg)
+
+struct ea_walk_state {
+ ea_list *eattrs; /* Current ea_list, initially set by caller */
+ eattr *ea; /* Current eattr, initially NULL */
+ u32 visited[4]; /* Bitfield, limiting max to 128 */
+};
+
+#define ea_find(_l, _arg) _Generic((_arg), uint: ea_find_by_id, struct ea_class *: ea_find_by_class, char *: ea_find_by_name)(_l, _arg)
+eattr *ea_find_by_id(ea_list *, unsigned ea);
+static inline eattr *ea_find_by_class(ea_list *l, const struct ea_class *def)
+{ return ea_find_by_id(l, def->id); }
+static inline eattr *ea_find_by_name(ea_list *l, const char *name)
+{
+ const struct ea_class *def = ea_class_find_by_name(name);
+ return def ? ea_find_by_class(l, def) : NULL;
+}
+
+#define ea_get_int(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type & EAF_EMBEDDED && cls->type != T_PTR); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? ea->u.data : (_def)); \
+ })
+
+#define ea_get_ptr(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type == T_PTR); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? ea->u.v_ptr : (_def)); \
+ })
+
+#define ea_get_ip(_l, _ident, _def) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(cls->type == T_IP); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? *((const ip_addr *) ea->u.ptr->data) : (_def)); \
+ })
+
+#define ea_get_adata(_l, _ident) ({ \
+ struct ea_class *cls = ea_class_find((_ident)); \
+ ASSERT_DIE(!(cls->type & EAF_EMBEDDED)); \
+ const eattr *ea = ea_find((_l), cls->id); \
+ (ea ? ea->u.ptr : &null_adata); \
+ })
+
+eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
- void ea_dump(ea_list *);
- void ea_dump_all(void);
+int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
+uint ea_hash(ea_list *e); /* Calculate attributes hash value */
+ea_list *ea_append(ea_list *to, ea_list *what);
+void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
+
+/* Normalize ea_list; allocates the result from tmp_linpool */
+ea_list *ea_normalize(ea_list *e, u32 upto);
+
+uint ea_list_size(ea_list *);
+void ea_list_copy(ea_list *dest, ea_list *src, uint size);
+
+#define EA_LOCAL_LIST(N) struct { ea_list l; eattr a[N]; }
+
+#define EA_LITERAL_EMBEDDED(_class, _flags, _val) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(_type & EAF_EMBEDDED && _type != T_PTR); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.i = _val); \
+ })
+
+#define EA_LITERAL_STORE_PTR(_class, _flags, _ptr) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(_type == T_PTR); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.v_ptr = _ptr); \
+ })
+
+#define EA_LITERAL_STORE_ADATA(_class, _flags, _buf, _len) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = tmp_store_adata((_buf), (_len))); \
+ })
+
+#define EA_LITERAL_DIRECT_ADATA(_class, _flags, _adata) ({ \
+ btype _type = (_class)->type; \
+ ASSERT_DIE(!(_type & EAF_EMBEDDED)); \
+ EA_LITERAL_GENERIC((_class)->id, _type, _flags, .u.ad = _adata); \
+ })
+
+#define EA_LITERAL_GENERIC(_id, _type, _flags, ...) \
+ ((eattr) { .id = _id, .type = _type, .flags = _flags, __VA_ARGS__ })
+
+#define EA_LITERAL_STORE_STRING(_class, _flags, string) ({EA_LITERAL_STORE_ADATA(_class, _flags, string, strlen(string)+1);})
+
+static inline eattr *
+ea_set_attr(ea_list **to, eattr a)
+{
+ if (!a.id)
+ bug("You have forgotten to register your EA class");
+
+ EA_LOCAL_LIST(1) *ea = tmp_alloc(sizeof(*ea));
+ *ea = (typeof(*ea)) {
+ .l.flags = EALF_SORTED,
+ .l.count = 1,
+ .l.next = *to,
+ .a[0] = a,
+ };
+
+ *to = &ea->l;
+ return &ea->a[0];
+}
+
+static inline void
+ea_unset_attr(ea_list **to, bool local, const struct ea_class *def)
+{
+ ea_set_attr(to, EA_LITERAL_GENERIC(def->id, 0, 0,
+ .fresh = local, .originated = local, .undef = 1));
+}
+
+static inline void
+ea_set_attr_u32(ea_list **to, const struct ea_class *def, uint flags, u64 data)
+{ ea_set_attr(to, EA_LITERAL_EMBEDDED(def, flags, data)); }
+
+static inline void
+ea_set_attr_ptr(ea_list **to, const struct ea_class *def, uint flags, void *data)
+{ ea_set_attr(to, EA_LITERAL_STORE_PTR(def, flags, data)); }
+
+static inline void
+ea_set_attr_data(ea_list **to, const struct ea_class *def, uint flags, const void *data, uint len)
+{ ea_set_attr(to, EA_LITERAL_STORE_ADATA(def, flags, data, len)); }
+
+static inline void
+ea_copy_attr(ea_list **to, ea_list *from, const struct ea_class *def)
+{
+ eattr *e = ea_find_by_class(from, def);
+ if (e)
+ if (e->type == T_PTR)
+ ea_set_attr_ptr(to, def, e->flags, (void *)e->u.v_ptr);
+ else if (e->type & EAF_EMBEDDED)
+ ea_set_attr_u32(to, def, e->flags, e->u.data);
+ else
+ ea_set_attr_data(to, def, e->flags, e->u.ptr->data, e->u.ptr->length);
+ else
+ ea_unset_attr(to, 0, def);
+}
+
+/*
+ * Common route attributes
+ */
+
+/* Preference: first-order comparison */
+extern struct ea_class ea_gen_preference;
+static inline u32 rt_get_preference(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_preference, 0); }
+
+/* IGP metric: second-order comparison */
+extern struct ea_class ea_gen_igp_metric;
+u32 rt_get_igp_metric(const rte *rt);
+#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
+ protocol-specific metric is available */
+
+/* From: Advertising router */
+extern struct ea_class ea_gen_from;
+
+
+/* MPLS Label, Policy and Class */
+extern struct ea_class ea_gen_mpls_label,
+ ea_gen_mpls_policy, ea_gen_mpls_class;
+
+
+/* Source: An old method to devise the route source protocol and kind.
+ * To be superseded in a near future by something more informative. */
+extern struct ea_class ea_gen_source;
+static inline u32 rt_get_source_attr(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_source, 0); }
+
+/* Flowspec validation result */
+enum flowspec_valid {
+ FLOWSPEC_UNKNOWN = 0,
+ FLOWSPEC_VALID = 1,
+ FLOWSPEC_INVALID = 2,
+ FLOWSPEC__MAX,
+};
+
+extern const char * flowspec_valid_names[FLOWSPEC__MAX];
+static inline const char *flowspec_valid_name(enum flowspec_valid v)
+{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
+
+extern struct ea_class ea_gen_flowspec_valid;
+static inline enum flowspec_valid rt_get_flowspec_valid(const rte *rt)
+{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
+
+/* Next hop: For now, stored as adata */
+extern struct ea_class ea_gen_nexthop;
+
+static inline void ea_set_dest(struct ea_list **to, uint flags, uint dest)
+{
+ struct nexthop_adata nhad = NEXTHOP_DEST_LITERAL(dest);
+ ea_set_attr_data(to, &ea_gen_nexthop, flags, &nhad.ad.data, nhad.ad.length);
+}
+
+/* Next hop structures */
+
+#define NEXTHOP_ALIGNMENT (_Alignof(struct nexthop))
+#define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
+#define NEXTHOP_SIZE(_nh) NEXTHOP_SIZE_CNT(((_nh)->labels))
+#define NEXTHOP_SIZE_CNT(cnt) BIRD_ALIGN((sizeof(struct nexthop) + sizeof(u32) * (cnt)), NEXTHOP_ALIGNMENT)
+#define nexthop_size(nh) NEXTHOP_SIZE((nh))
+
+#define NEXTHOP_NEXT(_nh) ((void *) (_nh) + NEXTHOP_SIZE(_nh))
+#define NEXTHOP_END(_nhad) ((_nhad)->ad.data + (_nhad)->ad.length)
+#define NEXTHOP_VALID(_nh, _nhad) ((void *) (_nh) < (void *) NEXTHOP_END(_nhad))
+#define NEXTHOP_ONE(_nhad) (NEXTHOP_NEXT(&(_nhad)->nh) == NEXTHOP_END(_nhad))
+
+#define NEXTHOP_WALK(_iter, _nhad) for ( \
+ struct nexthop *_iter = &(_nhad)->nh; \
+ (void *) _iter < (void *) NEXTHOP_END(_nhad); \
+ _iter = NEXTHOP_NEXT(_iter))
+
+
+static inline int nexthop_same(struct nexthop_adata *x, struct nexthop_adata *y)
+{ return adata_same(&x->ad, &y->ad); }
+struct nexthop_adata *nexthop_merge(struct nexthop_adata *x, struct nexthop_adata *y, int max, linpool *lp);
+struct nexthop_adata *nexthop_sort(struct nexthop_adata *x, linpool *lp);
+int nexthop_is_sorted(struct nexthop_adata *x);
+
+#define NEXTHOP_IS_REACHABLE(nhad) ((nhad)->ad.length > NEXTHOP_DEST_SIZE)
+
+static inline struct nexthop_adata *
+rte_get_nexthops(rte *r)
+{
+ eattr *nhea = ea_find(r->attrs, &ea_gen_nexthop);
+ return nhea ? SKIP_BACK(struct nexthop_adata, ad, nhea->u.ptr) : NULL;
+}
+
+/* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
+static inline int rte_is_reachable(rte *r)
+{
+ struct nexthop_adata *nhad = rte_get_nexthops(r);
+ return nhad && NEXTHOP_IS_REACHABLE(nhad);
+}
+
+static inline int nhea_dest(eattr *nhea)
+{
+ if (!nhea)
+ return RTD_NONE;
+
+ struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
+ if (NEXTHOP_IS_REACHABLE(nhad))
+ return RTD_UNICAST;
+ else
+ return nhad->dest;
+}
+
+static inline int rte_dest(const rte *r)
+{
+ return nhea_dest(ea_find(r->attrs, &ea_gen_nexthop));
+}
+
+/* ASPA Providers eattr */
+extern struct ea_class ea_gen_aspa_providers;
+
+
+void rta_init(void);
+
+ea_list *ea_lookup_slow(ea_list *r, u32 squash_upto, enum ea_stored oid);
+
+static inline struct ea_storage *ea_get_storage(ea_list *r)
+{
+ ASSERT_DIE(r->stored);
+ return SKIP_BACK(struct ea_storage, l[0], r);
+}
+
+static inline ea_list *ea_ref(ea_list *r)
+{
+ ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
+ return r;
+}
+
+static inline ea_list *ea_lookup(ea_list *r, u32 squash_upto, enum ea_stored oid)
+{
+ ASSERT_DIE(oid);
+ if ((r->stored == oid) || BIT32_TEST(&squash_upto, r->stored))
+ return ea_ref(r);
+ else
+ return ea_lookup_slow(r, squash_upto, oid);
+}
+
+struct ea_free_deferred {
+ struct deferred_call dc;
+ ea_list *attrs;
+};
+
+void ea_free_deferred(struct deferred_call *dc);
+
+#ifdef EA_FREE_DEBUG
+#define ea_free_later _ea_free_later_internal
+#endif
+
+static inline ea_list *ea_free_later(ea_list *r)
+{
+ if (!r)
+ return NULL;
+
+ struct ea_free_deferred efd = {
+ .dc.hook = ea_free_deferred,
+ .attrs = r,
+ };
+
+ defer_call(&efd.dc, sizeof efd);
+ return r;
+}
+
+#ifdef EA_FREE_DEBUG
+#undef ea_free_later
+#define ea_free_later(x) ( log(L_INFO "EA free request %p at %s:%d", (x), __FILE__, __LINE__), _ea_free_later_internal(x) )
+#endif
+
+#define ea_free ea_free_later
+
+static inline ea_list *ea_lookup_tmp(ea_list *r, u32 squash_upto, enum ea_stored oid)
+{
+ return ea_free_later(ea_lookup(r, squash_upto, oid));
+}
+
+static inline ea_list *ea_ref_tmp(ea_list *r)
+{
+ ASSERT_DIE(r->stored);
+ return ea_free_later(ea_ref(r));
+}
+
+static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
+{
+ ASSERT_DIE(strip_to);
+ while (r && !BIT32_TEST(&strip_to, r->stored))
+ r = r->next;
+
+ return r;
+}
+
++void ea_dump(struct dump_request *, ea_list *);
++void ea_dump_all(struct dump_request *);
+void ea_show_list(struct cli *, ea_list *);
+
+#endif
{
timer *t = (void *) r;
- debug("(code %p, data %p, ", t->hook, t->data);
+ RDUMP("(code %p, data %p, ", t->hook, t->data);
if (t->randomize)
- debug("rand %d, ", t->randomize);
+ RDUMP("rand %d, ", t->randomize);
if (t->recurrent)
- debug("recur %ld, ", t->recurrent);
+ RDUMP("recur %ld, ", t->recurrent);
if (t->expires)
- debug("in loop %p expires in %ld ms)\n", t->loop, (t->expires - current_time()) TO_MS);
- RDUMP("expires in %ld ms)\n", (t->expires - current_time()) TO_MS);
++ RDUMP("in loop %p expires in %ld ms)\n", t->loop, (t->expires - current_time()) TO_MS);
else
- debug("inactive)\n");
+ RDUMP("inactive)\n");
}
| sym_args CF_SYM_KNOWN { $$ = $1; $$->sym = $2; }
;
-
- CF_CLI_HELP(DUMP, ..., [[Dump debugging information]])
- CF_CLI(DUMP RESOURCES,,, [[Dump all allocated resource]])
- { rdump(&root_pool, 0); cli_msg(0, ""); } ;
- CF_CLI(DUMP SOCKETS,,, [[Dump open sockets]])
- { sk_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP EVENTS,,, [[Dump event log]])
- { io_log_dump(); cli_msg(0, ""); } ;
- CF_CLI(DUMP INTERFACES,,, [[Dump interface information]])
- { if_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP NEIGHBORS,,, [[Dump neighbor cache]])
- { neigh_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP ATTRIBUTES,,, [[Dump attribute cache]])
- { ea_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP ROUTES,,, [[Dump routes]])
- { rt_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP TABLES,,, [[Dump table connections]])
- { rt_dump_hooks_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP PROTOCOLS,,, [[Dump protocol information]])
- { protos_dump_all(); cli_msg(0, ""); } ;
- CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
- { filters_dump_all(); cli_msg(0, ""); } ;
+ CF_CLI_HELP(DUMP, ..., [[Dump debugging information to the given file]])
+ CF_CLI(DUMP RESOURCES, text,, [[Dump all allocated resource]])
+ { cmd_dump_file(this_cli, $3, "resources", resource_dump); } ;
+ CF_CLI(DUMP SOCKETS, text,, [[Dump open sockets]])
+ { cmd_dump_file(this_cli, $3, "sockets", sk_dump_all); } ;
+ CF_CLI(DUMP EVENTS, text,, [[Dump event log]])
+ { cmd_dump_file(this_cli, $3, "event log", io_log_dump); } ;
+ CF_CLI(DUMP INTERFACES, text,, [[Dump interface information]])
+ { cmd_dump_file(this_cli, $3, "interfaces", if_dump_all); } ;
+ CF_CLI(DUMP NEIGHBORS, text,, [[Dump neighbor cache]])
+ { cmd_dump_file(this_cli, $3, "neighbor cache", neigh_dump_all); } ;
+ CF_CLI(DUMP ATTRIBUTES, text,, [[Dump attribute cache]])
-{ cmd_dump_file(this_cli, $3, "attribute cache", rta_dump_all); } ;
-CF_CLI(DUMP ROUTES, text,, [[Dump routing table]])
++{ cmd_dump_file(this_cli, $3, "attribute cache", ea_dump_all); } ;
++CF_CLI(DUMP ROUTES, text,, [[Dump routes]])
+ { cmd_dump_file(this_cli, $3, "routing tables", rt_dump_all); } ;
++CF_CLI(DUMP TABLES, text,, [[Dump table connections]])
++{ cmd_dump_file(this_cli, $3, "table connections", rt_dump_hooks_all); } ;
+ CF_CLI(DUMP PROTOCOLS, text,, [[Dump protocol information]])
+ { cmd_dump_file(this_cli, $3, "protocols", protos_dump_all); } ;
+ CF_CLI(DUMP FILTER ALL, text,, [[Dump all filters in linearized form]])
+ { cmd_dump_file(this_cli, $4, "filter bytecode", filters_dump_all); } ;
CF_CLI(EVAL, term, <expr>, [[Evaluate an expression]])
{ cmd_eval(f_linearize($2, 1)); } ;
static void if_recalc_preferred(struct iface *i);
- static void ifa_dump_locked(struct ifa *);
- static void if_dump_locked(struct iface *);
+static void ifa_delete_locked(struct ifa *a);
+
++static void ifa_dump_locked(struct dump_request *, struct ifa *);
++static void if_dump_locked(struct dump_request *, struct iface *);
+
+struct iface *
+if_walk_first(void)
+{
+ IFACE_LOCK;
+ struct iface *i = HEAD(global_iface_list);
+ return NODE_VALID(i) ? i : NULL;
+}
+
+struct iface *
+if_walk_next(struct iface *i)
+{
+ IFACE_ASSERT_LOCKED;
+ i = NODE_NEXT(i);
+ return NODE_VALID(i) ? i : NULL;
+}
+
+void
+if_walk_done(void)
+{
+ IFACE_ASSERT_LOCKED;
+ IFACE_UNLOCK;
+}
+
/**
* ifa_dump - dump interface address
* @a: interface address descriptor
* This function dumps contents of an &ifa to the debug output.
*/
void
- ifa_dump(struct ifa *a)
+ ifa_dump(struct dump_request *dreq, struct ifa *a)
+{
+ IFACE_LOCK;
- ifa_dump_locked(a);
++ ifa_dump_locked(dreq, a);
+ IFACE_UNLOCK;
+}
+
+static void
- ifa_dump_locked(struct ifa *a)
++ifa_dump_locked(struct dump_request *dreq, struct ifa *a)
{
- debug("\t%I, net %N bc %I -> %I%s%s%s%s\n", a->ip, &a->prefix, a->brd, a->opposite,
+ RDUMP("\t%I, net %N bc %I -> %I%s%s%s%s\n", a->ip, &a->prefix, a->brd, a->opposite,
(a->flags & IA_PRIMARY) ? " PRIMARY" : "",
(a->flags & IA_SECONDARY) ? " SEC" : "",
(a->flags & IA_HOST) ? " HOST" : "",
* network interface to the debug output.
*/
void
- if_dump(struct iface *i)
+ if_dump(struct dump_request *dreq, struct iface *i)
+{
+ IFACE_LOCK;
- if_dump_locked(i);
++ if_dump_locked(dreq, i);
+ IFACE_UNLOCK;
+}
+
+static void
- if_dump_locked(struct iface *i)
++if_dump_locked(struct dump_request *dreq, struct iface *i)
{
struct ifa *a;
- debug("IF%d: %s", i->index, i->name);
+ RDUMP("IF%d: %s", i->index, i->name);
if (i->flags & IF_SHUTDOWN)
- debug(" SHUTDOWN");
+ RDUMP(" SHUTDOWN");
if (i->flags & IF_UP)
- debug(" UP");
+ RDUMP(" UP");
else
- debug(" DOWN");
+ RDUMP(" DOWN");
if (i->flags & IF_ADMIN_UP)
- debug(" LINK-UP");
+ RDUMP(" LINK-UP");
if (i->flags & IF_MULTIACCESS)
- debug(" MA");
+ RDUMP(" MA");
if (i->flags & IF_BROADCAST)
- debug(" BC");
+ RDUMP(" BC");
if (i->flags & IF_MULTICAST)
- debug(" MC");
+ RDUMP(" MC");
if (i->flags & IF_LOOPBACK)
- debug(" LOOP");
+ RDUMP(" LOOP");
if (i->flags & IF_IGNORE)
- debug(" IGN");
+ RDUMP(" IGN");
if (i->flags & IF_TMP_DOWN)
- debug(" TDOWN");
- debug(" MTU=%d\n", i->mtu);
+ RDUMP(" TDOWN");
+ RDUMP(" MTU=%d\n", i->mtu);
WALK_LIST(a, i->addrs)
{
- ifa_dump_locked(a);
- ifa_dump(dreq, a);
++ ifa_dump_locked(dreq, a);
ASSERT(!!(a->flags & IA_PRIMARY) ==
((a == i->addr4) || (a == i->addr6) || (a == i->llv6)));
}
* interfaces to the debug output.
*/
void
- if_dump_all(void)
+ if_dump_all(struct dump_request *dreq)
{
- debug("Known network interfaces:\n");
- struct iface *i;
-
+ RDUMP("Known network interfaces:\n");
- WALK_LIST(i, iface_list)
- if_dump(dreq, i);
- RDUMP("Router ID: %08x\n", config->router_id);
+ IFACE_WALK(i)
- if_dump(i);
++ if_dump_locked(dreq, i);
+ rcu_read_lock();
- debug("Router ID: %08x\n", atomic_load_explicit(&global_runtime, memory_order_relaxed)->router_id);
++ RDUMP("Router ID: %08x\n", atomic_load_explicit(&global_runtime, memory_order_relaxed)->router_id);
+ rcu_read_unlock();
+}
+
+void
+if_link(struct iface *i)
+{
+ IFACE_ASSERT_LOCKED;
+
+ if (i)
+ i->uc++;
+}
+
+void
+if_unlink(struct iface *i)
+{
+ IFACE_ASSERT_LOCKED;
+
+ if (i)
+ i->uc--;
+ /* TODO: Do some interface object cleanup */
+}
+
+void ifa_link(struct ifa *a)
+{
+ IFACE_ASSERT_LOCKED;
+
+ if (a)
+ {
+// debug("ifa_link: %p %d\n", a, a->uc);
+ a->uc++;
+ }
+}
+
+void ifa_unlink(struct ifa *a)
+{
+ IFACE_ASSERT_LOCKED;
+
+ if (!a)
+ return;
+
+// debug("ifa_unlink: %p %d\n", a, a->uc);
+ if (--a->uc)
+ return;
+
+ if_unlink(a->iface);
+#if DEBUGGING
+ memset(a, 0x5b, sizeof(struct ifa));
+#endif
+ mb_free(a);
}
static inline unsigned
neighbor *neigh_find(struct proto *p, ip_addr a, struct iface *ifa, uint flags);
- void neigh_dump_all(void);
-void neigh_dump(struct dump_request *, neighbor *);
+ void neigh_dump_all(struct dump_request *);
-void neigh_prune(void);
void neigh_if_up(struct iface *);
void neigh_if_down(struct iface *);
void neigh_if_link(struct iface *);
*
 * This function dumps the contents of a given neighbor entry to debug output.
*/
-void
+static void
- neigh_dump(neighbor *n)
+ neigh_dump(struct dump_request *dreq, neighbor *n)
{
- debug("%p %I %s %s ", n, n->addr,
+ RDUMP("%p %I %s %s ", n, n->addr,
n->iface ? n->iface->name : "[]",
n->ifreq ? n->ifreq->name : "[]");
- debug("%s %p %08x scope %s", n->proto->name, n->data, n->aux, ip_scope_text(n->scope));
+ RDUMP("%s %p %08x scope %s", n->proto->name, n->data, n->aux, ip_scope_text(n->scope));
if (n->flags & NEF_STICKY)
- debug(" STICKY");
+ RDUMP(" STICKY");
if (n->flags & NEF_ONLINK)
- debug(" ONLINK");
- debug("\n");
+ RDUMP(" ONLINK");
+ RDUMP("\n");
}
/**
* This function dumps the contents of the neighbor cache to debug output.
*/
void
- neigh_dump_all(void)
+ neigh_dump_all(struct dump_request *dreq)
{
+ IFACE_LOCK;
+
neighbor *n;
int i;
- debug("Known neighbors:\n");
+ RDUMP("Known neighbors:\n");
for(i=0; i<NEIGH_HASH_SIZE; i++)
WALK_LIST(n, neigh_hash_table[i])
- neigh_dump(n);
- debug("\n");
+ neigh_dump(dreq, n);
+ RDUMP("\n");
+
+ IFACE_UNLOCK;
}
static inline void
* the internals.
*/
void
- protos_dump_all(void)
+ protos_dump_all(struct dump_request *dreq)
{
- debug("Protocols:\n");
+ RDUMP("Protocols:\n");
- struct proto *p;
- WALK_LIST(p, proto_list)
+ WALK_TLIST(proto, p, &global_proto_list) PROTO_LOCKED_FROM_MAIN(p)
{
- RDUMP(" protocol %s state %s\n", p->name, p_states[p->proto_state]);
+#define DPF(x) (p->x ? " " #x : "")
- debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s\n",
++ RDUMP(" protocol %s (%p) state %s with %d active channels flags: %s%s%s\n",
+ p->name, p, p_states[p->proto_state], p->active_channels,
+ DPF(disabled), DPF(do_stop), DPF(reconfiguring));
+#undef DPF
struct channel *c;
WALK_LIST(c, p->channels)
{
- debug("\tTABLE %s\n", c->table->name);
+ RDUMP("\tTABLE %s\n", c->table->name);
if (c->in_filter)
- debug("\tInput filter: %s\n", filter_name(c->in_filter));
+ RDUMP("\tInput filter: %s\n", filter_name(c->in_filter));
if (c->out_filter)
- debug("\tOutput filter: %s\n", filter_name(c->out_filter));
- debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
+ RDUMP("\tOutput filter: %s\n", filter_name(c->out_filter));
++ RDUMP("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
+ c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
+ rt_export_state_name(rt_export_get_state(&c->out_req)));
}
- debug("\tSOURCES\n");
- rt_dump_sources(&p->sources);
- if (p->proto->dump && (p->proto_state != PS_DOWN))
++ RDUMP("\tSOURCES\n");
++ if (p->proto_state != PS_DOWN_XX)
++ rt_dump_sources(dreq, &p->sources);
+
+ if (p->proto->dump &&
+ (p->proto_state != PS_DOWN_XX) &&
+ (p->proto_state != PS_FLUSH))
- p->proto->dump(p);
+ p->proto->dump(p, dreq);
}
}
void (*postconfig)(struct proto_config *); /* After configuring each instance */
struct proto * (*init)(struct proto_config *); /* Create new instance */
int (*reconfigure)(struct proto *, struct proto_config *); /* Try to reconfigure instance, returns success */
- void (*dump)(struct proto *); /* Debugging dump */
+ void (*dump)(struct proto *, struct dump_request *); /* Debugging dump */
int (*start)(struct proto *); /* Start the instance */
int (*shutdown)(struct proto *); /* Stop the instance */
- void (*cleanup)(struct proto *); /* Called after shutdown when protocol became hungry/down */
+ void (*cleanup)(struct proto *); /* Cleanup the instance right before tearing it all down */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
- void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
- int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
+// int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
void (*show_proto_info)(struct proto *); /* Show protocol info (for `show protocols all' command) */
void (*copy_config)(struct proto_config *, struct proto_config *); /* Copy config from given protocol instance */
};
void protos_build(void); /* Called from sysdep to initialize protocols */
void proto_build(struct protocol *); /* Called from protocol to register itself */
void protos_preconfig(struct config *);
-void protos_commit(struct config *new, struct config *old, int force_restart, int type);
+void protos_commit(struct config *new, struct config *old, int type);
struct proto * proto_spawn(struct proto_config *cf, uint disabled);
- void protos_dump_all(void);
+ void protos_dump_all(struct dump_request *);
#define GA_UNKNOWN 0 /* Attribute not recognized */
#define GA_NAME 1 /* Result = name */
void rt_flowspec_link(rtable *src, rtable *dst);
void rt_flowspec_unlink(rtable *src, rtable *dst);
rtable *rt_setup(pool *, struct rtable_config *);
-static inline void rt_shutdown(rtable *r) { rfree(r->rp); }
-
-static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
-static inline net *net_find_valid(rtable *tab, const net_addr *addr)
-{ net *n = net_find(tab, addr); return (n && rte_is_valid(n->routes)) ? n : NULL; }
-static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
-net *net_get(rtable *tab, const net_addr *addr);
-net *net_route(rtable *tab, const net_addr *n);
-int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
-enum aspa_result aspa_check(rtable *tab, const struct adata *path, bool force_upstream);
-rte *rte_find(net *net, struct rte_src *src);
-rte *rte_get_temp(struct rta *, struct rte_src *src);
-void rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
-/* rte_update() moved to protocol.h to avoid dependency conflicts */
+void rt_setup_digestor(struct rtable_private *tab);
+
+struct rt_export_feed *rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first);
+rte rt_net_best(rtable *t, const net_addr *a);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
-rte *rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent);
-void rt_refresh_begin(rtable *t, struct channel *c);
-void rt_refresh_end(rtable *t, struct channel *c);
-void rt_modify_stale(rtable *t, struct channel *c);
-void rt_schedule_prune(rtable *t);
-void rte_dump(struct dump_request *, rte *);
-void rte_free(rte *);
-rte *rte_do_cow(rte *);
-static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
-rte *rte_cow_rta(rte *r, linpool *lp);
+rte *rt_export_merged(struct channel *c, const struct rt_export_feed *feed, linpool *pool, int silent);
+void rt_refresh_begin(struct rt_import_request *);
+void rt_refresh_end(struct rt_import_request *);
+void rt_schedule_prune(struct rtable_private *t);
- void rte_dump(struct rte_storage *);
- void rt_dump(rtable *);
- void rt_dump_all(void);
- void rt_dump_hooks(rtable *);
- void rt_dump_hooks_all(void);
++void rte_dump(struct dump_request *, struct rte_storage *);
+ void rt_dump(struct dump_request *, rtable *);
+ void rt_dump_all(struct dump_request *);
-int rt_feed_channel(struct channel *c);
-void rt_feed_channel_abort(struct channel *c);
-int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
++void rt_dump_hooks(struct dump_request *, rtable *);
++void rt_dump_hooks_all(struct dump_request *);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_prune_sync(rtable *t, int all);
}
HASH_WALK_FILTER_END;
- HASH_MAY_RESIZE_DOWN(src_hash, RSH, rta_pool);
-}
+ RTA_LOCK;
+ HASH_MAY_RESIZE_DOWN(o->hash, RSH, rta_pool);
+ if (o->stop && !o->uc)
+ {
+ rfree(o->prune);
+ RTA_UNLOCK;
-/*
- * Multipath Next Hop
- */
+ if (o->debug & D_EVENTS)
+ log(L_TRACE "%s: all rte_src's pruned, scheduling stop event", o->name);
+
+ rt_done_sources(o);
+ }
+ else
+ RTA_UNLOCK;
+}
-static inline u32
-nexthop_hash(struct nexthop *x)
+void
- rt_dump_sources(struct rte_owner *o)
++rt_dump_sources(struct dump_request *dreq, struct rte_owner *o)
{
- debug("\t%s: hord=%u, uc=%u, cnt=%u prune=%p, stop=%p\n",
- u32 h = 0;
- for (; x; x = x->next)
++ RDUMP("\t%s: hord=%u, uc=%u, cnt=%u prune=%p, stop=%p\n",
+ o->name, o->hash.order, o->uc, o->hash.count, o->prune, o->stop);
- debug("\tget_route_info=%p, better=%p, mergable=%p, igp_metric=%p, recalculate=%p",
++ RDUMP("\tget_route_info=%p, better=%p, mergable=%p, igp_metric=%p, recalculate=%p",
+ o->class->get_route_info, o->class->rte_better, o->class->rte_mergable,
+ o->class->rte_igp_metric, o->rte_recalculate);
+
+ int splitting = 0;
+ HASH_WALK(o->hash, next, src)
{
- debug("%c%c%uL %uG %luU",
- h ^= ipa_hash(x->gw) ^ (h << 5) ^ (h >> 9);
++ RDUMP("%c%c%uL %uG %luU",
+ (splitting % 8) ? ',' : '\n',
+ (splitting % 8) ? ' ' : '\t',
+ src->private_id, src->global_id,
+ atomic_load_explicit(&src->uc.uc, memory_order_relaxed));
- for (int i = 0; i < x->labels; i++)
- h ^= x->label[i] ^ (h << 6) ^ (h >> 7);
+ splitting++;
}
-
- return h;
+ HASH_WALK_END;
- debug("\n");
++ RDUMP("\n");
}
-static inline int
-nexthop_equal_1(struct nexthop *x, struct nexthop *y)
-{
- if (!ipa_equal(x->gw, y->gw) || (x->iface != y->iface) ||
- (x->flags != y->flags) || (x->weight != y->weight) ||
- (x->labels != y->labels))
- return 0;
-
- for (int i = 0; i < x->labels; i++)
- if (x->label[i] != y->label[i])
- return 0;
+static struct rte_owner_class default_rte_owner_class;
- return 1;
+void
+rt_init_sources(struct rte_owner *o, const char *name, event_list *list)
+{
+ RTA_LOCK;
+ HASH_INIT(o->hash, rta_pool, RSH_INIT_ORDER);
+ o->hash_key = random_u32();
+ o->uc = 0;
+ o->name = name;
+ o->prune = ev_new_init(rta_pool, rt_prune_sources, o);
+ o->stop = NULL;
+ o->list = list;
+ if (!o->class)
+ o->class = &default_rte_owner_class;
+ RTA_UNLOCK;
+ if (o->debug & D_EVENTS)
+ log(L_TRACE "%s: initialized rte_src owner", o->name);
}
-int
-nexthop_equal_(struct nexthop *x, struct nexthop *y)
+void
+rt_destroy_sources(struct rte_owner *o, event *done)
{
- /* Like nexthop_same(), but ignores difference between local labels and labels from hostentry */
-
- for (; x && y; x = x->next, y = y->next)
- if (!nexthop_equal_1(x, y))
- return 0;
+ o->stop = done;
- return x == y;
-}
+ if (!o->uc)
+ {
+ if (o->debug & D_EVENTS)
+ log(L_TRACE "%s: rte_src owner destroy requested, already clean, scheduling stop event", o->name);
-int
-nexthop__same(struct nexthop *x, struct nexthop *y)
-{
- for (; x && y; x = x->next, y = y->next)
- if (!nexthop_equal_1(x, y) ||
- (x->labels_orig != y->labels_orig))
- return 0;
+ RTA_LOCK;
+ rfree(o->prune);
+ RTA_UNLOCK;
- return x == y;
+ rt_done_sources(o);
+ }
+ else
+ if (o->debug & D_EVENTS)
+ log(L_TRACE "%s: rte_src owner destroy requested, remaining %u rte_src's to prune.", o->name, o->uc);
}
+/*
+ * Multipath Next Hop
+ */
+
static int
nexthop_compare_node(const struct nexthop *x, const struct nexthop *y)
{
return 1;
}
-static inline slab *
-nexthop_slab(struct nexthop *nh)
+/*
+ * Extended Attributes
+ */
+
+#define EA_CLASS_INITIAL_MAX 128
+static struct ea_class **ea_class_global = NULL;
+static uint ea_class_max;
+static struct idm ea_class_idm;
+
+/* Config parser lex register function */
+void ea_lex_register(struct ea_class *def);
+
+static void
+ea_class_free(struct ea_class *cl)
{
- return nexthop_slab_[MIN(nh->labels, 3)];
+ RTA_LOCK;
+
+ /* No more ea class references. Unregister the attribute. */
+ idm_free(&ea_class_idm, cl->id);
+ ea_class_global[cl->id] = NULL;
+
+ /* When we start supporting full protocol removal, we may need to call
+ * ea_lex_unregister(cl), see where ea_lex_register() is called. */
+
+ RTA_UNLOCK;
}
-static struct nexthop *
-nexthop_copy(struct nexthop *o)
+static void
+ea_class_ref_free(resource *r)
{
- struct nexthop *first = NULL;
- struct nexthop **last = &first;
-
- for (; o; o = o->next)
- {
- struct nexthop *n = sl_allocz(nexthop_slab(o));
- n->gw = o->gw;
- n->iface = o->iface;
- n->next = NULL;
- n->flags = o->flags;
- n->weight = o->weight;
- n->labels_orig = o->labels_orig;
- n->labels = o->labels;
- for (int i=0; i<o->labels; i++)
- n->label[i] = o->label[i];
-
- *last = n;
- last = &(n->next);
- }
+ SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
+ if (!--ref->class->uc)
+ ea_class_free(ref->class);
+}
- return first;
+static void
- ea_class_ref_dump(resource *r, unsigned indent UNUSED)
++ea_class_ref_dump(struct dump_request *dreq, resource *r)
+{
+ SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
- debug("name \"%s\", type=%d\n", ref->class->name, ref->class->type);
++ RDUMP("name \"%s\", type=%d\n", ref->class->name, ref->class->type);
}
+static struct resclass ea_class_ref_class = {
+ .name = "Attribute class reference",
+ .size = sizeof(struct ea_class_ref),
+ .free = ea_class_ref_free,
+ .dump = ea_class_ref_dump,
+ .lookup = NULL,
+ .memsize = NULL,
+};
+
static void
-nexthop_free(struct nexthop *o)
+ea_class_init(void)
{
- struct nexthop *n;
+ ASSERT_DIE(ea_class_global == NULL);
- while (o)
- {
- n = o->next;
- sl_free(o);
- o = n;
- }
+ idm_init(&ea_class_idm, rta_pool, EA_CLASS_INITIAL_MAX);
+ ea_class_global = mb_allocz(rta_pool,
+ sizeof(*ea_class_global) * (ea_class_max = EA_CLASS_INITIAL_MAX));
}
+struct ea_class_ref *
+ea_ref_class(pool *p, struct ea_class *def)
+{
+ def->uc++;
+ struct ea_class_ref *ref = ralloc(p, &ea_class_ref_class);
+ ref->class = def;
+ return ref;
+}
-/*
- * Extended Attributes
- */
+static struct ea_class_ref *
+ea_register(pool *p, struct ea_class *def)
+{
+ def->id = idm_alloc(&ea_class_idm);
+
+ ASSERT_DIE(ea_class_global);
+ while (def->id >= ea_class_max)
+ ea_class_global = mb_realloc(ea_class_global, sizeof(*ea_class_global) * (ea_class_max *= 2));
+
+ ASSERT_DIE(def->id < ea_class_max);
+ ea_class_global[def->id] = def;
+
+ return ea_ref_class(p, def);
+}
+
+struct ea_class_ref *
+ea_register_alloc(pool *p, struct ea_class cl)
+{
+ struct ea_class_ref *ref;
+
+ RTA_LOCK;
+ struct ea_class *clp = ea_class_find_by_name(cl.name);
+ if (clp && clp->type == cl.type)
+ {
+ ref = ea_ref_class(p, clp);
+ RTA_UNLOCK;
+ return ref;
+ }
+
+ uint namelen = strlen(cl.name) + 1;
+
+ struct {
+ struct ea_class cl;
+ char name[0];
+ } *cla = mb_alloc(rta_pool, sizeof(struct ea_class) + namelen);
+ cla->cl = cl;
+ memcpy(cla->name, cl.name, namelen);
+ cla->cl.name = cla->name;
+
+ ref = ea_register(p, &cla->cl);
+ RTA_UNLOCK;
+ return ref;
+}
+
+void
+ea_register_init(struct ea_class *clp)
+{
+ RTA_LOCK;
+ ASSERT_DIE(!ea_class_find_by_name(clp->name));
+
+ struct ea_class *def = ea_register(&root_pool, clp)->class;
+
+ if (!clp->hidden)
+ ea_lex_register(def);
+
+ RTA_UNLOCK;
+}
+
+struct ea_class *
+ea_class_find_by_id(uint id)
+{
+ ASSERT_DIE(id < ea_class_max);
+ ASSERT_DIE(ea_class_global[id]);
+ return ea_class_global[id];
+}
static inline eattr *
ea__find(ea_list *e, unsigned id)
byte buf[CLI_MSG_SIZE];
byte *pos = buf, *end = buf + sizeof(buf);
- if (EA_IS_CUSTOM(e->id))
- {
- const char *name = ea_custom_name(e->id);
- if (name)
- {
- pos += bsprintf(pos, "%s", name);
- status = GA_NAME;
- }
- else
- pos += bsprintf(pos, "%02x.", EA_PROTO(e->id));
- }
- else if (p = class_to_protocol[EA_PROTO(e->id)])
- {
- pos += bsprintf(pos, "%s.", p->name);
- if (p->get_attr)
- status = p->get_attr(e, pos, end - pos);
- pos += strlen(pos);
- }
- else if (EA_PROTO(e->id))
- pos += bsprintf(pos, "%02x.", EA_PROTO(e->id));
- else
- status = get_generic_attr(e, &pos, end - pos);
+ ASSERT_DIE(e->id < ea_class_max);
- if (status < GA_NAME)
- pos += bsprintf(pos, "%02x", EA_ID(e->id));
- if (status < GA_FULL)
- {
- *pos++ = ':';
- *pos++ = ' ';
+ struct ea_class *cls = ea_class_global[e->id];
+ ASSERT_DIE(cls);
+
+ if (e->undef || cls->hidden)
+ return;
+ else if (cls->format)
+ cls->format(e, buf, end - buf);
+ else
+ switch (e->type)
+ {
+ case T_INT:
+ if ((cls == &ea_gen_igp_metric) && e->u.data >= IGP_METRIC_UNKNOWN)
+ return;
- if (e->undef)
- bsprintf(pos, "undefined");
- else
- switch (e->type & EAF_TYPE_MASK)
- {
- case EAF_TYPE_INT:
bsprintf(pos, "%u", e->u.data);
break;
- case EAF_TYPE_OPAQUE:
+ case T_OPAQUE:
opaque_format(ad, pos, end - pos);
break;
- case EAF_TYPE_IP_ADDRESS:
+ case T_IP:
bsprintf(pos, "%I", *(ip_addr *) ad->data);
break;
- case EAF_TYPE_ROUTER_ID:
+ case T_QUAD:
bsprintf(pos, "%R", e->u.data);
break;
- case EAF_TYPE_AS_PATH:
+ case T_PATH:
as_path_format(ad, pos, end - pos);
break;
- case EAF_TYPE_BITFIELD:
- bsprintf(pos, "%08x", e->u.data);
- break;
- case EAF_TYPE_INT_SET:
- ea_show_int_set(c, ad, ISF_COMMUNITY_LIST, pos, buf, end);
+ case T_CLIST:
+ ea_show_int_set(c, cls->name, ad, ISF_COMMUNITY_LIST, buf);
return;
- case EAF_TYPE_EC_SET:
- ea_show_ec_set(c, ad, pos, buf, end);
+ case T_ECLIST:
+ ea_show_ec_set(c, cls->name, ad, buf);
return;
- case EAF_TYPE_LC_SET:
- ea_show_lc_set(c, ad, pos, buf, end);
+ case T_LCLIST:
+ ea_show_lc_set(c, cls->name, ad, buf);
return;
- case EAF_TYPE_STRING:
+ case T_STRING:
bsnprintf(pos, end - pos, "%s", (const char *) ad->data);
break;
+ case T_NEXTHOP_LIST:
+ ea_show_nexthop_list(c, (struct nexthop_adata *) e->u.ptr);
+ return;
+ case T_HOSTENTRY:
+ ea_show_hostentry(ad, pos, end - pos);
+ break;
default:
bsprintf(pos, "<type %02x>", e->type);
- }
- }
+ }
- if (status != GA_HIDDEN)
- cli_printf(c, -1012, "\t%s", buf);
+ cli_printf(c, -1012, "\t%s: %s", cls->name, buf);
+}
+
+static void
- nexthop_dump(const struct adata *ad)
++nexthop_dump(struct dump_request *dreq, const struct adata *ad)
+{
+ struct nexthop_adata *nhad = (struct nexthop_adata *) ad;
+
- debug(":");
++ RDUMP(":");
+
+ if (!NEXTHOP_IS_REACHABLE(nhad))
+ {
+ const char *name = rta_dest_name(nhad->dest);
+ if (name)
- debug(" %s", name);
++ RDUMP(" %s", name);
+ else
- debug(" D%d", nhad->dest);
++ RDUMP(" D%d", nhad->dest);
+ }
+ else NEXTHOP_WALK(nh, nhad)
+ {
- if (ipa_nonzero(nh->gw)) debug(" ->%I", nh->gw);
- if (nh->labels) debug(" L %d", nh->label[0]);
++ if (ipa_nonzero(nh->gw)) RDUMP(" ->%I", nh->gw);
++ if (nh->labels) RDUMP(" L %d", nh->label[0]);
+ for (int i=1; i<nh->labels; i++)
- debug("/%d", nh->label[i]);
- debug(" [%s]", nh->iface ? nh->iface->name : "???");
++ RDUMP("/%d", nh->label[i]);
++ RDUMP(" [%s]", nh->iface ? nh->iface->name : "???");
+ }
}
/**
}
while (e)
{
- RDUMP("[%c%c%c]",
+ struct ea_storage *s = e->stored ? ea_get_storage(e) : NULL;
- debug("[%c%c] overlay=%d uc=%d h=%08x",
++ RDUMP("[%c%c] overlay=%d uc=%d h=%08x",
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
- (e->flags & EALF_CACHED) ? 'C' : 'c');
+ e->stored,
+ s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
+ s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
{
eattr *a = &e->attrs[i];
- RDUMP(" %02x:%02x.%02x", EA_PROTO(a->id), EA_ID(a->id), a->flags);
- RDUMP("=%c", "?iO?I?P???S?????" [a->type & EAF_TYPE_MASK]);
+ struct ea_class *clp = (a->id < ea_class_max) ? ea_class_global[a->id] : NULL;
+ if (clp)
- debug(" %s", clp->name);
++ RDUMP(" %s", clp->name);
+ else
- debug(" 0x%x", a->id);
++ RDUMP(" 0x%x", a->id);
+
- debug(".%02x", a->flags);
- debug("=%c",
++ RDUMP(".%02x", a->flags);
++ RDUMP("=%c",
+ "?iO?IRP???S??pE?"
+ "??L???N?????????"
+ "?o???r??????????" [a->type]);
if (a->originated)
- debug("o");
+ RDUMP("o");
- if (a->type & EAF_EMBEDDED)
+ if (a->undef)
- debug(":undef");
++ RDUMP(":undef");
+ else if (a->type & EAF_EMBEDDED)
- debug(":%08x", a->u.data);
+ RDUMP(":%08x", a->u.data);
+ else if (a->id == ea_gen_nexthop.id)
- nexthop_dump(a->u.ptr);
++ nexthop_dump(dreq, a->u.ptr);
else
{
int j, len = a->u.ptr->length;
- debug("[%d]:", len);
+ RDUMP("[%d]:", len);
for(j=0; j<len; j++)
- debug("%02x", a->u.ptr->data[j]);
+ RDUMP("%02x", a->u.ptr->data[j]);
}
- debug(" ");
++ RDUMP(" ");
}
if (e = e->next)
- debug(" | ");
+ RDUMP(" | ");
}
}
* to the debug output.
*/
void
- ea_dump_all(void)
-rta_dump_all(struct dump_request *dreq)
++ea_dump_all(struct dump_request *dreq)
{
- debug("Route attribute cache (%d entries, order %d):\n",
- rta *a;
- uint h;
++ RDUMP("Route attribute cache (%d entries, order %d):\n",
+ atomic_load_explicit(&rta_hash_table.count, memory_order_relaxed),
+ atomic_load_explicit(&rta_hash_table.cur, memory_order_relaxed)->order);
- RDUMP("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count, rta_cache_limit);
- for(h=0; h<rta_cache_size; h++)
- for(a=rta_hash_table[h]; a; a=a->next)
+ SPINHASH_WALK(rta_hash_table, RTAH, a)
{
- debug("%p ", a);
- ea_dump(a->l);
- debug("\n");
+ RDUMP("%p ", a);
- rta_dump(dreq, a);
++ ea_dump(dreq, a->l);
+ RDUMP("\n");
}
- debug("\n");
+ SPINHASH_WALK_END;
+ RDUMP("\n");
}
void
 * This function dumps contents of a &rte to debug output.
*/
void
- rte_dump(struct rte_storage *e)
- {
- debug("(%u) %-1N", NET_TO_INDEX(e->rte.net)->index, e->rte.net);
- debug("ID=%d ", e->rte.id);
- debug("SENDER=%s ", e->rte.sender->req->name);
- debug("PF=%02x ", e->rte.pflags);
- debug("SRC=%uG ", e->rte.src->global_id);
- ea_dump(e->rte.attrs);
- debug("\n");
-rte_dump(struct dump_request *dreq, rte *e)
++rte_dump(struct dump_request *dreq, struct rte_storage *e)
+ {
- net *n = e->net;
- RDUMP("%-1N ", n->n.addr);
- RDUMP("PF=%02x ", e->pflags);
- rta_dump(dreq, e->attrs);
++ RDUMP("(%u) %-1N", NET_TO_INDEX(e->rte.net)->index, e->rte.net);
++ RDUMP("ID=%d ", e->rte.id);
++ RDUMP("SENDER=%s ", e->rte.sender->req->name);
++ RDUMP("PF=%02x ", e->rte.pflags);
++ RDUMP("SRC=%uG ", e->rte.src->global_id);
++ ea_dump(dreq, e->rte.attrs);
+ RDUMP("\n");
}
/**
* This function dumps contents of a given routing table to debug output.
*/
void
- rt_dump(rtable *tab)
-rt_dump(struct dump_request *dreq, rtable *t)
++rt_dump(struct dump_request *dreq, rtable *tab)
{
- RDUMP("Dump of routing table <%s>\n", t->name);
-#ifdef DEBUGGING
- fib_check(&t->fib);
-#endif
- FIB_WALK(&t->fib, net, n)
- {
- rte *e;
- for(e=n->routes; e; e=e->next)
- rte_dump(dreq, e);
- }
- FIB_WALK_END;
+ RT_READ(tab, tp);
+
+ /* Looking at priv.deleted is technically unsafe but we don't care */
- debug("Dump of routing table <%s>%s\n", tab->name, OBSREF_GET(tab->priv.deleted) ? " (deleted)" : "");
++ RDUMP("Dump of routing table <%s>%s\n", tab->name, OBSREF_GET(tab->priv.deleted) ? " (deleted)" : "");
+
+ u32 bs = atomic_load_explicit(&tp->t->routes_block_size, memory_order_relaxed);
+ net *routes = atomic_load_explicit(&tp->t->routes, memory_order_relaxed);
+ for (u32 i = 0; i < bs; i++)
+ NET_READ_WALK_ROUTES(tp, &routes[i], ep, e)
- rte_dump(e);
++ rte_dump(dreq, e);
+
- debug("\n");
+ RDUMP("\n");
}
/**
node *n;
WALK_LIST2(t, n, routing_tables, n)
- rt_dump(t);
+ rt_dump(dreq, t);
-}
-
-static inline void
-rt_schedule_hcu(rtable *tab)
-{
- if (tab->hcu_scheduled)
- return;
- tab->hcu_scheduled = 1;
- ev_schedule(tab->rt_event);
+ WALK_LIST2(t, n, deleted_routing_tables, n)
- rt_dump(t);
++ rt_dump(dreq, t);
}
-static inline void
-rt_schedule_nhu(rtable *tab)
+void
- rt_dump_hooks(rtable *tp)
++rt_dump_hooks(struct dump_request *dreq, rtable *tp)
{
- if (tab->nhu_state == NHU_CLEAN)
- ev_schedule(tab->rt_event);
+ RT_LOCKED(tp, tab)
+ {
- debug("Dump of hooks in routing table <%s>%s\n", tab->name, OBSREF_GET(tab->deleted) ? " (deleted)" : "");
- debug(" nhu_state=%u use_count=%d rt_count=%u\n",
- /* state change:
- * NHU_CLEAN -> NHU_SCHEDULED
- * NHU_RUNNING -> NHU_DIRTY
- */
- tab->nhu_state |= NHU_SCHEDULED;
-}
++ RDUMP("Dump of hooks in routing table <%s>%s\n", tab->name, OBSREF_GET(tab->deleted) ? " (deleted)" : "");
++ RDUMP(" nhu_state=%u use_count=%d rt_count=%u\n",
+ tab->nhu_state, tab->use_count, tab->rt_count);
- debug(" last_rt_change=%t gc_time=%t gc_counter=%d prune_state=%u\n",
++ RDUMP(" last_rt_change=%t gc_time=%t gc_counter=%d prune_state=%u\n",
+ tab->last_rt_change, tab->gc_time, tab->gc_counter, tab->prune_state);
-void
-rt_schedule_prune(rtable *tab)
-{
- if (tab->prune_state == 0)
- ev_schedule(tab->rt_event);
+ struct rt_import_hook *ih;
+ WALK_LIST(ih, tab->imports)
+ {
+ ih->req->dump_req(ih->req);
- debug(" Import hook %p requested by %p: pref=%u"
++ RDUMP(" Import hook %p requested by %p: pref=%u"
+ " last_state_change=%t import_state=%u stopped=%p\n",
+ ih, ih->req, ih->stats.pref,
+ ih->last_state_change, ih->import_state, ih->stopped);
+ }
- /* state change 0->1, 2->3 */
- tab->prune_state |= 1;
-}
+#if 0
+ /* FIXME: I'm very lazy to write this now */
+ WALK_TLIST(lfjour_recipient, r, &tab->journal.recipients)
+ {
+ SKIP_BACK_DECLARE(struct rt_export_hook, eh, recipient, r);
+ eh->req->dump_req(eh->req);
- debug(" Export hook %p requested by %p:"
++ RDUMP(" Export hook %p requested by %p:"
+ " refeed_pending=%u last_state_change=%t export_state=%u\n",
+ eh, eh->req, eh->refeed_pending, eh->last_state_change,
+ atomic_load_explicit(&eh->export_state, memory_order_relaxed));
+ }
+#endif
- debug("\n");
++ RDUMP("\n");
+ }
+}
-static void
-rt_event(void *ptr)
+void
- rt_dump_hooks_all(void)
++rt_dump_hooks_all(struct dump_request *dreq)
{
- rtable *tab = ptr;
-
- rt_lock_table(tab);
+ rtable *t;
+ node *n;
- debug("Dump of all table hooks\n");
- if (tab->hcu_scheduled)
- rt_update_hostcache(tab);
++ RDUMP("Dump of all table hooks\n");
- if (tab->nhu_state)
- rt_next_hop_update(tab);
+ WALK_LIST2(t, n, routing_tables, n)
- rt_dump_hooks(t);
++ rt_dump_hooks(dreq, t);
- if (tab->prune_state)
- rt_prune_table(tab);
+ WALK_LIST2(t, n, deleted_routing_tables, n)
- rt_dump_hooks(t);
++ rt_dump_hooks(dreq, t);
+}
- rt_unlock_table(tab);
+static inline void
+rt_schedule_nhu(struct rtable_private *tab)
+{
+ if (tab->nhu_corked)
+ {
+ if (!(tab->nhu_corked & NHU_SCHEDULED))
+ tab->nhu_corked |= NHU_SCHEDULED;
+ }
+ else if (!(tab->nhu_state & NHU_SCHEDULED))
+ {
+ rt_trace(tab, D_EVENTS, "Scheduling NHU");
+
+ /* state change:
+ * NHU_CLEAN -> NHU_SCHEDULED
+ * NHU_RUNNING -> NHU_DIRTY
+ */
+ if ((tab->nhu_state |= NHU_SCHEDULED) == NHU_SCHEDULED)
+ ev_send_loop(tab->loop, tab->nhu_event);
+ }
}
+void
+rt_schedule_prune(struct rtable_private *tab)
+{
+ /* state change 0->1, 2->3 */
+ tab->prune_state |= 1;
+ ev_send_loop(tab->loop, tab->prune_event);
+}
static void
rt_prune_timer(timer *t)
}
static void
- rt_res_dump(resource *_r, unsigned indent UNUSED)
+ rt_res_dump(struct dump_request *dreq, resource *_r)
{
- rtable *r = (rtable *) _r;
+ SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
+
- debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
+ RDUMP("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
r->name, net_label[r->addr_type], r->rt_count, r->use_count);
+
+#if 0
+ /* TODO: rethink this completely */
+ /* TODO: move this to lfjour */
+ char x[32];
+ bsprintf(x, "%%%dspending export %%p\n", indent + 2);
+
+ WALK_TLIST(lfjour_block, n, &r->journal.pending)
+ debug(x, "", n);
+#endif
}
static struct resclass rt_class = {
}
#endif
- node *n;
- WALK_LIST(n, global_free_pages.pages)
- RDUMP(" %p\n", n);
+ void
+ page_dump(struct dump_request *dreq)
+ {
+ #ifdef HAVE_MMAP
+ RDUMP("Hot pages:\n");
- WALK_LIST(n, global_free_pages.empty)
++ struct free_page *fptop = PAGE_STACK_GET;
++ for (struct free_page *fp = fptop; fp; fp = atomic_load_explicit(&fp->next, memory_order_relaxed))
++ RDUMP(" %p\n", fp);
++
++ PAGE_STACK_PUT(fptop);
+
+ RDUMP("Cold pages:\n");
- struct empty_pages *ep = SKIP_BACK(struct empty_pages, n, n);
++
++ LOCK_DOMAIN(resource, empty_pages_domain);
++ for (struct empty_pages *ep = empty_pages; ep; ep = ep->next)
+ {
+ RDUMP(" %p (index)\n", ep);
+ for (uint i=0; i<ep->pos; i++)
+ RDUMP(" %p\n", ep->pages[i]);
+ }
++ UNLOCK_DOMAIN(resource, empty_pages_domain);
+ #endif
+ }
+
void
resource_sys_init(void)
{
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
+#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/socket.h>
+ #include <sys/stat.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <poll.h>
{
struct rfile *a = (struct rfile *) r;
- debug("(fd %d)\n", a->fd);
- RDUMP("(FILE *%p)\n", a->f);
++ RDUMP("(fd %d)\n", a->fd);
}
static struct resclass rf_class = {
}
int
-rf_fileno(struct rfile *f)
+rf_same(struct rfile *a, struct rfile *b)
+{
+ rf_stat(a);
+ rf_stat(b);
+
+ return
+ (a->limit == b->limit) &&
+ (a->stat.st_mode == b->stat.st_mode) &&
+ (a->stat.st_dev == b->stat.st_dev) &&
+ (a->stat.st_ino == b->stat.st_ino);
+}
+
+void
+rf_write_crude(struct rfile *r, const char *buf, int sz)
{
- return fileno(f->f);
+ if (r->mapping)
+ memcpy(r->mapping, buf, sz);
+ else
+ write(r->fd, buf, sz);
+}
+
+
+int
+rf_writev(struct rfile *r, struct iovec *iov, int iov_count)
+{
+ off_t size = 0;
+ for (int i = 0; i < iov_count; i++)
+ size += iov[i].iov_len;
+
+ if (r->mapping)
+ {
+ /* Update the pointer */
+ off_t target = atomic_fetch_add_explicit(&r->pos, size, memory_order_relaxed) % r->limit;
+
+ /* Write the line */
+ for (int i = 0; i < iov_count; i++)
+ {
+ /* Take care of wrapping; this should really happen only once */
+ off_t rsz;
+ while ((rsz = r->limit - target) < (off_t) iov[i].iov_len)
+ {
+ memcpy(r->mapping + target, iov[i].iov_base, rsz);
+ iov[i].iov_base += rsz;
+ iov[i].iov_len -= rsz;
+ target = 0;
+ }
+
+ memcpy(r->mapping + target, iov[i].iov_base, iov[i].iov_len);
+ target += iov[i].iov_len;
+ }
+ return 1;
+ }
+ else if (r->limit && (atomic_fetch_add_explicit(&r->pos, size, memory_order_relaxed) + size > r->limit))
+ {
+ atomic_fetch_sub_explicit(&r->pos, size, memory_order_relaxed);
+ return 0;
+ }
+ else
+ {
+ while (size > 0)
+ {
+ /* Try to write */
+ ssize_t e = writev(r->fd, iov, iov_count);
+ if (e < 0)
+ if (errno == EINTR)
+ continue;
+ else
+ return 1; /* FIXME: What should we do when we suddenly can't write? */
+
+ /* It is expected that we always write the whole bunch at once */
+ if (e == size)
+ return 1;
+
+ /* Block split should not happen (we write small enough messages)
+ * but if it happens, let's try to write the rest of the log */
+ size -= e;
+ while (e > 0)
+ {
+ if ((ssize_t) iov[0].iov_len > e)
+ {
+ /* Some bytes are remaining in the first chunk */
+ iov[0].iov_len -= e;
+ iov[0].iov_base += e;
+ break;
+ }
+
+ /* First chunk written completely, get rid of it */
+ e -= iov[0].iov_len;
+ iov++;
+ iov_count--;
+ ASSERT_DIE(iov_count > 0);
+ }
+ }
+
+ return 1;
+ }
}
+ /*
+ * Dumping to files
+ */
+
+ struct dump_request_file {
+ struct dump_request dr;
+ uint pos, max; int fd;
+ uint last_progress_info;
+ char data[0];
+ };
+
+ static void
+ dump_to_file_flush(struct dump_request_file *req)
+ {
+ if (req->fd < 0)
+ return;
+
+ for (uint sent = 0; sent < req->pos; )
+ {
+ int e = write(req->fd, &req->data[sent], req->pos - sent);
+ if (e <= 0)
+ {
+ req->dr.report(&req->dr, 8009, "Failed to write data: %m");
+ close(req->fd);
+ req->fd = -1;
+ return;
+ }
+ sent += e;
+ }
+
+ req->dr.size += req->pos;
+ req->pos = 0;
+
+ for (uint reported = 0; req->dr.size >> req->last_progress_info; req->last_progress_info++)
+ if (!reported++)
+ req->dr.report(&req->dr, -13, "... dumped %lu bytes in %t s",
+ req->dr.size, current_time_now() - req->dr.begin);
+ }
+
+ static void
+ dump_to_file_write(struct dump_request *dr, const char *fmt, ...)
+ {
+ struct dump_request_file *req = SKIP_BACK(struct dump_request_file, dr, dr);
+
+ for (uint phase = 0; (req->fd >= 0) && (phase < 2); phase++)
+ {
+ va_list args;
+ va_start(args, fmt);
+ int i = bvsnprintf(&req->data[req->pos], req->max - req->pos, fmt, args);
+ va_end(args);
+
+ if (i >= 0)
+ {
+ req->pos += i;
+ return;
+ }
+ else
+ dump_to_file_flush(req);
+ }
+
+ bug("Too long dump call");
+ }
+
+ struct dump_request *
+ dump_to_file_init(off_t offset)
+ {
+ ASSERT_DIE(offset + sizeof(struct dump_request_file) + 1024 < (unsigned long) page_size);
+
+ struct dump_request_file *req = alloc_page() + offset;
+ *req = (struct dump_request_file) {
+ .dr = {
+ .write = dump_to_file_write,
+ .begin = current_time_now(),
+ .offset = offset,
+ },
+ .max = page_size - offset - OFFSETOF(struct dump_request_file, data[0]),
+ .fd = -1,
+ };
+
+ return &req->dr;
+ }
+
+ void
+ dump_to_file_run(struct dump_request *dr, const char *file, const char *what, void (*dump)(struct dump_request *))
+ {
+ struct dump_request_file *req = SKIP_BACK(struct dump_request_file, dr, dr);
+ req->fd = open(file, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR);
+
+ if (req->fd < 0)
+ {
+ dr->report(dr, 8009, "Failed to open file %s: %m", file);
+ goto cleanup;
+ }
+
+ dr->report(dr, -13, "Dumping %s to %s", what, file);
+
+ dump(dr);
+
+ if (req->fd >= 0)
+ {
+ dump_to_file_flush(req);
+ close(req->fd);
+ }
+
+ btime end = current_time_now();
+ dr->report(dr, 13, "Dumped %lu bytes in %t s", dr->size, end - dr->begin);
+
+ cleanup:
+ free_page(((void *) req) - dr->offset);
+ }
+
+ struct dump_request_cli {
+ cli *cli;
+ struct dump_request dr;
+ };
+
+ static void
+ cmd_dump_report(struct dump_request *dr, int state, const char *fmt, ...)
+ {
+ struct dump_request_cli *req = SKIP_BACK(struct dump_request_cli, dr, dr);
+ va_list args;
+ va_start(args, fmt);
+ cli_vprintf(req->cli, state, fmt, args);
+ va_end(args);
+ }
+
+ void
+ cmd_dump_file(struct cli *cli, const char *file, const char *what, void (*dump)(struct dump_request *))
+ {
+ if (cli->restricted)
+ return cli_printf(cli, 8007, "Access denied");
+
+ struct dump_request_cli *req = SKIP_BACK(struct dump_request_cli, dr,
+ dump_to_file_init(OFFSETOF(struct dump_request_cli, dr)));
+
+ req->cli = cli;
+ req->dr.report = cmd_dump_report;
+
+ dump_to_file_run(&req->dr, file, what, dump);
+ }
+
/*
* Time clock
node *n;
sock *s;
- debug("Open sockets:\n");
+ RDUMP("Open sockets:\n");
- WALK_LIST(n, sock_list)
++ dreq->indent += 3;
+ WALK_LIST(n, main_birdloop.sock_list)
{
s = SKIP_BACK(sock, n, n);
- debug("%p ", s);
- sk_dump(&s->r, 3);
+ RDUMP("%p ", s);
+ sk_dump(dreq, &s->r);
}
- debug("\n");
++ dreq->indent -= 3;
+ RDUMP("\n");
}
{
int i;
- log(L_DEBUG "Event log:");
- RDUMP("Event log:");
++ RDUMP("Event log:\n");
for (i = 0; i < EVENT_LOG_LENGTH; i++)
{
struct event_log_entry *en = event_log + (event_log_pos + i) % EVENT_LOG_LENGTH;
if (en->hook)
- log(L_DEBUG " Event 0x%p 0x%p at %8d for %d ms", en->hook, en->data,
- RDUMP(" Event 0x%p 0x%p at %8d for %d ms", en->hook, en->data,
- (int) ((last_time - en->timestamp) TO_MS), (int) (en->duration TO_MS));
++ RDUMP(" Event 0x%p 0x%p at %8d for %d ms\n", en->hook, en->data,
+ (int) ((last_io_time - en->timestamp) TO_MS), (int) (en->duration TO_MS));
}
}
* Debugging
*/
- void
- async_dump(void)
+ static void
+ async_dump_report(struct dump_request *dr UNUSED, int state, const char *fmt, ...)
+ {
+ va_list args;
+ va_start(args, fmt);
+ vlog(((state > 1000) ? L_ERR : L_INFO)[0], fmt, args);
+ va_end(args);
+ }
+
+ static void
+ async_dump_run(struct dump_request *dreq)
{
- debug("INTERNAL STATE DUMP\n\n");
+ RDUMP("ASYNC STATE DUMP\n");
- rdump(&root_pool, 0);
- sk_dump_all();
+ rdump(dreq, &root_pool);
+ sk_dump_all(dreq);
// XXXX tm_dump_all();
- if_dump_all();
- neigh_dump_all();
- ea_dump_all();
- rt_dump_all();
- protos_dump_all();
+ if_dump_all(dreq);
+ neigh_dump_all(dreq);
- rta_dump_all(dreq);
++ ea_dump_all(dreq);
+ rt_dump_all(dreq);
+ protos_dump_all(dreq);
debug("\n");
}
struct iface;
struct birdsock;
struct rfile;
+ struct cli;
+struct config;
/* main.c */
void io_init(void);
void io_loop(void);
- void io_log_dump(void);
+ void io_log_dump(struct dump_request *);
-int sk_open_unix(struct birdsock *s, const char *name);
-struct rfile *rf_open(struct pool *, const char *name, const char *mode);
-struct rfile *rf_fdopen(pool *p, int fd, const char *mode);
-void *rf_file(struct rfile *f);
+int sk_open_unix(struct birdsock *s, struct birdloop *, const char *name);
+
+enum rf_mode {
+ RF_APPEND = 1,
+ RF_FIXED,
+};
+
+struct rfile *rf_open(struct pool *, const char *name, enum rf_mode mode, off_t limit);
+off_t rf_size(struct rfile *);
+int rf_same(struct rfile *, struct rfile *);
+int rf_writev(struct rfile *, struct iovec *, int);
+void rf_write_crude(struct rfile *, const char *, int);
int rf_fileno(struct rfile *f);
+
+extern struct rfile rf_stderr;
+
void test_old_bird(const char *path);
ip_addr resolve_hostname(const char *host, int type, const char **err_msg);