const int rt_default_ecmp = 16;
- /*
- * Structure nl_parse_state keeps the state of received route processing.
- * Ideally, we could just independently parse received Netlink messages and
- * immediately propagate received routes to the rest of BIRD, but older Linux
- * kernels (before version 4.11) represent and announce IPv6 ECMP routes not
- * as one route with multiple next hops (like RTA_MULTIPATH in IPv4 ECMP),
- * but as a sequence of routes with the same prefix. More recent kernels
- * handle IPv6 ECMP routes the same way as IPv4.
- *
- * Therefore, BIRD keeps the currently processed route in the nl_parse_state
- * structure and postpones its propagation until we expect it to be final;
- * i.e., when a non-matching route is received or when the scan ends. When
- * another matching route is received, it is merged with the already processed
- * route to form an ECMP route. Note that merging is done only for IPv6
- * (merge == 1), but the postponing is done in both cases (for simplicity).
- * All IPv4 routes and all IPv6 routes with RTA_MULTIPATH set are considered
- * non-matching.
- *
- * This is ignored for asynchronous notifications (every notification is
- * handled as a separate route). That is not an issue for our own routes, as
- * we ignore such notifications anyway, but importing alien IPv6 ECMP routes
- * does not work properly with older kernels.
- *
- * Whatever the kernel version is, IPv6 ECMP routes are sent as multiple
- * routes for the same prefix.
- */
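For context, a minimal sketch of what "matching" meant under that old scheme, reconstructed from the state fields removed below (s->net, s->merge, s->krt_metric, s->krt_type) and the nl_mergable_route() call removed further down. It is illustrative only, not part of the patch, and assumes the surrounding BIRD declarations (net_equal() is the standard prefix comparison):

/* Sketch (not part of the patch): roughly the old mergability test.
 * Field names follow the struct nl_parse_state members removed below. */
static int
nl_mergable_route_sketch(struct nl_parse_state *s, const net_addr *net,
			 struct krt_proto *p, uint priority, uint krt_type)
{
  /* Merging applied only during scans with merging enabled (IPv6) */
  if (!s->scan || !s->merge || !s->net)
    return 0;

  /* The saved and new route must share prefix, proto and priority */
  if (!net_equal(s->net, net) || (s->proto != p) || (s->krt_metric != priority))
    return 0;

  /* Both must be regular unicast routes */
  return (s->krt_type == RTN_UNICAST) && (krt_type == RTN_UNICAST);
}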
-
struct nl_parse_state
{
+ struct krt_proto *proto;
struct linpool *pool;
int scan;
- int merge;
- net_addr *net;
- ea_list *attrs;
- struct krt_proto *proto;
- s8 new;
- s8 krt_src;
- u8 krt_type;
- u8 krt_proto;
- u32 krt_metric;
-
- u32 rta_flow; /* Used during parsing */
+ u32 rta_flow;
};
+/*
+ * Netlink eattr definitions
+ */
+
+#define KRT_METRICS_MAX ARRAY_SIZE(ea_krt_metrics)
+#define KRT_FEATURES_MAX 4
+
+static void krt_bitfield_format(const eattr *e, byte *buf, uint buflen);
+
+static struct ea_class
+ ea_krt_prefsrc = {
+ .name = "krt_prefsrc",
+ .type = T_IP,
+ },
+ ea_krt_realm = {
+ .name = "krt_realm",
+ .type = T_INT,
+ },
+ ea_krt_scope = {
+ .name = "krt_scope",
+ .type = T_INT,
+ };
+
+static struct ea_class ea_krt_metrics[] = {
+ [RTAX_LOCK] = {
+ .name = "krt_lock",
+ .type = T_INT,
+ .format = krt_bitfield_format,
+ },
+ [RTAX_FEATURES] = {
+ .name = "krt_features",
+ .type = T_INT,
+ .format = krt_bitfield_format,
+ },
+#define KRT_METRIC_INT(_rtax, _name) [_rtax] = { .name = _name, .type = T_INT }
+ KRT_METRIC_INT(RTAX_MTU, "krt_mtu"),
+ KRT_METRIC_INT(RTAX_WINDOW, "krt_window"),
+ KRT_METRIC_INT(RTAX_RTT, "krt_rtt"),
+ KRT_METRIC_INT(RTAX_RTTVAR, "krt_rttvar"),
+ KRT_METRIC_INT(RTAX_SSTHRESH, "krt_sstresh"),
+ KRT_METRIC_INT(RTAX_CWND, "krt_cwnd"),
+ KRT_METRIC_INT(RTAX_ADVMSS, "krt_advmss"),
+ KRT_METRIC_INT(RTAX_REORDERING, "krt_reordering"),
+ KRT_METRIC_INT(RTAX_HOPLIMIT, "krt_hoplimit"),
+ KRT_METRIC_INT(RTAX_INITCWND, "krt_initcwnd"),
+ KRT_METRIC_INT(RTAX_RTO_MIN, "krt_rto_min"),
+ KRT_METRIC_INT(RTAX_INITRWND, "krt_initrwnd"),
+ KRT_METRIC_INT(RTAX_QUICKACK, "krt_quickack"),
+#undef KRT_METRIC_INT
+};
+
+static const char *krt_metrics_names[KRT_METRICS_MAX] = {
+ NULL, "lock", "mtu", "window", "rtt", "rttvar", "sstresh", "cwnd", "advmss",
+ "reordering", "hoplimit", "initcwnd", "features", "rto_min", "initrwnd", "quickack"
+};
+
+static const char *krt_features_names[KRT_FEATURES_MAX] = {
+ "ecn", NULL, NULL, "allfrag"
+};
+
+static void
+krt_bitfield_format(const eattr *a, byte *buf, uint buflen)
+{
+ if (a->id == ea_krt_metrics[RTAX_LOCK].id)
+ ea_format_bitfield(a, buf, buflen, krt_metrics_names, 2, KRT_METRICS_MAX);
+ else if (a->id == ea_krt_metrics[RTAX_FEATURES].id)
+ ea_format_bitfield(a, buf, buflen, krt_features_names, 0, KRT_FEATURES_MAX);
+}
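A small usage sketch of the formatter above; the eattr initialization is schematic and the value is hypothetical, but it shows how the RTAX_* bit positions line up with krt_metrics_names (RTAX_MTU == 2 and RTAX_RTT == 4 on Linux, assuming the 2 passed above is the lowest bit rendered):

/* Sketch (not part of the patch): format a hypothetical krt_lock value
 * locking the "mtu" and "rtt" metrics. Each RTAX_* bit maps to the entry
 * with the same index in krt_metrics_names above. */
static void
krt_lock_format_example(byte *buf, uint buflen)
{
  eattr e = {
    .id = ea_krt_metrics[RTAX_LOCK].id,	/* assigned in nl_ea_register() */
    .u.data = (1 << RTAX_MTU) | (1 << RTAX_RTT),
  };
  krt_bitfield_format(&e, buf, buflen);	/* expected output: "mtu rtt" */
}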
+
+static void
+nl_ea_register(void)
+{
+ EA_REGISTER_ALL(
+ &ea_krt_prefsrc,
+ &ea_krt_realm,
+ &ea_krt_scope
+ );
+
+ for (uint i = 0; i < KRT_METRICS_MAX; i++)
+ {
+ if (!ea_krt_metrics[i].name)
+ ea_krt_metrics[i] = (struct ea_class) {
+ .name = mb_sprintf(&root_pool, "krt_metric_%d", i),
+ .type = T_INT,
+ };
+
+ ea_register_init(&ea_krt_metrics[i]);
+ }
+
+ for (uint i = 1; i < KRT_METRICS_MAX; i++)
+ ASSERT_DIE(ea_krt_metrics[i].id == ea_krt_metrics[0].id + i);
+}
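The contiguity assert above is what lets other code address these classes by RTAX offset. A brief usage sketch follows, mirroring the ea_set_attr()/EA_LITERAL_EMBEDDED() pattern from the parsing hunk below; the MTU value is hypothetical and the function assumes nl_ea_register() has already run:

/* Sketch (not part of the patch): attach and read back a kernel metric
 * attribute by its registered class. */
static void
krt_metric_usage_example(ea_list **to)
{
  /* Attach krt_mtu = 1500, same pattern as the metrics-parsing loop below */
  ea_set_attr(to, EA_LITERAL_EMBEDDED(&ea_krt_metrics[RTAX_MTU], 0, 1500));

  /* Look the attribute up again by its class */
  eattr *e = ea_find(*to, &ea_krt_metrics[RTAX_MTU]);
  if (e)
    log(L_TRACE "krt_mtu = %u", e->u.data);
}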
+
+
+
/*
* Synchronous Netlink interface
*/
}
static int
- nl_send_route(struct krt_proto *p, const rte *e, int op, int dest, struct nexthop_adata *nh)
-nl_send_route(struct krt_proto *p, rte *e, int op)
++nl_send_route(struct krt_proto *p, const rte *e, int op)
{
eattr *ea;
- net *net = e->net;
- rta *a = e->attrs;
- ea_list *eattrs = a->eattrs;
- int bufsize = 128 + KRT_METRICS_MAX*8 + nh_bufsize(&(a->nh));
+ ea_list *eattrs = e->attrs;
++ eattr *nhea = ea_find(eattrs, &ea_gen_nexthop);
++ struct nexthop_adata *nh = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
++ int dest = nhea_dest(nhea);
+
+ int bufsize = 128 + KRT_METRICS_MAX*8 + (nh ? nh_bufsize(nh) : 0);
u32 priority = 0;
struct {
/* Default scope is LINK for device routes, UNIVERSE otherwise */
if (p->af == AF_MPLS)
r->r.rtm_scope = RT_SCOPE_UNIVERSE;
- else if (ea = ea_find(eattrs, EA_KRT_SCOPE))
+ else if (ea = ea_find(eattrs, &ea_krt_scope))
r->r.rtm_scope = ea->u.data;
- else if (a->dest == RTD_UNICAST && ipa_zero(a->nh.gw))
++ else if (dest == RTD_UNICAST && ipa_zero(nh->nh.gw))
+ r->r.rtm_scope = RT_SCOPE_LINK;
else
- r->r.rtm_scope = (dest == RTD_UNICAST && ipa_zero(nh->nh.gw)) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;
+ r->r.rtm_scope = RT_SCOPE_UNIVERSE;
- if (ea = ea_find(eattrs, EA_KRT_PREFSRC))
+ if (ea = ea_find(eattrs, &ea_krt_prefsrc))
nl_add_attr_ipa(&r->h, rsize, RTA_PREFSRC, *(ip_addr *)ea->u.ptr->data);
- if (ea = ea_find(eattrs, EA_KRT_REALM))
+ if (ea = ea_find(eattrs, &ea_krt_realm))
nl_add_attr_u32(&r->h, rsize, RTA_FLOW, ea->u.data);
if (metrics[0])
nl_add_metrics(&r->h, rsize, metrics, KRT_METRICS_MAX);
-
- dest:
- switch (a->dest)
+ switch (dest)
{
case RTD_UNICAST:
r->r.rtm_type = RTN_UNICAST;
- if (!NEXTHOP_ONE(nh) && !krt_ecmp6(p))
- struct nexthop *nh = &(a->nh);
- if (nh->next)
++ if (!NEXTHOP_ONE(nh))
nl_add_multipath(&r->h, rsize, nh, p->af, eattrs);
else
{
}
static inline int
- nl_add_rte(struct krt_proto *p, rte *e)
- {
- ea_list *ea = e->attrs;
- int err = 0;
-
- eattr *nhea = ea_find(ea, &ea_gen_nexthop);
- struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
-
- if (krt_ecmp6(p) && nhad && NEXTHOP_IS_REACHABLE(nhad) && !NEXTHOP_ONE(nhad))
- {
- uint cnt = 0;
- NEXTHOP_WALK(nh, nhad)
- {
- struct {
- struct nexthop_adata nhad;
- u32 labels[MPLS_MAX_LABEL_STACK];
- } nhx;
- memcpy(&nhx.nhad.nh, nh, NEXTHOP_SIZE(nh));
- nhx.nhad.ad.length = (void *) NEXTHOP_NEXT(&nhx.nhad.nh) - (void *) nhx.nhad.ad.data;
-
- if (!cnt++)
- {
- err = nl_send_route(p, e, NL_OP_ADD, RTD_UNICAST, &nhx.nhad);
- if (err < 0)
- return err;
- }
- else
- err += nl_send_route(p, e, NL_OP_APPEND, RTD_UNICAST, &nhx.nhad);
- }
-
- return err;
- }
-
- return nl_send_route(p, e, NL_OP_ADD,
- NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest, nhad);
- }
-
- static inline int
- nl_delete_rte(struct krt_proto *p, const rte *e)
+ nl_allow_replace(struct krt_proto *p, rte *new)
{
- int err = 0;
+ /*
+ * We use NL_OP_REPLACE for IPv4. It has an issue with not checking for a
+ * matching rtm_protocol, but that is OK when a dedicated priority is used.
+ *
+ * For IPv6, NL_OP_REPLACE is still broken even in Linux 4.19 LTS (although
+ * it seems to be fixed in Linux 5.10 LTS) for the sequence:
+ *
+ * ip route add 2001:db8::/32 via fe80::1 dev eth0
+ * ip route replace 2001:db8::/32 dev eth0
+ *
+ * (it ends up with two routes instead of replacing the first with the second)
+ *
+ * Replacing with direct and special-type (e.g. unreachable) routes does not
+ * work, but replacing with regular routes works reliably.
+ */
- /* For IPv6, we just repeatedly request DELETE until we get an error */
- do
- err = nl_send_route(p, e, NL_OP_DELETE, RTD_NONE, NULL);
- while (krt_ecmp6(p) && !err);
+ if (krt_ipv4(p))
+ return 1;
- return err;
- }
- rta *a = new->attrs;
- return (a->dest == RTD_UNICAST) && ipa_nonzero(a->nh.gw);
++ eattr *nhea = ea_find(new->attrs, &ea_gen_nexthop);
++ struct nexthop_adata *nh = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
++ int dest = nhea_dest(nhea);
+
- static inline int
- nl_replace_rte(struct krt_proto *p, rte *e)
- {
- eattr *nhea = ea_find(e->attrs, &ea_gen_nexthop);
- struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
- return nl_send_route(p, e, NL_OP_REPLACE,
- NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest, nhad);
++ return (dest == RTD_UNICAST) && ipa_nonzero(nh->nh.gw);
}
-
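For orientation, here is how a caller would be expected to branch on this predicate. The actual body of krt_replace_rte() is elided below, so the following is an assumed sketch using only the operations shown in this patch, not the implementation itself:

/* Assumed caller logic (sketch, not part of the patch): replace in place
 * when allowed, otherwise fall back to delete + add. */
static int
krt_replace_rte_sketch(struct krt_proto *p, rte *new, const rte *old)
{
  if (old && new && nl_allow_replace(p, new))
    return nl_send_route(p, new, NL_OP_REPLACE);

  if (old)
    nl_send_route(p, old, NL_OP_DELETE);

  return new ? nl_send_route(p, new, NL_OP_ADD) : 0;
}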
void
-krt_replace_rte(struct krt_proto *p, net *n UNUSED, rte *new, rte *old)
+krt_replace_rte(struct krt_proto *p, const net_addr *n UNUSED, rte *new, const rte *old)
{
int err = 0;
net6_prefix(&src), net6_pxlen(&src));
}
- if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family))
- nl_announce_route(s);
- net *net = net_get(p->p.main_channel->table, n);
-
- rta *ra = lp_allocz(s->pool, RTA_MAX_SIZE);
- ra->source = RTS_INHERIT;
- ra->scope = SCOPE_UNIVERSE;
-
- {
- ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + 2 * sizeof(eattr));
- *ea = (ea_list) { .flags = EALF_SORTED, .count = 2 };
- ea->next = ra->eattrs;
- ra->eattrs = ea;
-
- ea->attrs[0] = (eattr) {
- .id = EA_KRT_SOURCE,
- .type = EAF_TYPE_INT,
- .u.data = i->rtm_protocol
- };
--
- ea->attrs[1] = (eattr) {
- .id = EA_KRT_METRIC,
- .type = EAF_TYPE_INT,
- .u.data = priority,
- };
- }
+ ea_list *ra = NULL;
+ ea_set_attr_u32(&ra, &ea_gen_source, 0, RTS_INHERIT);
++ ea_set_attr_u32(&ra, &ea_krt_source, 0, i->rtm_protocol);
++ ea_set_attr_u32(&ra, &ea_krt_metric, 0, priority);
if (a[RTA_FLOW])
s->rta_flow = rta_get_u32(a[RTA_FLOW]);
return;
}
- for (t = 1; t < KRT_METRICS_MAX; t++)
+ for (uint t = 1; t < KRT_METRICS_MAX; t++)
if (metrics[0] & (1 << t))
- {
- ea->attrs[n].id = EA_CODE(PROTOCOL_KERNEL, KRT_METRICS_OFFSET + t);
- ea->attrs[n].flags = 0;
- ea->attrs[n].type = EAF_TYPE_INT; /* FIXME: Some are EAF_TYPE_BITFIELD */
- ea->attrs[n].u.data = metrics[t];
- n++;
- }
-
- if (n > 0)
- {
- ea->next = ra->eattrs;
- ea->flags = EALF_SORTED;
- ea->count = n;
- ra->eattrs = ea;
- }
+ ea_set_attr(&ra,
+ EA_LITERAL_EMBEDDED(&ea_krt_metrics[t], 0, metrics[t]));
}
- /*
- * Ideally, we would now send the received route to the rest of the kernel
- * code. But IPv6 ECMP routes before 4.11 are sent as a sequence of routes,
- * so we postpone it and merge next hops until the end of the sequence. Note
- * that when merging next hops, we expect the new route to be unipath;
- * otherwise, we ignore additional next hops in nexthop_insert().
- */
- rte *e = rte_get_temp(ra, p->p.main_source);
- e->net = net;
++ rte e0 = {
++ .net = net,
++ .attrs = ra,
++ };
- if (!s->net)
- {
- /* Store the new route */
- s->net = lp_alloc(s->pool, net->length);
- net_copy(s->net, net);
-
- ea_set_attr_data(&ra, &ea_gen_nexthop, 0,
- nhad.ad.data, nhad.ad.length);
-
- s->attrs = ra;
- s->proto = p;
- s->new = new;
- s->krt_src = krt_src;
- s->krt_type = i->rtm_type;
- s->krt_proto = i->rtm_protocol;
- s->krt_metric = priority;
- }
+ if (s->scan)
- krt_got_route(p, e, krt_src);
++ krt_got_route(p, &e0, krt_src);
else
- {
- /* Merge next hops with the stored route */
- eattr *nhea = ea_find(s->attrs, &ea_gen_nexthop);
- struct nexthop_adata *nhad_old = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
-
- if (nhad_old)
- ea_set_attr(&s->attrs,
- EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0,
- &(nexthop_merge(nhad_old, &nhad.nhad,
- KRT_CF->merge_paths, s->pool)->ad)
- ));
- else
- ea_set_attr_data(&s->attrs, &ea_gen_nexthop, 0,
- nhad.ad.data, nhad.ad.length);
- }
- krt_got_route_async(p, e, new, krt_src);
++ krt_got_route_async(p, &e0, new, krt_src);
+
+ lp_flush(s->pool);
}
void