/* sysdep/linux/netlink.c — BIRD Linux Netlink interface
 * (copy of thirdparty/bird.git, commit "Netlink: Use route replace for IPv4") */
1 /*
2 * BIRD -- Linux Netlink Interface
3 *
4 * (c) 1999--2000 Martin Mares <mj@ucw.cz>
5 *
6 * Can be freely distributed and used under the terms of the GNU GPL.
7 */
8
9 #include <alloca.h>
10 #include <stdio.h>
11 #include <unistd.h>
12 #include <fcntl.h>
13 #include <sys/socket.h>
14 #include <sys/uio.h>
15 #include <errno.h>
16
17 #undef LOCAL_DEBUG
18
19 #include "nest/bird.h"
20 #include "nest/route.h"
21 #include "nest/protocol.h"
22 #include "nest/iface.h"
23 #include "lib/alloca.h"
24 #include "sysdep/unix/unix.h"
25 #include "sysdep/unix/krt.h"
26 #include "lib/socket.h"
27 #include "lib/string.h"
28 #include "lib/hash.h"
29 #include "conf/conf.h"
30
31 #include <asm/types.h>
32 #include <linux/if.h>
33 #include <linux/netlink.h>
34 #include <linux/rtnetlink.h>
35
36 #ifdef HAVE_MPLS_KERNEL
37 #include <linux/lwtunnel.h>
38 #endif
39
40 #ifndef MSG_TRUNC /* Hack: Several versions of glibc miss this one :( */
41 #define MSG_TRUNC 0x20
42 #endif
43
44 #ifndef IFA_FLAGS
45 #define IFA_FLAGS 8
46 #endif
47
48 #ifndef IFF_LOWER_UP
49 #define IFF_LOWER_UP 0x10000
50 #endif
51
52 #ifndef RTA_TABLE
53 #define RTA_TABLE 15
54 #endif
55
56 #ifndef RTA_VIA
57 #define RTA_VIA 18
58 #endif
59
60 #ifndef RTA_NEWDST
61 #define RTA_NEWDST 19
62 #endif
63
64 #ifndef RTA_ENCAP_TYPE
65 #define RTA_ENCAP_TYPE 21
66 #endif
67
68 #ifndef RTA_ENCAP
69 #define RTA_ENCAP 22
70 #endif
71
72 #define krt_ipv4(p) ((p)->af == AF_INET)
73 #define krt_ecmp6(p) ((p)->af == AF_INET6)
74
75 const int rt_default_ecmp = 16;
76
77 /*
78 * Structure nl_parse_state keeps state of received route processing. Ideally,
79 * we could just independently parse received Netlink messages and immediately
80 * propagate received routes to the rest of BIRD, but older Linux kernel (before
81 * version 4.11) represents and announces IPv6 ECMP routes not as one route with
82 * multiple next hops (like RTA_MULTIPATH in IPv4 ECMP), but as a sequence of
83 * routes with the same prefix. More recent kernels work as with IPv4.
84 *
85 * Therefore, BIRD keeps currently processed route in nl_parse_state structure
86 * and postpones its propagation until we expect it to be final; i.e., when
87 * non-matching route is received or when the scan ends. When another matching
88 * route is received, it is merged with the already processed route to form an
89 * ECMP route. Note that merging is done only for IPv6 (merge == 1), but the
90 * postponing is done in both cases (for simplicity). All IPv4 routes or IPv6
91 * routes with RTA_MULTIPATH set are just considered non-matching.
92 *
93 * This is ignored for asynchronous notifications (every notification is handled
94 * as a separate route). It is not an issue for our routes, as we ignore such
95 * notifications anyways. But importing alien IPv6 ECMP routes does not work
96 * properly with older kernels.
97 *
98 * Whatever the kernel version is, IPv6 ECMP routes are sent as multiple routes
99 * for the same prefix.
100 */
101
/*
 * State kept across a sequence of received route messages; see the long
 * comment above for why a route may be postponed and merged (pre-4.11
 * kernels announce IPv6 ECMP as several routes with the same prefix).
 */
struct nl_parse_state
{
  struct linpool *pool;		/* Pool for temporary allocations during parsing */
  int scan;			/* Processing a full scan, not an async notification */
  int merge;			/* Merge same-prefix routes into one ECMP route (IPv6 case) */

  net *net;			/* Network of the currently postponed route */
  rta *attrs;			/* Attributes of the currently postponed route */
  struct krt_proto *proto;	/* Protocol instance the postponed route belongs to */
  s8 new;			/* Postponed route is an addition, not a removal */
  s8 krt_src;			/* Kernel route source of the postponed route */
  u8 krt_type;			/* Kernel rtm_type of the postponed route */
  u8 krt_proto;			/* Kernel rtm_protocol of the postponed route */
  u32 krt_metric;		/* Kernel metric (RTA_PRIORITY) of the postponed route */
};
117
118 /*
119 * Synchronous Netlink interface
120 */
121
/* One rtnetlink socket together with its receive-side parsing state */
struct nl_sock
{
  int fd;			/* Socket descriptor, -1 when not yet open */
  u32 seq;			/* Sequence number of the last request sent */
  byte *rx_buffer;		/* Receive buffer */
  struct nlmsghdr *last_hdr;	/* Recently received packet */
  uint last_size;		/* Bytes remaining after last_hdr in rx_buffer */
};
130
131 #define NL_RX_SIZE 8192
132
133 #define NL_OP_DELETE 0
134 #define NL_OP_ADD (NLM_F_CREATE|NLM_F_EXCL)
135 #define NL_OP_REPLACE (NLM_F_CREATE|NLM_F_REPLACE)
136 #define NL_OP_APPEND (NLM_F_CREATE|NLM_F_APPEND)
137
138 static linpool *nl_linpool;
139
140 static struct nl_sock nl_scan = {.fd = -1}; /* Netlink socket for synchronous scan */
141 static struct nl_sock nl_req = {.fd = -1}; /* Netlink socket for requests */
142
143 static void
144 nl_open_sock(struct nl_sock *nl)
145 {
146 if (nl->fd < 0)
147 {
148 nl->fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
149 if (nl->fd < 0)
150 die("Unable to open rtnetlink socket: %m");
151 nl->seq = (u32) (current_time() TO_S); /* Or perhaps random_u32() ? */
152 nl->rx_buffer = xmalloc(NL_RX_SIZE);
153 nl->last_hdr = NULL;
154 nl->last_size = 0;
155 }
156 }
157
/* Open both netlink sockets (scan and request); idempotent */
static void
nl_open(void)
{
  nl_open_sock(&nl_scan);
  nl_open_sock(&nl_req);
}
164
165 static void
166 nl_send(struct nl_sock *nl, struct nlmsghdr *nh)
167 {
168 struct sockaddr_nl sa;
169
170 memset(&sa, 0, sizeof(sa));
171 sa.nl_family = AF_NETLINK;
172 nh->nlmsg_pid = 0;
173 nh->nlmsg_seq = ++(nl->seq);
174 if (sendto(nl->fd, nh, nh->nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa)) < 0)
175 die("rtnetlink sendto: %m");
176 nl->last_hdr = NULL;
177 }
178
179 static void
180 nl_request_dump(int af, int cmd)
181 {
182 struct {
183 struct nlmsghdr nh;
184 struct rtgenmsg g;
185 } req = {
186 .nh.nlmsg_type = cmd,
187 .nh.nlmsg_len = sizeof(req),
188 .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
189 .g.rtgen_family = af
190 };
191 nl_send(&nl_scan, &req.nh);
192 }
193
/*
 * Return the next netlink message received on the given socket,
 * reading a fresh datagram from the kernel when the buffered one
 * is exhausted. Skips non-kernel senders and replies whose sequence
 * number does not match the last request. Never returns NULL.
 */
static struct nlmsghdr *
nl_get_reply(struct nl_sock *nl)
{
  for(;;)
    {
      if (!nl->last_hdr)
	{
	  /* Buffer exhausted — receive the next datagram */
	  struct iovec iov = { nl->rx_buffer, NL_RX_SIZE };
	  struct sockaddr_nl sa;
	  struct msghdr m = {
	    .msg_name = &sa,
	    .msg_namelen = sizeof(sa),
	    .msg_iov = &iov,
	    .msg_iovlen = 1,
	  };
	  int x = recvmsg(nl->fd, &m, 0);
	  if (x < 0)
	    die("nl_get_reply: %m");
	  if (sa.nl_pid)		/* It isn't from the kernel */
	    {
	      DBG("Non-kernel packet\n");
	      continue;
	    }
	  nl->last_size = x;
	  nl->last_hdr = (void *) nl->rx_buffer;
	  if (m.msg_flags & MSG_TRUNC)
	    bug("nl_get_reply: got truncated reply which should be impossible");
	}
      if (NLMSG_OK(nl->last_hdr, nl->last_size))
	{
	  /* Advance the cursor past this message before handing it out */
	  struct nlmsghdr *h = nl->last_hdr;
	  nl->last_hdr = NLMSG_NEXT(h, nl->last_size);
	  if (h->nlmsg_seq != nl->seq)
	    {
	      log(L_WARN "nl_get_reply: Ignoring out of sequence netlink packet (%x != %x)",
		  h->nlmsg_seq, nl->seq);
	      continue;
	    }
	  return h;
	}
      /* Nothing parseable left in the buffer — force a new recvmsg */
      if (nl->last_size)
	log(L_WARN "nl_get_reply: Found packet remnant of size %d", nl->last_size);
      nl->last_hdr = NULL;
    }
}
239
240 static struct tbf rl_netlink_err = TBF_DEFAULT_LOG_LIMITS;
241
242 static int
243 nl_error(struct nlmsghdr *h, int ignore_esrch)
244 {
245 struct nlmsgerr *e;
246 int ec;
247
248 if (h->nlmsg_len < NLMSG_LENGTH(sizeof(struct nlmsgerr)))
249 {
250 log(L_WARN "Netlink: Truncated error message received");
251 return ENOBUFS;
252 }
253 e = (struct nlmsgerr *) NLMSG_DATA(h);
254 ec = -e->error;
255 if (ec && !(ignore_esrch && (ec == ESRCH)))
256 log_rl(&rl_netlink_err, L_WARN "Netlink: %s", strerror(ec));
257 return ec;
258 }
259
260 static struct nlmsghdr *
261 nl_get_scan(void)
262 {
263 struct nlmsghdr *h = nl_get_reply(&nl_scan);
264
265 if (h->nlmsg_type == NLMSG_DONE)
266 return NULL;
267 if (h->nlmsg_type == NLMSG_ERROR)
268 {
269 nl_error(h, 0);
270 return NULL;
271 }
272 return h;
273 }
274
275 static int
276 nl_exchange(struct nlmsghdr *pkt, int ignore_esrch)
277 {
278 struct nlmsghdr *h;
279
280 nl_send(&nl_req, pkt);
281 for(;;)
282 {
283 h = nl_get_reply(&nl_req);
284 if (h->nlmsg_type == NLMSG_ERROR)
285 break;
286 log(L_WARN "nl_exchange: Unexpected reply received");
287 }
288 return nl_error(h, ignore_esrch) ? -1 : 0;
289 }
290
291 /*
292 * Netlink attributes
293 */
294
295 static int nl_attr_len;
296
297 static void *
298 nl_checkin(struct nlmsghdr *h, int lsize)
299 {
300 nl_attr_len = h->nlmsg_len - NLMSG_LENGTH(lsize);
301 if (nl_attr_len < 0)
302 {
303 log(L_ERR "nl_checkin: underrun by %d bytes", -nl_attr_len);
304 return NULL;
305 }
306 return NLMSG_DATA(h);
307 }
308
/* Per-attribute-type parsing policy used by nl_parse_attrs() */
struct nl_want_attrs {
  u8 defined:1;		/* Attribute type is of interest at all */
  u8 checksize:1;	/* Enforce the exact payload size below */
  u8 size;		/* Expected payload size when checksize is set */
};
314
315
#define BIRD_IFLA_MAX (IFLA_WIRELESS+1)

/* Link (RTM_*LINK) attributes we care about */
static struct nl_want_attrs ifla_attr_want[BIRD_IFLA_MAX] = {
  [IFLA_IFNAME]	  = { 1, 0, 0 },
  [IFLA_MTU]	  = { 1, 1, sizeof(u32) },
  [IFLA_MASTER]	  = { 1, 1, sizeof(u32) },
  [IFLA_WIRELESS] = { 1, 0, 0 },
};


#define BIRD_IFA_MAX  (IFA_FLAGS+1)

/* Address (RTM_*ADDR) attributes we care about, per family */
static struct nl_want_attrs ifa_attr_want4[BIRD_IFA_MAX] = {
  [IFA_ADDRESS]	  = { 1, 1, sizeof(ip4_addr) },
  [IFA_LOCAL]	  = { 1, 1, sizeof(ip4_addr) },
  [IFA_BROADCAST] = { 1, 1, sizeof(ip4_addr) },
  [IFA_FLAGS]	  = { 1, 1, sizeof(u32) },
};

static struct nl_want_attrs ifa_attr_want6[BIRD_IFA_MAX] = {
  [IFA_ADDRESS]	  = { 1, 1, sizeof(ip6_addr) },
  [IFA_LOCAL]	  = { 1, 1, sizeof(ip6_addr) },
  [IFA_FLAGS]	  = { 1, 1, sizeof(u32) },
};


#define BIRD_RTA_MAX  (RTA_ENCAP+1)

/* Attributes accepted inside an RTA_MULTIPATH nexthop, per family */
static struct nl_want_attrs nexthop_attr_want4[BIRD_RTA_MAX] = {
  [RTA_GATEWAY]	  = { 1, 1, sizeof(ip4_addr) },
  [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
  [RTA_ENCAP]	  = { 1, 0, 0 },
};

static struct nl_want_attrs nexthop_attr_want6[BIRD_RTA_MAX] = {
  [RTA_GATEWAY]	  = { 1, 1, sizeof(ip6_addr) },
  [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
  [RTA_ENCAP]	  = { 1, 0, 0 },
};

#ifdef HAVE_MPLS_KERNEL
/* Attributes accepted inside an MPLS RTA_ENCAP nest */
static struct nl_want_attrs encap_mpls_want[BIRD_RTA_MAX] = {
  [RTA_DST]       = { 1, 0, 0 },
};
#endif

/* Route (RTM_*ROUTE) attributes we care about, per family */
static struct nl_want_attrs rtm_attr_want4[BIRD_RTA_MAX] = {
  [RTA_DST]	  = { 1, 1, sizeof(ip4_addr) },
  [RTA_OIF]	  = { 1, 1, sizeof(u32) },
  [RTA_GATEWAY]	  = { 1, 1, sizeof(ip4_addr) },
  [RTA_PRIORITY]  = { 1, 1, sizeof(u32) },
  [RTA_PREFSRC]	  = { 1, 1, sizeof(ip4_addr) },
  [RTA_METRICS]	  = { 1, 0, 0 },
  [RTA_MULTIPATH] = { 1, 0, 0 },
  [RTA_FLOW]	  = { 1, 1, sizeof(u32) },
  [RTA_TABLE]	  = { 1, 1, sizeof(u32) },
  [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
  [RTA_ENCAP]	  = { 1, 0, 0 },
};

static struct nl_want_attrs rtm_attr_want6[BIRD_RTA_MAX] = {
  [RTA_DST]	  = { 1, 1, sizeof(ip6_addr) },
  [RTA_SRC]	  = { 1, 1, sizeof(ip6_addr) },
  [RTA_IIF]	  = { 1, 1, sizeof(u32) },
  [RTA_OIF]	  = { 1, 1, sizeof(u32) },
  [RTA_GATEWAY]	  = { 1, 1, sizeof(ip6_addr) },
  [RTA_PRIORITY]  = { 1, 1, sizeof(u32) },
  [RTA_PREFSRC]	  = { 1, 1, sizeof(ip6_addr) },
  [RTA_METRICS]	  = { 1, 0, 0 },
  [RTA_MULTIPATH] = { 1, 0, 0 },
  [RTA_FLOW]	  = { 1, 1, sizeof(u32) },
  [RTA_TABLE]	  = { 1, 1, sizeof(u32) },
  [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) },
  [RTA_ENCAP]	  = { 1, 0, 0 },
};

#ifdef HAVE_MPLS_KERNEL
static struct nl_want_attrs rtm_attr_want_mpls[BIRD_RTA_MAX] = {
  [RTA_DST]	  = { 1, 1, sizeof(u32) },
  [RTA_IIF]	  = { 1, 1, sizeof(u32) },
  [RTA_OIF]	  = { 1, 1, sizeof(u32) },
  [RTA_PRIORITY]  = { 1, 1, sizeof(u32) },
  [RTA_METRICS]	  = { 1, 0, 0 },
  [RTA_FLOW]	  = { 1, 1, sizeof(u32) },
  [RTA_TABLE]	  = { 1, 1, sizeof(u32) },
  [RTA_VIA]	  = { 1, 0, 0 },
  [RTA_NEWDST]	  = { 1, 0, 0 },
};
#endif
405
406
407 static int
408 nl_parse_attrs(struct rtattr *a, struct nl_want_attrs *want, struct rtattr **k, int ksize)
409 {
410 int max = ksize / sizeof(struct rtattr *);
411 bzero(k, ksize);
412
413 for ( ; RTA_OK(a, nl_attr_len); a = RTA_NEXT(a, nl_attr_len))
414 {
415 if ((a->rta_type >= max) || !want[a->rta_type].defined)
416 continue;
417
418 if (want[a->rta_type].checksize && (RTA_PAYLOAD(a) != want[a->rta_type].size))
419 {
420 log(L_ERR "nl_parse_attrs: Malformed attribute received");
421 return 0;
422 }
423
424 k[a->rta_type] = a;
425 }
426
427 if (nl_attr_len)
428 {
429 log(L_ERR "nl_parse_attrs: remnant of size %d", nl_attr_len);
430 return 0;
431 }
432
433 return 1;
434 }
435
/* Accessors extracting typed values from a netlink attribute payload;
 * addresses are converted from network to host byte order. */

static inline u16 rta_get_u16(struct rtattr *a)
{ return *(u16 *) RTA_DATA(a); }

static inline u32 rta_get_u32(struct rtattr *a)
{ return *(u32 *) RTA_DATA(a); }

static inline ip4_addr rta_get_ip4(struct rtattr *a)
{ return ip4_ntoh(*(ip4_addr *) RTA_DATA(a)); }

static inline ip6_addr rta_get_ip6(struct rtattr *a)
{ return ip6_ntoh(*(ip6_addr *) RTA_DATA(a)); }

/* Family is inferred from the payload length (4 bytes -> IPv4) */
static inline ip_addr rta_get_ipa(struct rtattr *a)
{
  if (RTA_PAYLOAD(a) == sizeof(ip4_addr))
    return ipa_from_ip4(rta_get_ip4(a));
  else
    return ipa_from_ip6(rta_get_ip6(a));
}
455
#ifdef HAVE_MPLS_KERNEL
/* Extract the nexthop address from an RTA_VIA attribute;
 * returns IPA_NONE for an unsupported address family. */
static inline ip_addr rta_get_via(struct rtattr *a)
{
  struct rtvia *v = RTA_DATA(a);
  switch(v->rtvia_family) {
    case AF_INET:  return ipa_from_ip4(ip4_ntoh(*(ip4_addr *) v->rtvia_addr));
    case AF_INET6: return ipa_from_ip6(ip6_ntoh(*(ip6_addr *) v->rtvia_addr));
  }
  return IPA_NONE;
}

static u32 rta_mpls_stack[MPLS_MAX_LABEL_STACK];

/* Decode an MPLS label stack attribute into stack[]; returns the number
 * of labels. A payload that is not a multiple of 4 is truncated (and logged). */
static inline int rta_get_mpls(struct rtattr *a, u32 *stack)
{
  if (RTA_PAYLOAD(a) % 4)
    log(L_WARN "KRT: Strange length of received MPLS stack: %u", RTA_PAYLOAD(a));

  return mpls_get(RTA_DATA(a), RTA_PAYLOAD(a) & ~0x3, stack);
}
#endif
476
477 struct rtattr *
478 nl_add_attr(struct nlmsghdr *h, uint bufsize, uint code, const void *data, uint dlen)
479 {
480 uint pos = NLMSG_ALIGN(h->nlmsg_len);
481 uint len = RTA_LENGTH(dlen);
482
483 if (pos + len > bufsize)
484 bug("nl_add_attr: packet buffer overflow");
485
486 struct rtattr *a = (struct rtattr *)((char *)h + pos);
487 a->rta_type = code;
488 a->rta_len = len;
489 h->nlmsg_len = pos + len;
490
491 if (dlen > 0)
492 memcpy(RTA_DATA(a), data, dlen);
493
494 return a;
495 }
496
/* Start a nested attribute; its length is fixed up later by nl_close_attr() */
static inline struct rtattr *
nl_open_attr(struct nlmsghdr *h, uint bufsize, uint code)
{
  return nl_add_attr(h, bufsize, code, NULL, 0);
}

/* Finish a nested attribute by computing its final length from the
 * current (aligned) end of the message */
static inline void
nl_close_attr(struct nlmsghdr *h, struct rtattr *a)
{
  a->rta_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)a;
}
508
/* Typed convenience wrappers around nl_add_attr(); addresses are
 * converted to network byte order before being appended. */

static inline void
nl_add_attr_u16(struct nlmsghdr *h, uint bufsize, int code, u16 data)
{
  nl_add_attr(h, bufsize, code, &data, 2);
}

static inline void
nl_add_attr_u32(struct nlmsghdr *h, uint bufsize, int code, u32 data)
{
  nl_add_attr(h, bufsize, code, &data, 4);
}

static inline void
nl_add_attr_ip4(struct nlmsghdr *h, uint bufsize, int code, ip4_addr ip4)
{
  ip4 = ip4_hton(ip4);
  nl_add_attr(h, bufsize, code, &ip4, sizeof(ip4));
}

static inline void
nl_add_attr_ip6(struct nlmsghdr *h, uint bufsize, int code, ip6_addr ip6)
{
  ip6 = ip6_hton(ip6);
  nl_add_attr(h, bufsize, code, &ip6, sizeof(ip6));
}

/* Dispatch on the actual family of the generic address */
static inline void
nl_add_attr_ipa(struct nlmsghdr *h, uint bufsize, int code, ip_addr ipa)
{
  if (ipa_is_ip4(ipa))
    nl_add_attr_ip4(h, bufsize, code, ipa_to_ip4(ipa));
  else
    nl_add_attr_ip6(h, bufsize, code, ipa_to_ip6(ipa));
}
543
#ifdef HAVE_MPLS_KERNEL
/* Append an MPLS label stack (len labels) as attribute `code' */
static inline void
nl_add_attr_mpls(struct nlmsghdr *h, uint bufsize, int code, int len, u32 *stack)
{
  char buf[len*4];
  mpls_put(buf, len, stack);
  nl_add_attr(h, bufsize, code, buf, len*4);
}

/* Append an MPLS encapsulation: RTA_ENCAP_TYPE plus a nested
 * RTA_ENCAP containing the label stack as RTA_DST */
static inline void
nl_add_attr_mpls_encap(struct nlmsghdr *h, uint bufsize, int len, u32 *stack)
{
  nl_add_attr_u16(h, bufsize, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_MPLS);

  struct rtattr *nest = nl_open_attr(h, bufsize, RTA_ENCAP);
  nl_add_attr_mpls(h, bufsize, RTA_DST, len, stack);
  nl_close_attr(h, nest);
}

/* Append an RTA_VIA attribute (family-tagged nexthop address),
 * used instead of RTA_GATEWAY for MPLS routes */
static inline void
nl_add_attr_via(struct nlmsghdr *h, uint bufsize, ip_addr ipa)
{
  /* 16 bytes is enough for the larger (IPv6) address */
  struct rtvia *via = alloca(sizeof(struct rtvia) + 16);

  if (ipa_is_ip4(ipa))
  {
    via->rtvia_family = AF_INET;
    put_ip4(via->rtvia_addr, ipa_to_ip4(ipa));
    nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 4);
  }
  else
  {
    via->rtvia_family = AF_INET6;
    put_ip6(via->rtvia_addr, ipa_to_ip6(ipa));
    nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 16);
  }
}
#endif
582
/* Start a struct rtnexthop record inside an RTA_MULTIPATH nest;
 * its final length is fixed up by nl_close_nexthop() */
static inline struct rtnexthop *
nl_open_nexthop(struct nlmsghdr *h, uint bufsize)
{
  uint pos = NLMSG_ALIGN(h->nlmsg_len);
  uint len = RTNH_LENGTH(0);

  if (pos + len > bufsize)
    bug("nl_open_nexthop: packet buffer overflow");

  h->nlmsg_len = pos + len;

  return (void *)h + pos;
}

/* Finish a nexthop record by computing its length from the current
 * (aligned) end of the message */
static inline void
nl_close_nexthop(struct nlmsghdr *h, struct rtnexthop *nh)
{
  nh->rtnh_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)nh;
}
602
603 static inline void
604 nl_add_nexthop(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af UNUSED)
605 {
606 #ifdef HAVE_MPLS_KERNEL
607 if (nh->labels > 0)
608 if (af == AF_MPLS)
609 nl_add_attr_mpls(h, bufsize, RTA_NEWDST, nh->labels, nh->label);
610 else
611 nl_add_attr_mpls_encap(h, bufsize, nh->labels, nh->label);
612
613 if (ipa_nonzero(nh->gw))
614 if (af == AF_MPLS)
615 nl_add_attr_via(h, bufsize, nh->gw);
616 else
617 nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw);
618 #else
619
620 if (ipa_nonzero(nh->gw))
621 nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw);
622 #endif
623 }
624
625 static void
626 nl_add_multipath(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af)
627 {
628 struct rtattr *a = nl_open_attr(h, bufsize, RTA_MULTIPATH);
629
630 for (; nh; nh = nh->next)
631 {
632 struct rtnexthop *rtnh = nl_open_nexthop(h, bufsize);
633
634 rtnh->rtnh_flags = 0;
635 rtnh->rtnh_hops = nh->weight;
636 rtnh->rtnh_ifindex = nh->iface->index;
637
638 nl_add_nexthop(h, bufsize, nh, af);
639
640 if (nh->flags & RNF_ONLINK)
641 rtnh->rtnh_flags |= RTNH_F_ONLINK;
642
643 nl_close_nexthop(h, rtnh);
644 }
645
646 nl_close_attr(h, a);
647 }
648
649 static struct nexthop *
650 nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr *ra, int af)
651 {
652 struct rtattr *a[BIRD_RTA_MAX];
653 struct rtnexthop *nh = RTA_DATA(ra);
654 struct nexthop *rv, *first, **last;
655 unsigned len = RTA_PAYLOAD(ra);
656
657 first = NULL;
658 last = &first;
659
660 while (len)
661 {
662 /* Use RTNH_OK(nh,len) ?? */
663 if ((len < sizeof(*nh)) || (len < nh->rtnh_len))
664 return NULL;
665
666 *last = rv = lp_allocz(s->pool, NEXTHOP_MAX_SIZE);
667 last = &(rv->next);
668
669 rv->weight = nh->rtnh_hops;
670 rv->iface = if_find_by_index(nh->rtnh_ifindex);
671 if (!rv->iface)
672 return NULL;
673
674 /* Nonexistent RTNH_PAYLOAD ?? */
675 nl_attr_len = nh->rtnh_len - RTNH_LENGTH(0);
676 switch (af)
677 {
678 case AF_INET:
679 if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want4, a, sizeof(a)))
680 return NULL;
681 break;
682
683 case AF_INET6:
684 if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want6, a, sizeof(a)))
685 return NULL;
686 break;
687
688 default:
689 return NULL;
690 }
691
692 if (a[RTA_GATEWAY])
693 {
694 rv->gw = rta_get_ipa(a[RTA_GATEWAY]);
695
696 if (nh->rtnh_flags & RTNH_F_ONLINK)
697 rv->flags |= RNF_ONLINK;
698
699 neighbor *nbr;
700 nbr = neigh_find(&p->p, rv->gw, rv->iface,
701 (rv->flags & RNF_ONLINK) ? NEF_ONLINK : 0);
702 if (!nbr || (nbr->scope == SCOPE_HOST))
703 return NULL;
704 }
705 else
706 rv->gw = IPA_NONE;
707
708 #ifdef HAVE_MPLS_KERNEL
709 if (a[RTA_ENCAP_TYPE])
710 {
711 if (rta_get_u16(a[RTA_ENCAP_TYPE]) != LWTUNNEL_ENCAP_MPLS) {
712 log(L_WARN "KRT: Unknown encapsulation method %d in multipath", rta_get_u16(a[RTA_ENCAP_TYPE]));
713 return NULL;
714 }
715
716 struct rtattr *enca[BIRD_RTA_MAX];
717 nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]);
718 nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca));
719 rv->labels = rta_get_mpls(enca[RTA_DST], rv->label);
720 break;
721 }
722 #endif
723
724
725 len -= NLMSG_ALIGN(nh->rtnh_len);
726 nh = RTNH_NEXT(nh);
727 }
728
729 /* Ensure nexthops are sorted to satisfy nest invariant */
730 if (!nexthop_is_sorted(first))
731 first = nexthop_sort(first);
732
733 return first;
734 }
735
736 static void
737 nl_add_metrics(struct nlmsghdr *h, uint bufsize, u32 *metrics, int max)
738 {
739 struct rtattr *a = nl_open_attr(h, bufsize, RTA_METRICS);
740 int t;
741
742 for (t = 1; t < max; t++)
743 if (metrics[0] & (1 << t))
744 nl_add_attr_u32(h, bufsize, t, metrics[t]);
745
746 nl_close_attr(h, a);
747 }
748
749 static int
750 nl_parse_metrics(struct rtattr *hdr, u32 *metrics, int max)
751 {
752 struct rtattr *a = RTA_DATA(hdr);
753 int len = RTA_PAYLOAD(hdr);
754
755 metrics[0] = 0;
756 for (; RTA_OK(a, len); a = RTA_NEXT(a, len))
757 {
758 if (a->rta_type == RTA_UNSPEC)
759 continue;
760
761 if (a->rta_type >= max)
762 continue;
763
764 if (RTA_PAYLOAD(a) != 4)
765 return -1;
766
767 metrics[0] |= 1 << a->rta_type;
768 metrics[a->rta_type] = rta_get_u32(a);
769 }
770
771 if (len > 0)
772 return -1;
773
774 return 0;
775 }
776
777
778 /*
779 * Scanning of interfaces
780 */
781
/*
 * Process one RTM_NEWLINK/RTM_DELLINK message: translate kernel link
 * state into a struct iface and feed it to the interface module.
 * `scan' distinguishes a full scan from an async notification.
 */
static void
nl_parse_link(struct nlmsghdr *h, int scan)
{
  struct ifinfomsg *i;
  struct rtattr *a[BIRD_IFLA_MAX];
  int new = h->nlmsg_type == RTM_NEWLINK;
  struct iface f = {};
  struct iface *ifi;
  char *name;
  u32 mtu, master = 0;
  uint fl;

  if (!(i = nl_checkin(h, sizeof(*i))) || !nl_parse_attrs(IFLA_RTA(i), ifla_attr_want, a, sizeof(a)))
    return;
  if (!a[IFLA_IFNAME] || (RTA_PAYLOAD(a[IFLA_IFNAME]) < 2) || !a[IFLA_MTU])
    {
      /*
       * IFLA_IFNAME and IFLA_MTU are required, in fact, but there may also come
       * a message with IFLA_WIRELESS set, where (e.g.) no IFLA_IFNAME exists.
       * We simply ignore all such messages with IFLA_WIRELESS without notice.
       */

      if (a[IFLA_WIRELESS])
	return;

      log(L_ERR "KIF: Malformed message received");
      return;
    }

  name = RTA_DATA(a[IFLA_IFNAME]);
  mtu = rta_get_u32(a[IFLA_MTU]);

  if (a[IFLA_MASTER])
    master = rta_get_u32(a[IFLA_MASTER]);

  ifi = if_find_by_index(i->ifi_index);
  if (!new)
    {
      DBG("KIF: IF%d(%s) goes down\n", i->ifi_index, name);
      if (!ifi)
	return;

      if_delete(ifi);
    }
  else
    {
      DBG("KIF: IF%d(%s) goes up (mtu=%d,flg=%x)\n", i->ifi_index, name, mtu, i->ifi_flags);
      /* A renamed interface is treated as delete + create */
      if (ifi && strncmp(ifi->name, name, sizeof(ifi->name)-1))
	if_delete(ifi);

      strncpy(f.name, name, sizeof(f.name)-1);
      f.index = i->ifi_index;
      f.mtu = mtu;

      f.master_index = master;
      f.master = if_find_by_index(master);

      /* Translate kernel IFF_* flags into BIRD IF_* flags */
      fl = i->ifi_flags;
      if (fl & IFF_UP)
	f.flags |= IF_ADMIN_UP;
      if (fl & IFF_LOWER_UP)
	f.flags |= IF_LINK_UP;
      if (fl & IFF_LOOPBACK)		/* Loopback */
	f.flags |= IF_MULTIACCESS | IF_LOOPBACK | IF_IGNORE;
      else if (fl & IFF_POINTOPOINT)	/* PtP */
	f.flags |= IF_MULTICAST;
      else if (fl & IFF_BROADCAST)	/* Broadcast */
	f.flags |= IF_MULTIACCESS | IF_BROADCAST | IF_MULTICAST;
      else
	f.flags |= IF_MULTIACCESS;	/* NBMA */

      if (fl & IFF_MULTICAST)
	f.flags |= IF_MULTICAST;

      ifi = if_update(&f);

      if (!scan)
	if_end_partial_update(ifi);
    }
}
862
/*
 * Process one IPv4 address message: build a struct ifa (address, prefix,
 * broadcast, opposite address, scope) and update or delete it in the
 * interface module. `new' selects add vs. remove.
 */
static void
nl_parse_addr4(struct ifaddrmsg *i, int scan, int new)
{
  struct rtattr *a[BIRD_IFA_MAX];
  struct iface *ifi;
  u32 ifa_flags;
  int scope;

  if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want4, a, sizeof(a)))
    return;

  if (!a[IFA_LOCAL])
    {
      log(L_ERR "KIF: Malformed message received (missing IFA_LOCAL)");
      return;
    }
  if (!a[IFA_ADDRESS])
    {
      log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)");
      return;
    }

  ifi = if_find_by_index(i->ifa_index);
  if (!ifi)
    {
      log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index);
      return;
    }

  /* Newer kernels pass flags in an attribute, older in the header byte */
  if (a[IFA_FLAGS])
    ifa_flags = rta_get_u32(a[IFA_FLAGS]);
  else
    ifa_flags = i->ifa_flags;

  struct ifa ifa;
  bzero(&ifa, sizeof(ifa));
  ifa.iface = ifi;
  if (ifa_flags & IFA_F_SECONDARY)
    ifa.flags |= IA_SECONDARY;

  ifa.ip = rta_get_ipa(a[IFA_LOCAL]);

  if (i->ifa_prefixlen > IP4_MAX_PREFIX_LENGTH)
    {
      log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen);
      /* Invalid length — treat the message as a removal */
      new = 0;
    }
  if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH)
    {
      ifa.brd = rta_get_ipa(a[IFA_ADDRESS]);
      net_fill_ip4(&ifa.prefix, rta_get_ip4(a[IFA_ADDRESS]), i->ifa_prefixlen);

      /* It is either a host address or a peer address */
      if (ipa_equal(ifa.ip, ifa.brd))
	ifa.flags |= IA_HOST;
      else
	{
	  ifa.flags |= IA_PEER;
	  ifa.opposite = ifa.brd;
	}
    }
  else
    {
      net_fill_ip4(&ifa.prefix, ipa_to_ip4(ifa.ip), i->ifa_prefixlen);
      net_normalize(&ifa.prefix);

      /* On /31 and /30 networks the other usable address is known */
      if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 1)
	ifa.opposite = ipa_opposite_m1(ifa.ip);

      if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 2)
	ifa.opposite = ipa_opposite_m2(ifa.ip);

      if ((ifi->flags & IF_BROADCAST) && a[IFA_BROADCAST])
	{
	  /* Accept the kernel broadcast only if it is the network or
	     all-ones address of the prefix; otherwise fall back to all-ones */
	  ip4_addr xbrd = rta_get_ip4(a[IFA_BROADCAST]);
	  ip4_addr ybrd = ip4_or(ipa_to_ip4(ifa.ip), ip4_not(ip4_mkmask(i->ifa_prefixlen)));

	  if (ip4_equal(xbrd, net4_prefix(&ifa.prefix)) || ip4_equal(xbrd, ybrd))
	    ifa.brd = ipa_from_ip4(xbrd);
	  else if (ifi->flags & IF_TMP_DOWN) /* Complain only during the first scan */
	    {
	      log(L_ERR "KIF: Invalid broadcast address %I4 for %s", xbrd, ifi->name);
	      ifa.brd = ipa_from_ip4(ybrd);
	    }
	}
    }

  scope = ipa_classify(ifa.ip);
  if (scope < 0)
    {
      log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name);
      return;
    }
  ifa.scope = scope & IADDR_SCOPE_MASK;

  DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n",
      ifi->index, ifi->name,
      new ? "added" : "removed",
      ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite);

  if (new)
    ifa_update(&ifa);
  else
    ifa_delete(&ifa);

  if (!scan)
    if_end_partial_update(ifi);
}
971
/*
 * Process one IPv6 address message; the IPv6 counterpart of
 * nl_parse_addr4(). Tentative (DAD-pending) addresses are ignored.
 */
static void
nl_parse_addr6(struct ifaddrmsg *i, int scan, int new)
{
  struct rtattr *a[BIRD_IFA_MAX];
  struct iface *ifi;
  u32 ifa_flags;
  int scope;

  if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want6, a, sizeof(a)))
    return;

  if (!a[IFA_ADDRESS])
    {
      log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)");
      return;
    }

  ifi = if_find_by_index(i->ifa_index);
  if (!ifi)
    {
      log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index);
      return;
    }

  /* Newer kernels pass flags in an attribute, older in the header byte */
  if (a[IFA_FLAGS])
    ifa_flags = rta_get_u32(a[IFA_FLAGS]);
  else
    ifa_flags = i->ifa_flags;

  struct ifa ifa;
  bzero(&ifa, sizeof(ifa));
  ifa.iface = ifi;
  if (ifa_flags & IFA_F_SECONDARY)
    ifa.flags |= IA_SECONDARY;

  /* Ignore tentative addresses silently */
  if (ifa_flags & IFA_F_TENTATIVE)
    return;

  /* IFA_LOCAL can be unset for IPv6 interfaces */
  ifa.ip = rta_get_ipa(a[IFA_LOCAL] ? : a[IFA_ADDRESS]);

  if (i->ifa_prefixlen > IP6_MAX_PREFIX_LENGTH)
    {
      log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen);
      /* Invalid length — treat the message as a removal */
      new = 0;
    }
  if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH)
    {
      ifa.brd = rta_get_ipa(a[IFA_ADDRESS]);
      net_fill_ip6(&ifa.prefix, rta_get_ip6(a[IFA_ADDRESS]), i->ifa_prefixlen);

      /* It is either a host address or a peer address */
      if (ipa_equal(ifa.ip, ifa.brd))
	ifa.flags |= IA_HOST;
      else
	{
	  ifa.flags |= IA_PEER;
	  ifa.opposite = ifa.brd;
	}
    }
  else
    {
      net_fill_ip6(&ifa.prefix, ipa_to_ip6(ifa.ip), i->ifa_prefixlen);
      net_normalize(&ifa.prefix);

      /* On a /127 network the other usable address is known */
      if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH - 1)
	ifa.opposite = ipa_opposite_m1(ifa.ip);
    }

  scope = ipa_classify(ifa.ip);
  if (scope < 0)
    {
      log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name);
      return;
    }
  ifa.scope = scope & IADDR_SCOPE_MASK;

  DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n",
      ifi->index, ifi->name,
      new ? "added" : "removed",
      ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite);

  if (new)
    ifa_update(&ifa);
  else
    ifa_delete(&ifa);

  if (!scan)
    if_end_partial_update(ifi);
}
1063
1064 static void
1065 nl_parse_addr(struct nlmsghdr *h, int scan)
1066 {
1067 struct ifaddrmsg *i;
1068
1069 if (!(i = nl_checkin(h, sizeof(*i))))
1070 return;
1071
1072 int new = (h->nlmsg_type == RTM_NEWADDR);
1073
1074 switch (i->ifa_family)
1075 {
1076 case AF_INET:
1077 return nl_parse_addr4(i, scan, new);
1078
1079 case AF_INET6:
1080 return nl_parse_addr6(i, scan, new);
1081 }
1082 }
1083
/*
 * Full interface scan: dump all links, fix up master references,
 * then dump all IPv4 and IPv6 addresses, feeding everything to
 * the interface module between if_start_update()/if_end_update().
 */
void
kif_do_scan(struct kif_proto *p UNUSED)
{
  struct nlmsghdr *h;

  if_start_update();

  nl_request_dump(AF_UNSPEC, RTM_GETLINK);
  while (h = nl_get_scan())
    if (h->nlmsg_type == RTM_NEWLINK || h->nlmsg_type == RTM_DELLINK)
      nl_parse_link(h, 1);
    else
      log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);

  /* Re-resolve master interface for slaves */
  struct iface *i;
  WALK_LIST(i, iface_list)
    if (i->master_index)
      {
	struct iface f = {
	  .flags = i->flags,
	  .mtu = i->mtu,
	  .index = i->index,
	  .master_index = i->master_index,
	  .master = if_find_by_index(i->master_index)
	};

	/* The master may have appeared only later in the dump */
	if (f.master != i->master)
	  {
	    memcpy(f.name, i->name, sizeof(f.name));
	    if_update(&f);
	  }
      }

  nl_request_dump(AF_INET, RTM_GETADDR);
  while (h = nl_get_scan())
    if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR)
      nl_parse_addr(h, 1);
    else
      log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);

  nl_request_dump(AF_INET6, RTM_GETADDR);
  while (h = nl_get_scan())
    if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR)
      nl_parse_addr(h, 1);
    else
      log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type);

  if_end_update();
}
1134
1135 /*
1136 * Routes
1137 */
1138
/* Kernel routing table id configured for this krt protocol instance */
static inline u32
krt_table_id(struct krt_proto *p)
{
  return KRT_CF->sys.table_id;
}
1144
/* Hash of krt protocol instances keyed by (address family, kernel table id),
 * used to route received messages to the right protocol instance */
static HASH(struct krt_proto) nl_table_map;

#define RTH_KEY(p)		p->af, krt_table_id(p)
#define RTH_NEXT(p)		p->sys.hash_next
#define RTH_EQ(a1,i1,a2,i2)	a1 == a2 && i1 == i2
#define RTH_FN(a,i)		a ^ u32_hash(i)

#define RTH_REHASH		rth_rehash
#define RTH_PARAMS		/8, *2, 2, 2, 6, 20

HASH_DEFINE_REHASH_FN(RTH, struct krt_proto)
1156
1157 int
1158 krt_capable(rte *e)
1159 {
1160 rta *a = e->attrs;
1161
1162 switch (a->dest)
1163 {
1164 case RTD_UNICAST:
1165 case RTD_BLACKHOLE:
1166 case RTD_UNREACHABLE:
1167 case RTD_PROHIBIT:
1168 return 1;
1169
1170 default:
1171 return 0;
1172 }
1173 }
1174
1175 static inline int
1176 nh_bufsize(struct nexthop *nh)
1177 {
1178 int rv = 0;
1179 for (; nh != NULL; nh = nh->next)
1180 rv += RTNH_LENGTH(RTA_LENGTH(sizeof(ip_addr)));
1181 return rv;
1182 }
1183
/*
 * nl_send_route - build and send one netlink route request.
 *
 * Constructs an RTM_NEWROUTE/RTM_DELROUTE message for route @e and sends it
 * synchronously via nl_exchange(). @op is one of the NL_OP_* constants
 * (ADD / APPEND / REPLACE / DELETE), @dest is the RTD_* destination type and
 * @nh the (first) next hop to encode. Returns the nl_exchange() result
 * (negative on kernel error).
 */
static int
nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh)
{
  eattr *ea;
  net *net = e->net;
  rta *a = e->attrs;
  ea_list *eattrs = a->eattrs;
  /* Rough upper bound on attribute space; the message lives on the stack */
  int bufsize = 128 + KRT_METRICS_MAX*8 + nh_bufsize(&(a->nh));
  u32 priority = 0;

  struct {
    struct nlmsghdr h;
    struct rtmsg r;
    char buf[0];
  } *r;

  int rsize = sizeof(*r) + bufsize;
  r = alloca(rsize);

  DBG("nl_send_route(%N,op=%x)\n", net->n.addr, op);

  bzero(&r->h, sizeof(r->h));
  bzero(&r->r, sizeof(r->r));
  /* NL_OP_DELETE is 0, every other op maps to RTM_NEWROUTE with op as flags */
  r->h.nlmsg_type = op ? RTM_NEWROUTE : RTM_DELROUTE;
  r->h.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
  r->h.nlmsg_flags = op | NLM_F_REQUEST | NLM_F_ACK;

  r->r.rtm_family = p->af;
  r->r.rtm_dst_len = net_pxlen(net->n.addr);
  r->r.rtm_protocol = RTPROT_BIRD;
  r->r.rtm_scope = RT_SCOPE_NOWHERE;
#ifdef HAVE_MPLS_KERNEL
  if (p->af == AF_MPLS)
  {
    /*
     * Kernel MPLS code is a bit picky. We must:
     * 1) Always set RT_SCOPE_UNIVERSE and RTN_UNICAST (even for RTM_DELROUTE)
     * 2) Never use RTA_PRIORITY
     */

    u32 label = net_mpls(net->n.addr);
    nl_add_attr_mpls(&r->h, rsize, RTA_DST, 1, &label);
    r->r.rtm_scope = RT_SCOPE_UNIVERSE;
    r->r.rtm_type = RTN_UNICAST;
  }
  else
#endif
  {
    nl_add_attr_ipa(&r->h, rsize, RTA_DST, net_prefix(net->n.addr));

    /* Add source address for IPv6 SADR routes */
    if (net->n.addr->type == NET_IP6_SADR)
    {
      net_addr_ip6_sadr *a = (void *) &net->n.addr;
      nl_add_attr_ip6(&r->h, rsize, RTA_SRC, a->src_prefix);
      r->r.rtm_src_len = a->src_pxlen;
    }
  }

  /*
   * Strange behavior for RTM_DELROUTE:
   * 1) rtm_family is ignored in IPv6, works for IPv4
   * 2) not setting RTA_PRIORITY is different from setting default value (on IPv6)
   * 3) not setting RTA_PRIORITY is equivalent to setting 0, which is wildcard
   */

  /* Table ids < 256 fit in the legacy rtm_table byte; larger need RTA_TABLE */
  if (krt_table_id(p) < 256)
    r->r.rtm_table = krt_table_id(p);
  else
    nl_add_attr_u32(&r->h, rsize, RTA_TABLE, krt_table_id(p));

  if (p->af == AF_MPLS)
    priority = 0;
  else if (a->source == RTS_DUMMY)
    priority = e->u.krt.metric;
  else if (KRT_CF->sys.metric)
    priority = KRT_CF->sys.metric;
  else if ((op != NL_OP_DELETE) && (ea = ea_find(eattrs, EA_KRT_METRIC)))
    priority = ea->u.data;

  if (priority)
    nl_add_attr_u32(&r->h, rsize, RTA_PRIORITY, priority);

  /* For route delete, we do not specify remaining route attributes */
  if (op == NL_OP_DELETE)
    goto dest;

  /* Default scope is LINK for device routes, UNIVERSE otherwise */
  if (p->af == AF_MPLS)
    r->r.rtm_scope = RT_SCOPE_UNIVERSE;
  else if (ea = ea_find(eattrs, EA_KRT_SCOPE))
    r->r.rtm_scope = ea->u.data;
  else
    r->r.rtm_scope = (dest == RTD_UNICAST && ipa_zero(nh->gw)) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;

  if (ea = ea_find(eattrs, EA_KRT_PREFSRC))
    nl_add_attr_ipa(&r->h, rsize, RTA_PREFSRC, *(ip_addr *)ea->u.ptr->data);

  if (ea = ea_find(eattrs, EA_KRT_REALM))
    nl_add_attr_u32(&r->h, rsize, RTA_FLOW, ea->u.data);


  /* metrics[0] is a bitmask of which metric slots below are valid */
  u32 metrics[KRT_METRICS_MAX];
  metrics[0] = 0;

  struct ea_walk_state ews = { .eattrs = eattrs };
  while (ea = ea_walk(&ews, EA_KRT_METRICS, KRT_METRICS_MAX))
  {
    int id = ea->id - EA_KRT_METRICS;
    metrics[0] |= 1 << id;
    metrics[id] = ea->u.data;
  }

  if (metrics[0])
    nl_add_metrics(&r->h, rsize, metrics, KRT_METRICS_MAX);


dest:
  switch (dest)
  {
  case RTD_UNICAST:
    r->r.rtm_type = RTN_UNICAST;
    /* IPv6 ECMP is emulated by repeated NL_OP_APPEND, not RTA_MULTIPATH */
    if (nh->next && !krt_ecmp6(p))
      nl_add_multipath(&r->h, rsize, nh, p->af);
    else
    {
      nl_add_attr_u32(&r->h, rsize, RTA_OIF, nh->iface->index);
      nl_add_nexthop(&r->h, rsize, nh, p->af);

      if (nh->flags & RNF_ONLINK)
	r->r.rtm_flags |= RTNH_F_ONLINK;
    }
    break;
  case RTD_BLACKHOLE:
    r->r.rtm_type = RTN_BLACKHOLE;
    break;
  case RTD_UNREACHABLE:
    r->r.rtm_type = RTN_UNREACHABLE;
    break;
  case RTD_PROHIBIT:
    r->r.rtm_type = RTN_PROHIBIT;
    break;
  case RTD_NONE:
    break;
  default:
    bug("krt_capable inconsistent with nl_send_route");
  }

  /* Ignore missing for DELETE */
  return nl_exchange(&r->h, (op == NL_OP_DELETE));
}
1335
1336 static inline int
1337 nl_add_rte(struct krt_proto *p, rte *e)
1338 {
1339 rta *a = e->attrs;
1340 int err = 0;
1341
1342 if (krt_ecmp6(p) && a->nh.next)
1343 {
1344 struct nexthop *nh = &(a->nh);
1345
1346 err = nl_send_route(p, e, NL_OP_ADD, RTD_UNICAST, nh);
1347 if (err < 0)
1348 return err;
1349
1350 for (nh = nh->next; nh; nh = nh->next)
1351 err += nl_send_route(p, e, NL_OP_APPEND, RTD_UNICAST, nh);
1352
1353 return err;
1354 }
1355
1356 return nl_send_route(p, e, NL_OP_ADD, a->dest, &(a->nh));
1357 }
1358
1359 static inline int
1360 nl_delete_rte(struct krt_proto *p, rte *e)
1361 {
1362 int err = 0;
1363
1364 /* For IPv6, we just repeatedly request DELETE until we get error */
1365 do
1366 err = nl_send_route(p, e, NL_OP_DELETE, RTD_NONE, NULL);
1367 while (krt_ecmp6(p) && !err);
1368
1369 return err;
1370 }
1371
1372 static inline int
1373 nl_replace_rte(struct krt_proto *p, rte *e)
1374 {
1375 rta *a = e->attrs;
1376 return nl_send_route(p, e, NL_OP_REPLACE, a->dest, &(a->nh));
1377 }
1378
1379
1380 void
1381 krt_replace_rte(struct krt_proto *p, net *n, rte *new, rte *old)
1382 {
1383 int err = 0;
1384
1385 /*
1386 * We use NL_OP_REPLACE for IPv4, it has an issue with not checking for
1387 * matching rtm_protocol, but that is OK when dedicated priority is used.
1388 *
1389 * We do not use NL_OP_REPLACE for IPv6, as it has broken semantics for ECMP
1390 * and with some kernel versions ECMP replace crashes kernel. Would need more
1391 * testing and checks for kernel versions.
1392 *
1393 * For IPv6, we use NL_OP_DELETE and then NL_OP_ADD. We also do not trust the
1394 * old route value, so we do not try to optimize IPv6 ECMP reconfigurations.
1395 */
1396
1397 if (krt_ipv4(p) && old && new)
1398 {
1399 err = nl_replace_rte(p, new);
1400 }
1401 else
1402 {
1403 if (old)
1404 nl_delete_rte(p, old);
1405
1406 if (new)
1407 err = nl_add_rte(p, new);
1408 }
1409
1410 if (err < 0)
1411 n->n.flags |= KRF_SYNC_ERROR;
1412 else
1413 n->n.flags &= ~KRF_SYNC_ERROR;
1414 }
1415
1416 static int
1417 nl_mergable_route(struct nl_parse_state *s, net *net, struct krt_proto *p, uint priority, uint krt_type, uint rtm_family)
1418 {
1419 /* Route merging is used for IPv6 scans */
1420 if (!s->scan || (rtm_family != AF_INET6))
1421 return 0;
1422
1423 /* Saved and new route must have same network, proto/table, and priority */
1424 if ((s->net != net) || (s->proto != p) || (s->krt_metric != priority))
1425 return 0;
1426
1427 /* Both must be regular unicast routes */
1428 if ((s->krt_type != RTN_UNICAST) || (krt_type != RTN_UNICAST))
1429 return 0;
1430
1431 return 1;
1432 }
1433
/*
 * nl_announce_route - hand the route accumulated in the parse state over
 * to the KRT core, then reset the state for the next route.
 */
static void
nl_announce_route(struct nl_parse_state *s)
{
  rte *e = rte_get_temp(s->attrs);
  e->net = s->net;
  e->u.krt.src = s->krt_src;
  e->u.krt.proto = s->krt_proto;
  e->u.krt.seen = 0;
  e->u.krt.best = 0;
  e->u.krt.metric = s->krt_metric;

  /* Scan results and async notifications enter the core by different paths */
  if (s->scan)
    krt_got_route(s->proto, e);
  else
    krt_got_route_async(s->proto, e, s->new);

  /* Clear the accumulator and release per-route temporary allocations */
  s->net = NULL;
  s->attrs = NULL;
  s->proto = NULL;
  lp_flush(s->pool);
}
1455
1456 static inline void
1457 nl_parse_begin(struct nl_parse_state *s, int scan)
1458 {
1459 memset(s, 0, sizeof (struct nl_parse_state));
1460 s->pool = nl_linpool;
1461 s->scan = scan;
1462 }
1463
1464 static inline void
1465 nl_parse_end(struct nl_parse_state *s)
1466 {
1467 if (s->net)
1468 nl_announce_route(s);
1469 }
1470
1471
1472 #define SKIP(ARG...) do { DBG("KRT: Ignoring route - " ARG); return; } while(0)
1473
/*
 * nl_parse_route - parse one RTM_NEWROUTE/RTM_DELROUTE netlink message and
 * hand the resulting route to the KRT core (possibly deferred, so that
 * pre-4.11 IPv6 ECMP route sequences can be merged first).
 *
 * Called both from the synchronous table scan (s->scan set) and from the
 * asynchronous notification hook. Routes we cannot or should not process
 * are dropped via the SKIP() macro.
 */
static void
nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h)
{
  struct krt_proto *p;
  struct rtmsg *i;
  struct rtattr *a[BIRD_RTA_MAX];
  int new = h->nlmsg_type == RTM_NEWROUTE;	/* 0 for RTM_DELROUTE */

  net_addr dst, src = {};
  u32 oif = ~0;
  u32 table_id;
  u32 priority = 0;
  u32 def_scope = RT_SCOPE_UNIVERSE;
  int krt_src;

  if (!(i = nl_checkin(h, sizeof(*i))))
    return;

  /* Extract the destination (and for SADR also source) prefix per family */
  switch (i->rtm_family)
  {
  case AF_INET:
    if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want4, a, sizeof(a)))
      return;

    if (a[RTA_DST])
      net_fill_ip4(&dst, rta_get_ip4(a[RTA_DST]), i->rtm_dst_len);
    else
      net_fill_ip4(&dst, IP4_NONE, 0);
    break;

  case AF_INET6:
    if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want6, a, sizeof(a)))
      return;

    if (a[RTA_DST])
      net_fill_ip6(&dst, rta_get_ip6(a[RTA_DST]), i->rtm_dst_len);
    else
      net_fill_ip6(&dst, IP6_NONE, 0);

    if (a[RTA_SRC])
      net_fill_ip6(&src, rta_get_ip6(a[RTA_SRC]), i->rtm_src_len);
    else
      net_fill_ip6(&src, IP6_NONE, 0);
    break;

#ifdef HAVE_MPLS_KERNEL
  case AF_MPLS:
    if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want_mpls, a, sizeof(a)))
      return;

    if (!a[RTA_DST])
      SKIP("MPLS route without RTA_DST");

    /* Only a single-label FEC is supported */
    if (rta_get_mpls(a[RTA_DST], rta_mpls_stack) != 1)
      SKIP("MPLS route with multi-label RTA_DST");

    net_fill_mpls(&dst, rta_mpls_stack[0]);
    break;
#endif

  default:
    return;
  }

  if (a[RTA_OIF])
    oif = rta_get_u32(a[RTA_OIF]);

  /* RTA_TABLE extends the legacy 8-bit rtm_table field to a full u32 */
  if (a[RTA_TABLE])
    table_id = rta_get_u32(a[RTA_TABLE]);
  else
    table_id = i->rtm_table;

  /* Do we know this table? */
  p = HASH_FIND(nl_table_map, RTH, i->rtm_family, table_id);
  if (!p)
    SKIP("unknown table %u\n", table_id);

  if (a[RTA_SRC] && (p->p.net_type != NET_IP6_SADR))
    SKIP("src prefix for non-SADR channel\n");

  if (a[RTA_IIF])
    SKIP("IIF set\n");

  if (i->rtm_tos != 0)			/* We don't support TOS */
    SKIP("TOS %02x\n", i->rtm_tos);

  /* During a scan, deletions are irrelevant - the dump is authoritative */
  if (s->scan && !new)
    SKIP("RTM_DELROUTE in scan\n");

  if (a[RTA_PRIORITY])
    priority = rta_get_u32(a[RTA_PRIORITY]);

  int c = net_classify(&dst);
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
    SKIP("strange class/scope\n");

  /* Map the kernel route origin to a KRT_SRC_* class */
  switch (i->rtm_protocol)
  {
  case RTPROT_UNSPEC:
    SKIP("proto unspec\n");

  case RTPROT_REDIRECT:
    krt_src = KRT_SRC_REDIRECT;
    break;

  case RTPROT_KERNEL:
    /* Kernel-generated routes are ignored entirely */
    krt_src = KRT_SRC_KERNEL;
    return;

  case RTPROT_BIRD:
    /* Async echo of our own update; only meaningful during a scan */
    if (!s->scan)
      SKIP("echo\n");
    krt_src = KRT_SRC_BIRD;
    break;

  case RTPROT_BOOT:
  default:
    krt_src = KRT_SRC_ALIEN;
  }

  net_addr *n = &dst;
  if (p->p.net_type == NET_IP6_SADR)
  {
    /* Combine dst and src prefixes into one SADR network address */
    n = alloca(sizeof(net_addr_ip6_sadr));
    net_fill_ip6_sadr(n, net6_prefix(&dst), net6_pxlen(&dst),
		      net6_prefix(&src), net6_pxlen(&src));
  }

  net *net = net_get(p->p.main_channel->table, n);

  /* A non-mergable route arrived; flush the accumulated one first */
  if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family))
    nl_announce_route(s);

  rta *ra = lp_allocz(s->pool, RTA_MAX_SIZE);
  ra->src = p->p.main_source;
  ra->source = RTS_INHERIT;
  ra->scope = SCOPE_UNIVERSE;

  switch (i->rtm_type)
  {
  case RTN_UNICAST:
    ra->dest = RTD_UNICAST;

    if (a[RTA_MULTIPATH])
    {
      struct nexthop *nh = nl_parse_multipath(s, p, a[RTA_MULTIPATH], i->rtm_family);
      if (!nh)
      {
	log(L_ERR "KRT: Received strange multipath route %N", net->n.addr);
	return;
      }

      ra->nh = *nh;
      break;
    }

    ra->nh.iface = if_find_by_index(oif);
    if (!ra->nh.iface)
    {
      log(L_ERR "KRT: Received route %N with unknown ifindex %u", net->n.addr, oif);
      return;
    }

    /* Gateway route: IP families use RTA_GATEWAY, MPLS uses RTA_VIA */
    if ((i->rtm_family != AF_MPLS) && a[RTA_GATEWAY]
#ifdef HAVE_MPLS_KERNEL
	|| (i->rtm_family == AF_MPLS) && a[RTA_VIA]
#endif
	)
    {
#ifdef HAVE_MPLS_KERNEL
      if (i->rtm_family == AF_MPLS)
	ra->nh.gw = rta_get_via(a[RTA_VIA]);
      else
#endif
	ra->nh.gw = rta_get_ipa(a[RTA_GATEWAY]);

      /* Silently skip strange 6to4 routes */
      const net_addr_ip6 sit = NET_ADDR_IP6(IP6_NONE, 96);
      if ((i->rtm_family == AF_INET6) && ipa_in_netX(ra->nh.gw, (net_addr *) &sit))
	return;

      if (i->rtm_flags & RTNH_F_ONLINK)
	ra->nh.flags |= RNF_ONLINK;

      /* The next hop must resolve to a known non-local neighbor */
      neighbor *nbr;
      nbr = neigh_find(&p->p, ra->nh.gw, ra->nh.iface,
		       (ra->nh.flags & RNF_ONLINK) ? NEF_ONLINK : 0);
      if (!nbr || (nbr->scope == SCOPE_HOST))
      {
	log(L_ERR "KRT: Received route %N with strange next-hop %I", net->n.addr,
	    ra->nh.gw);
	return;
      }
    }

    break;
  case RTN_BLACKHOLE:
    ra->dest = RTD_BLACKHOLE;
    break;
  case RTN_UNREACHABLE:
    ra->dest = RTD_UNREACHABLE;
    break;
  case RTN_PROHIBIT:
    ra->dest = RTD_PROHIBIT;
    break;
  /* FIXME: What about RTN_THROW? */
  default:
    SKIP("type %d\n", i->rtm_type);
    return;
  }

#ifdef HAVE_MPLS_KERNEL
  /* MPLS label stack: native RTA_NEWDST for MPLS routes, or LWT encap */
  int labels = 0;
  if ((i->rtm_family == AF_MPLS) && a[RTA_NEWDST] && !ra->nh.next)
    labels = rta_get_mpls(a[RTA_NEWDST], ra->nh.label);

  if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE] && !ra->nh.next)
  {
    switch (rta_get_u16(a[RTA_ENCAP_TYPE]))
    {
      case LWTUNNEL_ENCAP_MPLS:
      {
	struct rtattr *enca[BIRD_RTA_MAX];
	nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]);
	nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca));
	labels = rta_get_mpls(enca[RTA_DST], ra->nh.label);
	break;
      }
      default:
	SKIP("unknown encapsulation method %d\n", rta_get_u16(a[RTA_ENCAP_TYPE]));
	break;
    }
  }

  /* rta_get_mpls() returns negative when the stack does not fit */
  if (labels < 0)
  {
    log(L_WARN "KRT: Too long MPLS stack received, ignoring.");
    ra->nh.labels = 0;
  }
  else
    ra->nh.labels = labels;
#endif

  /* Non-default scope is preserved as the EA_KRT_SCOPE attribute */
  if (i->rtm_scope != def_scope)
  {
    ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr));
    ea->next = ra->eattrs;
    ra->eattrs = ea;
    ea->flags = EALF_SORTED;
    ea->count = 1;
    ea->attrs[0].id = EA_KRT_SCOPE;
    ea->attrs[0].flags = 0;
    ea->attrs[0].type = EAF_TYPE_INT;
    ea->attrs[0].u.data = i->rtm_scope;
  }

  if (a[RTA_PREFSRC])
  {
    ip_addr ps = rta_get_ipa(a[RTA_PREFSRC]);

    ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr));
    ea->next = ra->eattrs;
    ra->eattrs = ea;
    ea->flags = EALF_SORTED;
    ea->count = 1;
    ea->attrs[0].id = EA_KRT_PREFSRC;
    ea->attrs[0].flags = 0;
    ea->attrs[0].type = EAF_TYPE_IP_ADDRESS;
    ea->attrs[0].u.ptr = lp_alloc(s->pool, sizeof(struct adata) + sizeof(ps));
    ea->attrs[0].u.ptr->length = sizeof(ps);
    memcpy(ea->attrs[0].u.ptr->data, &ps, sizeof(ps));
  }

  if (a[RTA_FLOW])
  {
    ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr));
    ea->next = ra->eattrs;
    ra->eattrs = ea;
    ea->flags = EALF_SORTED;
    ea->count = 1;
    ea->attrs[0].id = EA_KRT_REALM;
    ea->attrs[0].flags = 0;
    ea->attrs[0].type = EAF_TYPE_INT;
    ea->attrs[0].u.data = rta_get_u32(a[RTA_FLOW]);
  }

  if (a[RTA_METRICS])
  {
    u32 metrics[KRT_METRICS_MAX];
    ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + KRT_METRICS_MAX * sizeof(eattr));
    int t, n = 0;

    if (nl_parse_metrics(a[RTA_METRICS], metrics, ARRAY_SIZE(metrics)) < 0)
    {
      log(L_ERR "KRT: Received route %N with strange RTA_METRICS attribute", net->n.addr);
      return;
    }

    /* metrics[0] is a bitmask of which metric slots are valid */
    for (t = 1; t < KRT_METRICS_MAX; t++)
      if (metrics[0] & (1 << t))
      {
	ea->attrs[n].id = EA_CODE(PROTOCOL_KERNEL, KRT_METRICS_OFFSET + t);
	ea->attrs[n].flags = 0;
	ea->attrs[n].type = EAF_TYPE_INT;	/* FIXME: Some are EAF_TYPE_BITFIELD */
	ea->attrs[n].u.data = metrics[t];
	n++;
      }

    if (n > 0)
    {
      ea->next = ra->eattrs;
      ea->flags = EALF_SORTED;
      ea->count = n;
      ra->eattrs = ea;
    }
  }

  /*
   * Ideally, now we would send the received route to the rest of kernel code.
   * But IPv6 ECMP routes before 4.11 are sent as a sequence of routes, so we
   * postpone it and merge next hops until the end of the sequence. Note that
   * when doing merging of next hops, we expect the new route to be unipath.
   * Otherwise, we ignore additional next hops in nexthop_insert().
   */

  if (!s->net)
  {
    /* Store the new route */
    s->net = net;
    s->attrs = ra;
    s->proto = p;
    s->new = new;
    s->krt_src = krt_src;
    s->krt_type = i->rtm_type;
    s->krt_proto = i->rtm_protocol;
    s->krt_metric = priority;
  }
  else
  {
    /* Merge next hops with the stored route */
    rta *oa = s->attrs;

    struct nexthop *nhs = &oa->nh;
    nexthop_insert(&nhs, &ra->nh);

    /* Perhaps new nexthop is inserted at the first position */
    if (nhs == &ra->nh)
    {
      /* Swap rtas */
      s->attrs = ra;

      /* Keep old eattrs */
      ra->eattrs = oa->eattrs;
    }
  }
}
1830
1831 void
1832 krt_do_scan(struct krt_proto *p UNUSED) /* CONFIG_ALL_TABLES_AT_ONCE => p is NULL */
1833 {
1834 struct nlmsghdr *h;
1835 struct nl_parse_state s;
1836
1837 nl_parse_begin(&s, 1);
1838 nl_request_dump(AF_UNSPEC, RTM_GETROUTE);
1839 while (h = nl_get_scan())
1840 if (h->nlmsg_type == RTM_NEWROUTE || h->nlmsg_type == RTM_DELROUTE)
1841 nl_parse_route(&s, h);
1842 else
1843 log(L_DEBUG "nl_scan_fire: Unknown packet received (type=%d)", h->nlmsg_type);
1844 nl_parse_end(&s);
1845 }
1846
1847 /*
1848 * Asynchronous Netlink interface
1849 */
1850
1851 static sock *nl_async_sk; /* BIRD socket for asynchronous notifications */
1852 static byte *nl_async_rx_buffer; /* Receive buffer */
1853
/*
 * nl_async_msg - dispatch one asynchronous netlink notification.
 * Route changes go through the regular route parser (in non-scan mode);
 * link and address changes are forwarded to the kif protocol if running.
 */
static void
nl_async_msg(struct nlmsghdr *h)
{
  struct nl_parse_state s;

  switch (h->nlmsg_type)
  {
  case RTM_NEWROUTE:
  case RTM_DELROUTE:
    DBG("KRT: Received async route notification (%d)\n", h->nlmsg_type);
    nl_parse_begin(&s, 0);
    nl_parse_route(&s, h);
    nl_parse_end(&s);
    break;
  case RTM_NEWLINK:
  case RTM_DELLINK:
    DBG("KRT: Received async link notification (%d)\n", h->nlmsg_type);
    if (kif_proto)
      nl_parse_link(h, 0);
    break;
  case RTM_NEWADDR:
  case RTM_DELADDR:
    DBG("KRT: Received async address notification (%d)\n", h->nlmsg_type);
    if (kif_proto)
      nl_parse_addr(h, 0);
    break;
  default:
    DBG("KRT: Received unknown async notification (%d)\n", h->nlmsg_type);
  }
}
1884
/*
 * nl_async_hook - RX hook for the asynchronous netlink socket.
 *
 * Receives one datagram, validates its origin, and dispatches every
 * netlink message it contains via nl_async_msg(). Returns nonzero when
 * more data may still be pending on the socket.
 */
static int
nl_async_hook(sock *sk, uint size UNUSED)
{
  struct iovec iov = { nl_async_rx_buffer, NL_RX_SIZE };
  struct sockaddr_nl sa;
  struct msghdr m = {
    .msg_name = &sa,
    .msg_namelen = sizeof(sa),
    .msg_iov = &iov,
    .msg_iovlen = 1,
  };
  struct nlmsghdr *h;
  int x;
  uint len;

  x = recvmsg(sk->fd, &m, 0);
  if (x < 0)
  {
    if (errno == ENOBUFS)
    {
      /*
       * Netlink reports some packets have been thrown away.
       * One day we might react to it by asking for route table
       * scan in near future.
       */
      log(L_WARN "Kernel dropped some netlink messages, will resync on next scan.");
      return 1;	/* More data are likely to be ready */
    }
    else if (errno != EWOULDBLOCK)
      log(L_ERR "Netlink recvmsg: %m");
    return 0;
  }
  /* A nonzero port id means another userspace process, not the kernel */
  if (sa.nl_pid)		/* It isn't from the kernel */
  {
    DBG("Non-kernel packet\n");
    return 1;
  }
  h = (void *) nl_async_rx_buffer;
  len = x;
  if (m.msg_flags & MSG_TRUNC)
  {
    log(L_WARN "Netlink got truncated asynchronous message");
    return 1;
  }
  /* Walk all netlink messages packed into this one datagram */
  while (NLMSG_OK(h, len))
  {
    nl_async_msg(h);
    h = NLMSG_NEXT(h, len);
  }
  if (len)
    log(L_WARN "nl_async_hook: Found packet remnant of size %d", len);
  return 1;
}
1938
/* Socket errors are handled by draining the socket like the RX hook does */
static void
nl_async_err_hook(sock *sk, int e UNUSED)
{
  nl_async_hook(sk, 0);
}
1944
/*
 * nl_open_async - open the asynchronous netlink notification socket.
 *
 * Subscribes to link, IPv4/IPv6 address and route multicast groups and
 * registers the socket with the BIRD event loop. Idempotent; failures
 * are logged but non-fatal (BIRD then relies on periodic scans only).
 */
static void
nl_open_async(void)
{
  sock *sk;
  struct sockaddr_nl sa;
  int fd;

  /* Already open - nothing to do */
  if (nl_async_sk)
    return;

  DBG("KRT: Opening async netlink socket\n");

  fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (fd < 0)
  {
    log(L_ERR "Unable to open asynchronous rtnetlink socket: %m");
    return;
  }

  bzero(&sa, sizeof(sa));
  sa.nl_family = AF_NETLINK;
  /* Multicast groups we want notifications from */
  sa.nl_groups = RTMGRP_LINK |
    RTMGRP_IPV4_IFADDR | RTMGRP_IPV4_ROUTE |
    RTMGRP_IPV6_IFADDR | RTMGRP_IPV6_ROUTE;

  if (bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0)
  {
    log(L_ERR "Unable to bind asynchronous rtnetlink socket: %m");
    close(fd);
    return;
  }

  nl_async_rx_buffer = xmalloc(NL_RX_SIZE);

  /* Wrap the raw fd into a BIRD socket so the event loop polls it */
  sk = nl_async_sk = sk_new(krt_pool);
  sk->type = SK_MAGIC;
  sk->rx_hook = nl_async_hook;
  sk->err_hook = nl_async_err_hook;
  sk->fd = fd;
  if (sk_open(sk) < 0)
    bug("Netlink: sk_open failed");
}
1987
1988
1989 /*
1990 * Interface to the UNIX krt module
1991 */
1992
/* One-time init: allocate the shared netlink parse pool and the table hash */
void
krt_sys_io_init(void)
{
  nl_linpool = lp_new_default(krt_pool);
  HASH_INIT(nl_table_map, krt_pool, 6);
}
1999
2000 int
2001 krt_sys_start(struct krt_proto *p)
2002 {
2003 struct krt_proto *old = HASH_FIND(nl_table_map, RTH, p->af, krt_table_id(p));
2004
2005 if (old)
2006 {
2007 log(L_ERR "%s: Kernel table %u already registered by %s",
2008 p->p.name, krt_table_id(p), old->p.name);
2009 return 0;
2010 }
2011
2012 HASH_INSERT2(nl_table_map, RTH, krt_pool, p);
2013
2014 nl_open();
2015 nl_open_async();
2016
2017 return 1;
2018 }
2019
/* Unregister the protocol instance from the kernel table hash */
void
krt_sys_shutdown(struct krt_proto *p)
{
  HASH_REMOVE2(nl_table_map, RTH, krt_pool, p);
}
2025
2026 int
2027 krt_sys_reconfigure(struct krt_proto *p UNUSED, struct krt_config *n, struct krt_config *o)
2028 {
2029 return (n->sys.table_id == o->sys.table_id) && (n->sys.metric == o->sys.metric);
2030 }
2031
/* Defaults: main kernel table, metric 32 (dedicated priority for BIRD) */
void
krt_sys_init_config(struct krt_config *cf)
{
  cf->sys.table_id = RT_TABLE_MAIN;
  cf->sys.metric = 32;
}
2038
/* Copy the sysdep part of a krt config during reconfiguration */
void
krt_sys_copy_config(struct krt_config *d, struct krt_config *s)
{
  d->sys.table_id = s->sys.table_id;
  d->sys.metric = s->sys.metric;
}
2045
/* Display names for the per-route kernel metrics (indexed as EA_KRT_METRICS
 * offsets); slot 0 is the validity bitmask and has no name. */
static const char *krt_metrics_names[KRT_METRICS_MAX] = {
  NULL, "lock", "mtu", "window", "rtt", "rttvar", "sstresh", "cwnd", "advmss",
  "reordering", "hoplimit", "initcwnd", "features", "rto_min", "initrwnd", "quickack"
};

/* Display names for bits of the "features" metric bitfield */
static const char *krt_features_names[KRT_FEATURES_MAX] = {
  "ecn", NULL, NULL, "allfrag"
};
2054
/*
 * krt_sys_get_attr - format a kernel-specific extended attribute for display.
 * Returns GA_NAME when only the attribute name was written to @buf, GA_FULL
 * when the complete value was formatted, GA_UNKNOWN for unrecognized ids.
 */
int
krt_sys_get_attr(eattr *a, byte *buf, int buflen UNUSED)
{
  switch (a->id)
  {
  case EA_KRT_PREFSRC:
    bsprintf(buf, "prefsrc");
    return GA_NAME;

  case EA_KRT_REALM:
    bsprintf(buf, "realm");
    return GA_NAME;

  case EA_KRT_SCOPE:
    bsprintf(buf, "scope");
    return GA_NAME;

  case EA_KRT_LOCK:
    buf += bsprintf(buf, "lock:");
    ea_format_bitfield(a, buf, buflen, krt_metrics_names, 2, KRT_METRICS_MAX);
    return GA_FULL;

  case EA_KRT_FEATURES:
    buf += bsprintf(buf, "features:");
    ea_format_bitfield(a, buf, buflen, krt_features_names, 0, KRT_FEATURES_MAX);
    return GA_FULL;

  default:;
    /* Remaining ids may fall into the per-metric EA_KRT_METRICS range */
    int id = (int)EA_ID(a->id) - KRT_METRICS_OFFSET;
    if (id > 0 && id < KRT_METRICS_MAX)
    {
      bsprintf(buf, "%s", krt_metrics_names[id]);
      return GA_NAME;
    }

    return GA_UNKNOWN;
  }
}
2093
2094
2095
/* The kif protocol shares both netlink sockets with the krt protocol */
void
kif_sys_start(struct kif_proto *p UNUSED)
{
  nl_open();
  nl_open_async();
}
2102
/* Nothing to tear down - the netlink sockets stay open for the daemon */
void
kif_sys_shutdown(struct kif_proto *p UNUSED)
{
}
2107
/* No sysdep primary-address handling on Linux; report "no change" */
int
kif_update_sysdep_addr(struct iface *i UNUSED)
{
  return 0;
}