1 /*
2 * BIRD -- Routing Information Protocol (RIP)
3 *
4 * (c) 1998--1999 Pavel Machek <pavel@ucw.cz>
5 * (c) 2004--2013 Ondrej Filip <feela@network.cz>
6 * (c) 2009--2015 Ondrej Zajicek <santiago@crfreenet.org>
7 * (c) 2009--2015 CZ.NIC z.s.p.o.
8 *
9 * Can be freely distributed and used under the terms of the GNU GPL.
10 */
11
12 /**
13 * DOC: Routing Information Protocol (RIP)
14 *
15 * The RIP protocol is implemented in two files: |rip.c| containing the protocol
16 * logic, route management and the protocol glue with BIRD core, and |packets.c|
17 * handling RIP packet processing, RX, TX and protocol sockets.
18 *
19 * Each instance of RIP is described by a structure &rip_proto, which contains
20 * an internal RIP routing table, a list of protocol interfaces and the main
21 * timer responsible for RIP routing table cleanup.
22 *
23 * The RIP internal routing table contains incoming and outgoing routes. For each
24 * network (represented by structure &rip_entry) there is one outgoing route
25 * stored directly in &rip_entry and a one-way linked list of incoming routes
26 * (structures &rip_rte). The list contains incoming routes from different RIP
27 * neighbors, but only routes with the lowest metric are stored (i.e., all
28 * stored incoming routes have the same metric).
29 *
30 * Note that RIP itself does not select the outgoing route; that is done by the core
31 * routing table. When a new incoming route is received, it is propagated to the
32 * RIP table by rip_update_rte() and possibly stored in the list of incoming
33 * routes. Then the change may be propagated to the core by rip_announce_rte().
34 * The core selects the best route and propagates it to RIP by rip_rt_notify(),
35 * which updates the outgoing route part of &rip_entry and possibly triggers route
36 * propagation by rip_trigger_update().
37 *
38 * RIP interfaces are represented by structures &rip_iface. A RIP interface
39 * contains a per-interface socket, a list of associated neighbors, interface
40 * configuration, and state information related to scheduled interface events
41 * and running update sessions. RIP interfaces are added and removed based on
42 * core interface notifications.
43 *
44 * There are two RIP interface events - regular updates and triggered updates.
45 * Both are managed from the RIP interface timer (rip_iface_timer()). Regular
46 * updates are sent at a fixed interval and propagate the whole routing table,
47 * while triggered updates are scheduled by rip_trigger_update() due to some
48 * routing table change and propagate only the routes modified since the time
49 * they were scheduled. There are also unicast-destined requested updates, but
50 * these are sent directly as a reaction to a received RIP request message. The
51 * update session is started by rip_send_table(). There may be at most one
52 * active update session per interface, as the associated state (including the
53 * fib iterator) is stored directly in &rip_iface structure.
54 *
55 * RIP neighbors are represented by structures &rip_neighbor. Compared to
56 * neighbor handling in other routing protocols, RIP does not have explicit
57 * neighbor discovery and adjacency maintenance, which makes the &rip_neighbor
58 * related code a bit peculiar. RIP neighbors are interlinked with core neighbor
59 * structures (&neighbor) and use core neighbor notifications to ensure that RIP
60 * neighbors are timely removed. RIP neighbors are added based on received route
61 * neighbors are removed in a timely manner. RIP neighbors are added based on received route
62 *
63 * RIP neighbors are referenced by RIP routes and use a counter to track the number
64 * of associated routes, but when these RIP routes time out, the associated RIP
65 * neighbor is still alive (with a zero counter). When a RIP neighbor is removed
66 * but still has some associated routes, it is not freed, just changed to a
67 * detached state (core neighbors and RIP ifaces are unlinked); then, during the
68 * main timer cleanup phase, the associated routes are removed and the
69 * &rip_neighbor structure is finally freed.
70 *
71 * Supported standards:
72 * RFC 1058 - RIPv1
73 * RFC 2453 - RIPv2
74 * RFC 2080 - RIPng
75 * RFC 2091 - Triggered RIP for demand circuits
76 * RFC 4822 - RIP cryptographic authentication
77 */
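/*
 * Illustrative sketch (kept in a comment, not part of the protocol code): the
 * invariant of the incoming-route list described above. The toy names below
 * are hypothetical; the real structures are declared in |rip.h|. The point is
 * only that the list is either flushed by a strictly better metric or
 * extended by an equal one, so all stored routes share the lowest metric seen
 * so far.
 *
 *   struct toy_rte { unsigned metric; struct toy_rte *next; };
 *
 *   static void toy_update(struct toy_rte **head, struct toy_rte *rt)
 *   {
 *     // a strictly better metric flushes the whole list
 *     while (*head && rt->metric < (*head)->metric)
 *       { struct toy_rte *old = *head; *head = old->next; free(old); }
 *
 *     // an equal (or first) metric is linked in and kept for ECMP
 *     if (!*head || rt->metric == (*head)->metric)
 *       { rt->next = *head; *head = rt; }
 *
 *     // a worse metric is ignored
 *   }
 */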
78
79 #include <stdlib.h>
80 #include "rip.h"
81
82
83 static inline void rip_lock_neighbor(struct rip_neighbor *n);
84 static inline void rip_unlock_neighbor(struct rip_neighbor *n);
85 static inline int rip_iface_link_up(struct rip_iface *ifa);
86 static inline void rip_kick_timer(struct rip_proto *p);
87 static inline void rip_iface_kick_timer(struct rip_iface *ifa);
88 static void rip_iface_timer(timer *timer);
89 static void rip_trigger_update(struct rip_proto *p);
90
91
92 /*
93 * RIP routes
94 */
95
96 static struct rip_rte *
97 rip_add_rte(struct rip_proto *p, struct rip_rte **rp, struct rip_rte *src)
98 {
99 struct rip_rte *rt = sl_alloc(p->rte_slab);
100
101 memcpy(rt, src, sizeof(struct rip_rte));
102 rt->next = *rp;
103 *rp = rt;
104
105 rip_lock_neighbor(rt->from);
106
107 return rt;
108 }
109
110 static inline void
111 rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
112 {
113 struct rip_rte *rt = *rp;
114
115 rip_unlock_neighbor(rt->from);
116
117 *rp = rt->next;
118 sl_free(p->rte_slab, rt);
119 }
120
121 static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)
122 { return a->metric == b->metric && a->tag == b->tag && ipa_equal(a->next_hop, b->next_hop); }
123
124 static inline int rip_valid_rte(struct rip_rte *rt)
125 { return rt->from->ifa != NULL; }
126
127 /**
128 * rip_announce_rte - announce route from RIP routing table to the core
129 * @p: RIP instance
130 * @en: related network
131 *
132 * The function takes the list of incoming routes from @en, prepares an appropriate
133 * &rte for the core and propagates it by rte_update().
134 */
135 static void
136 rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
137 {
138 struct rip_rte *rt = en->routes;
139
140 /* Find first valid rte */
141 while (rt && !rip_valid_rte(rt))
142 rt = rt->next;
143
144 if (rt)
145 {
146 /* Update */
147 rta a0 = {
148 .pref = p->p.main_channel->preference,
149 .source = RTS_RIP,
150 .scope = SCOPE_UNIVERSE,
151 .dest = RTD_UNICAST,
152 };
153
154 u8 rt_metric = rt->metric;
155 u16 rt_tag = rt->tag;
156
157 if (p->ecmp)
158 {
159 /* ECMP route */
160 struct nexthop *nhs = NULL;
161 int num = 0;
162
163 for (rt = en->routes; rt && (num < p->ecmp); rt = rt->next)
164 {
165 if (!rip_valid_rte(rt))
166 continue;
167
168 struct nexthop *nh = allocz(sizeof(struct nexthop));
169
170 nh->gw = rt->next_hop;
171 nh->iface = rt->from->ifa->iface;
172 nh->weight = rt->from->ifa->cf->ecmp_weight;
173
174 nexthop_insert(&nhs, nh);
175 num++;
176
177 if (rt->tag != rt_tag)
178 rt_tag = 0;
179 }
180
181 a0.nh = *nhs;
182 }
183 else
184 {
185 /* Unipath route */
186 a0.from = rt->from->nbr->addr;
187 a0.nh.gw = rt->next_hop;
188 a0.nh.iface = rt->from->ifa->iface;
189 }
190
191 rta *a = rta_lookup(&a0);
192 rte *e = rte_get_temp(a, p->p.main_source);
193
194 e->u.rip.from = a0.nh.iface;
195 e->u.rip.metric = rt_metric;
196 e->u.rip.tag = rt_tag;
197 e->pflags = EA_ID_FLAG(EA_RIP_METRIC) | EA_ID_FLAG(EA_RIP_TAG);
198
199 rte_update(&p->p, en->n.addr, e);
200 }
201 else
202 {
203 /* Withdraw */
204 rte_update(&p->p, en->n.addr, NULL);
205 }
206 }
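/*
 * Worked example of the tag handling in the ECMP branch above (values made
 * up): two equal-metric incoming routes carrying tags 100 and 200 cannot be
 * represented by a single merged ECMP route, so rt_tag is reset to 0; when
 * all selected members agree on the tag, the common tag is exported as-is.
 */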
207
208 /**
209 * rip_update_rte - enter a route update into the RIP routing table
210 * @p: RIP instance
211 * @n: network address
212 * @new: a &rip_rte representing the new route
213 *
214 * The function is called by the RIP packet processing code whenever it receives
215 * a reachable route. The appropriate routing table entry is found and the list
216 * of incoming routes is updated. Eventually, the change is also propagated to
217 * the core by rip_announce_rte(). Note that for unreachable routes,
218 * rip_withdraw_rte() should be called instead of rip_update_rte().
219 */
220 void
221 rip_update_rte(struct rip_proto *p, net_addr *n, struct rip_rte *new)
222 {
223 struct rip_entry *en = fib_get(&p->rtable, n);
224 struct rip_rte *rt, **rp;
225 int changed = 0;
226
227 /* If the new route is better, remove all current routes */
228 if (en->routes && new->metric < en->routes->metric)
229 while (en->routes)
230 rip_remove_rte(p, &en->routes);
231
232 /* Find the old route (also set rp for later) */
233 for (rp = &en->routes; rt = *rp; rp = &rt->next)
234 if (rt->from == new->from)
235 {
236 if (rip_same_rte(rt, new))
237 {
238 rt->expires = new->expires;
239 return;
240 }
241
242 /* Remove the old route */
243 rip_remove_rte(p, rp);
244 changed = 1;
245 break;
246 }
247
248 /* If the new route is optimal, add it to the list */
249 if (!en->routes || new->metric == en->routes->metric)
250 {
251 rt = rip_add_rte(p, rp, new);
252 changed = 1;
253 }
254
255 /* Announce change if on relevant position (the first or any for ECMP) */
256 if (changed && (rp == &en->routes || p->ecmp))
257 rip_announce_rte(p, en);
258 }
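/*
 * Minimal usage sketch (in a comment, not compiled): how the packet
 * processing code in |packets.c| is expected to feed a received route into
 * this table. Only the fields touched in this file are shown (from, metric,
 * tag, next_hop, expires); the authoritative definition and the real caller
 * live in |rip.h| and |packets.c|. The rcv_* and sender_addr names are
 * hypothetical.
 *
 *   struct rip_rte new = {
 *     .from = rip_get_neighbor(p, &sender_addr, ifa),
 *     .metric = rcv_metric + ifa->cf->metric,
 *     .tag = rcv_tag,
 *     .next_hop = rcv_next_hop,
 *     .expires = current_time() + ifa->cf->timeout_time,
 *   };
 *
 *   if (new.metric < p->infinity)
 *     rip_update_rte(p, &net, &new);
 *   else
 *     rip_withdraw_rte(p, &net, new.from);
 */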
259
260 /**
261 * rip_withdraw_rte - enter a route withdrawal into the RIP routing table
262 * @p: RIP instance
263 * @n: network address
264 * @from: a &rip_neighbor propagating the withdrawal
265 *
266 * The function is called by the RIP packet processing code whenever it receives
267 * an unreachable route. The incoming route for the given network from neighbor
268 * @from is removed. Eventually, the change is also propagated by rip_announce_rte().
269 */
270 void
271 rip_withdraw_rte(struct rip_proto *p, net_addr *n, struct rip_neighbor *from)
272 {
273 struct rip_entry *en = fib_find(&p->rtable, n);
274 struct rip_rte *rt, **rp;
275
276 if (!en)
277 return;
278
279 /* Find the old route */
280 for (rp = &en->routes; rt = *rp; rp = &rt->next)
281 if (rt->from == from)
282 break;
283
284 if (!rt)
285 return;
286
287 /* Remove the old route */
288 rip_remove_rte(p, rp);
289
290 /* Announce change if on relevant position */
291 if (rp == &en->routes || p->ecmp)
292 rip_announce_rte(p, en);
293 }
294
295 /*
296 * rip_rt_notify - the core tells us about a new route, so we store
297 * it in our data structures.
298 */
299 static void
300 rip_rt_notify(struct proto *P, struct channel *ch UNUSED, struct network *net, struct rte *new,
301 struct rte *old UNUSED)
302 {
303 struct rip_proto *p = (struct rip_proto *) P;
304 struct rip_entry *en;
305 int old_metric;
306
307 if (new)
308 {
309 /* Update */
310 u32 rt_metric = ea_get_int(new->attrs->eattrs, EA_RIP_METRIC, 1);
311 u32 rt_tag = ea_get_int(new->attrs->eattrs, EA_RIP_TAG, 0);
312
313 if (rt_metric > p->infinity)
314 {
315 log(L_WARN "%s: Invalid rip_metric value %u for route %N",
316 p->p.name, rt_metric, net->n.addr);
317 rt_metric = p->infinity;
318 }
319
320 if (rt_tag > 0xffff)
321 {
322 log(L_WARN "%s: Invalid rip_tag value %u for route %N",
323 p->p.name, rt_tag, net->n.addr);
324 rt_metric = p->infinity;
325 rt_tag = 0;
326 }
327
328 /*
329 * Note that we accept exported routes with infinity metric (this could
330 * happen if rip_metric is modified in filters). Such an entry has infinity
331 * metric but is RIP_ENTRY_VALID and therefore is not subject to garbage
332 * collection.
333 */
334
335 en = fib_get(&p->rtable, net->n.addr);
336
337 old_metric = en->valid ? en->metric : -1;
338
339 en->valid = RIP_ENTRY_VALID;
340 en->metric = rt_metric;
341 en->tag = rt_tag;
342 en->from = (new->src->proto == P) ? new->u.rip.from : NULL;
343 en->iface = new->attrs->nh.iface;
344 en->next_hop = new->attrs->nh.gw;
345 }
346 else
347 {
348 /* Withdraw */
349 en = fib_find(&p->rtable, net->n.addr);
350
351 if (!en || en->valid != RIP_ENTRY_VALID)
352 return;
353
354 old_metric = en->metric;
355
356 en->valid = RIP_ENTRY_STALE;
357 en->metric = p->infinity;
358 en->tag = 0;
359 en->from = NULL;
360 en->iface = NULL;
361 en->next_hop = IPA_NONE;
362 }
363
364 /* Activate triggered updates */
365 if (en->metric != old_metric)
366 {
367 en->changed = current_time();
368 rip_trigger_update(p);
369 }
370 }
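/*
 * Worked example of the clamping above, assuming the usual infinity of 16:
 * an export filter setting rip_metric to 20 triggers the warning and the
 * route is advertised with metric 16 (unreachable to receivers, but still
 * RIP_ENTRY_VALID here); a rip_tag of 0x12345 does not fit the 16-bit RIP
 * tag field, so the route is advertised as unreachable with tag 0.
 */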
371
372 void
373 rip_flush_table(struct rip_proto *p, struct rip_neighbor *n)
374 {
375 btime expires = current_time() + n->ifa->cf->timeout_time;
376
377 FIB_WALK(&p->rtable, struct rip_entry, en)
378 {
379 for (struct rip_rte *e = en->routes; e; e = e->next)
380 if ((e->from == n) && (e->expires == TIME_INFINITY))
381 e->expires = expires;
382 }
383 FIB_WALK_END;
384 }
385
386
387 /*
388 * RIP neighbors
389 */
390
391 struct rip_neighbor *
392 rip_get_neighbor(struct rip_proto *p, ip_addr *a, struct rip_iface *ifa)
393 {
394 neighbor *nbr = neigh_find(&p->p, *a, ifa->iface, 0);
395
396 if (!nbr || (nbr->scope == SCOPE_HOST) || !rip_iface_link_up(ifa))
397 return NULL;
398
399 if (nbr->data)
400 return nbr->data;
401
402 TRACE(D_EVENTS, "New neighbor %I on %s", *a, ifa->iface->name);
403
404 struct rip_neighbor *n = mb_allocz(p->p.pool, sizeof(struct rip_neighbor));
405 n->ifa = ifa;
406 n->nbr = nbr;
407 nbr->data = n;
408 n->csn = nbr->aux;
409
410 add_tail(&ifa->neigh_list, NODE n);
411
412 return n;
413 }
414
415 static void
416 rip_remove_neighbor(struct rip_proto *p, struct rip_neighbor *n)
417 {
418 neighbor *nbr = n->nbr;
419
420 TRACE(D_EVENTS, "Removing neighbor %I on %s", nbr->addr, nbr->ifreq->name);
421
422 rem_node(NODE n);
423 n->ifa = NULL;
424 n->nbr = NULL;
425 nbr->data = NULL;
426 nbr->aux = n->csn;
427
428 rfree(n->bfd_req);
429 n->bfd_req = NULL;
430 n->last_seen = 0;
431
432 if (!n->uc)
433 mb_free(n);
434
435 /* Related routes are removed in rip_timer() */
436 rip_kick_timer(p);
437 }
438
439 static inline void
440 rip_lock_neighbor(struct rip_neighbor *n)
441 {
442 n->uc++;
443 }
444
445 static inline void
446 rip_unlock_neighbor(struct rip_neighbor *n)
447 {
448 n->uc--;
449
450 if (!n->nbr && !n->uc)
451 mb_free(n);
452 }
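/*
 * Lifecycle sketch tying the two helpers above to rip_remove_neighbor():
 * every stored &rip_rte holds one reference (uc) on its originating
 * neighbor, so a neighbor removed while uc > 0 is only detached
 * (n->nbr == NULL) and the final rip_unlock_neighbor() from the last
 * removed route frees it.
 *
 *   rip_add_rte()    -> rip_lock_neighbor()    // uc++
 *   rip_remove_rte() -> rip_unlock_neighbor()  // uc--, free if detached
 */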
453
454 static void
455 rip_neigh_notify(struct neighbor *nbr)
456 {
457 struct rip_proto *p = (struct rip_proto *) nbr->proto;
458 struct rip_neighbor *n = nbr->data;
459
460 if (!n)
461 return;
462
463 /*
464 * We assume that rip_neigh_notify() is called before rip_if_notify() for
465 * IF_CHANGE_DOWN and therefore n->ifa is still valid. We have no such
466 * ordering assumption for IF_CHANGE_LINK, so we test link state of the
467 * underlying iface instead of just rip_iface state.
468 */
469 if ((nbr->scope <= 0) || !rip_iface_link_up(n->ifa))
470 rip_remove_neighbor(p, n);
471 }
472
473 static void
474 rip_bfd_notify(struct bfd_request *req)
475 {
476 struct rip_neighbor *n = req->data;
477 struct rip_proto *p = n->ifa->rip;
478
479 if (req->down)
480 {
481 TRACE(D_EVENTS, "BFD session down for nbr %I on %s",
482 n->nbr->addr, n->ifa->iface->name);
483 rip_remove_neighbor(p, n);
484 }
485 }
486
487 void
488 rip_update_bfd(struct rip_proto *p, struct rip_neighbor *n)
489 {
490 int use_bfd = n->ifa->cf->bfd && n->last_seen;
491
492 if (use_bfd && !n->bfd_req)
493 {
494 /*
495 * For RIPv2, use the same address as rip_open_socket(). For RIPng, the neighbor
496 * should have an address from the same prefix, thus also link-local. It
497 * may cause problems if two link-local addresses are assigned to one iface.
498 */
499 ip_addr saddr = rip_is_v2(p) ? n->ifa->sk->saddr : n->nbr->ifa->ip;
500 n->bfd_req = bfd_request_session(p->p.pool, n->nbr->addr, saddr,
501 n->nbr->iface, p->p.vrf,
502 rip_bfd_notify, n, NULL);
503 }
504
505 if (!use_bfd && n->bfd_req)
506 {
507 rfree(n->bfd_req);
508 n->bfd_req = NULL;
509 }
510 }
511
512
513 /*
514 * RIP interfaces
515 */
516
517 static void
518 rip_iface_start(struct rip_iface *ifa)
519 {
520 struct rip_proto *p = ifa->rip;
521
522 TRACE(D_EVENTS, "Starting interface %s", ifa->iface->name);
523
524 if (! ifa->cf->demand_circuit)
525 {
526 ifa->next_regular = current_time() + (random() % ifa->cf->update_time) + 100 MS;
527 tm_set(ifa->timer, ifa->next_regular);
528 }
529 else
530 {
531 ifa->next_regular = TIME_INFINITY;
532 }
533
534 ifa->up = 1;
535
536 if (ifa->cf->passive)
537 return;
538
539 rip_send_request(p, ifa);
540 rip_send_table(p, ifa, ifa->addr, 0);
541 }
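/*
 * Scheduling example for the code above (assuming the common 30 s update
 * time): a newly started, non-demand-circuit interface sends its first
 * regular update at a random point in roughly (0.1 s, 30.1 s), which spreads
 * updates of different interfaces apart instead of synchronizing them.
 */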
542
543 static void
544 rip_iface_stop(struct rip_iface *ifa)
545 {
546 struct rip_proto *p = ifa->rip;
547 struct rip_neighbor *n;
548
549 TRACE(D_EVENTS, "Stopping interface %s", ifa->iface->name);
550
551 rip_reset_tx_session(p, ifa);
552
553 ifa->next_regular = 0;
554 ifa->next_triggered = 0;
555 ifa->want_triggered = 0;
556
557 if (ifa->tx_pending)
558 ifa->tx_seqnum++;
559
560 ifa->tx_pending = 0;
561 ifa->req_pending = 0;
562
563 if (ifa->cf->demand_circuit && !ifa->cf->passive)
564 rip_send_flush(p, ifa);
565
566 WALK_LIST_FIRST(n, ifa->neigh_list)
567 rip_remove_neighbor(p, n);
568
569 tm_stop(ifa->timer);
570 tm_stop(ifa->rxmt_timer);
571 ifa->up = 0;
572 }
573
574 static inline int
575 rip_iface_link_up(struct rip_iface *ifa)
576 {
577 return !ifa->cf->check_link || (ifa->iface->flags & IF_LINK_UP);
578 }
579
580 static void
581 rip_iface_update_state(struct rip_iface *ifa)
582 {
583 int up = ifa->sk && rip_iface_link_up(ifa);
584
585 if (up == ifa->up)
586 return;
587
588 if (up)
589 rip_iface_start(ifa);
590 else
591 rip_iface_stop(ifa);
592 }
593
594 static void
595 rip_iface_update_buffers(struct rip_iface *ifa)
596 {
597 if (!ifa->sk)
598 return;
599
600 uint rbsize = ifa->cf->rx_buffer ?: ifa->iface->mtu;
601 uint tbsize = ifa->cf->tx_length ?: ifa->iface->mtu;
602 rbsize = MAX(rbsize, tbsize);
603
604 sk_set_rbsize(ifa->sk, rbsize);
605 sk_set_tbsize(ifa->sk, tbsize);
606
607 uint headers = (rip_is_v2(ifa->rip) ? IP4_HEADER_LENGTH : IP6_HEADER_LENGTH) + UDP_HEADER_LENGTH;
608 ifa->tx_plen = tbsize - headers;
609
610 if (ifa->cf->auth_type == RIP_AUTH_CRYPTO)
611 ifa->tx_plen -= RIP_AUTH_TAIL_LENGTH + max_mac_length(ifa->cf->passwords);
612 }
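/*
 * Worked example of the payload computation above (IPv4, assuming MTU 1500
 * and no explicit tx length): tbsize = 1500, headers = 20 (IPv4) + 8 (UDP)
 * = 28, so tx_plen = 1472 octets. With cryptographic authentication the
 * payload further shrinks by the authentication trailer and the maximum MAC
 * length of the configured passwords.
 */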
613
614 static inline void
615 rip_iface_update_bfd(struct rip_iface *ifa)
616 {
617 struct rip_proto *p = ifa->rip;
618 struct rip_neighbor *n;
619
620 WALK_LIST(n, ifa->neigh_list)
621 rip_update_bfd(p, n);
622 }
623
624
625 static void
626 rip_iface_locked(struct object_lock *lock)
627 {
628 struct rip_iface *ifa = lock->data;
629 struct rip_proto *p = ifa->rip;
630
631 if (!rip_open_socket(ifa))
632 {
633 log(L_ERR "%s: Cannot open socket for %s", p->p.name, ifa->iface->name);
634 return;
635 }
636
637 rip_iface_update_buffers(ifa);
638 rip_iface_update_state(ifa);
639 }
640
641
642 static struct rip_iface *
643 rip_find_iface(struct rip_proto *p, struct iface *what)
644 {
645 struct rip_iface *ifa;
646
647 WALK_LIST(ifa, p->iface_list)
648 if (ifa->iface == what)
649 return ifa;
650
651 return NULL;
652 }
653
654 static void
655 rip_add_iface(struct rip_proto *p, struct iface *iface, struct rip_iface_config *ic)
656 {
657 struct rip_iface *ifa;
658
659 TRACE(D_EVENTS, "Adding interface %s", iface->name);
660
661 ifa = mb_allocz(p->p.pool, sizeof(struct rip_iface));
662 ifa->rip = p;
663 ifa->iface = iface;
664 ifa->cf = ic;
665
666 if (ipa_nonzero(ic->address))
667 ifa->addr = ic->address;
668 else if (ic->mode == RIP_IM_MULTICAST)
669 ifa->addr = rip_is_v2(p) ? IP4_RIP_ROUTERS : IP6_RIP_ROUTERS;
670 else /* Broadcast */
671 ifa->addr = iface->addr4->brd;
672 /*
673 * The above is just a workaround for BSD as it can't send broadcasts
674 * to 255.255.255.255. BSD systems need the network broadcast address instead.
675 *
676 * TODO: move this to sysdep code
677 */
678
679 init_list(&ifa->neigh_list);
680
681 add_tail(&p->iface_list, NODE ifa);
682
683 ifa->timer = tm_new_init(p->p.pool, rip_iface_timer, ifa, 0, 0);
684 ifa->rxmt_timer = tm_new_init(p->p.pool, rip_rxmt_timeout, ifa, 0, 0);
685
686 struct object_lock *lock = olock_new(p->p.pool);
687 lock->type = OBJLOCK_UDP;
688 lock->port = ic->port;
689 lock->iface = iface;
690 lock->data = ifa;
691 lock->hook = rip_iface_locked;
692 ifa->lock = lock;
693
694 olock_acquire(lock);
695 }
696
697 static void
698 rip_remove_iface(struct rip_proto *p, struct rip_iface *ifa)
699 {
700 rip_iface_stop(ifa);
701
702 TRACE(D_EVENTS, "Removing interface %s", ifa->iface->name);
703
704 rem_node(NODE ifa);
705
706 rfree(ifa->sk);
707 rfree(ifa->lock);
708 rfree(ifa->timer);
709
710 mb_free(ifa);
711 }
712
713 static int
714 rip_reconfigure_iface(struct rip_proto *p, struct rip_iface *ifa, struct rip_iface_config *new)
715 {
716 struct rip_iface_config *old = ifa->cf;
717
718 /* Changing any of these options would require resetting the iface socket */
719 if ((new->mode != old->mode) ||
720 (new->port != old->port) ||
721 (new->tx_tos != old->tx_tos) ||
722 (new->tx_priority != old->tx_priority) ||
723 (new->ttl_security != old->ttl_security) ||
724 (new->demand_circuit != old->demand_circuit))
725 return 0;
726
727 TRACE(D_EVENTS, "Reconfiguring interface %s", ifa->iface->name);
728
729 ifa->cf = new;
730
731 rip_iface_update_buffers(ifa);
732
733 if ((! ifa->cf->demand_circuit) &&
734 (ifa->next_regular > (current_time() + new->update_time)))
735 ifa->next_regular = current_time() + (random() % new->update_time) + 100 MS;
736
737 if (ifa->up && new->demand_circuit && (new->passive != old->passive))
738 {
739 if (new->passive)
740 rip_send_flush(p, ifa);
741 else
742 {
743 rip_send_request(p, ifa);
744 rip_send_table(p, ifa, ifa->addr, 0);
745 }
746 }
747
748 if (new->check_link != old->check_link)
749 rip_iface_update_state(ifa);
750
751 if (new->bfd != old->bfd)
752 rip_iface_update_bfd(ifa);
753
754 if (ifa->up)
755 rip_iface_kick_timer(ifa);
756
757 return 1;
758 }
759
760 static void
761 rip_reconfigure_ifaces(struct rip_proto *p, struct rip_config *cf)
762 {
763 struct iface *iface;
764
765 WALK_LIST(iface, iface_list)
766 {
767 if (!(iface->flags & IF_UP))
768 continue;
769
770 /* Ignore ifaces without appropriate address */
771 if (rip_is_v2(p) ? !iface->addr4 : !iface->llv6)
772 continue;
773
774 struct rip_iface *ifa = rip_find_iface(p, iface);
775 struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
776
777 if (ifa && ic)
778 {
779 if (rip_reconfigure_iface(p, ifa, ic))
780 continue;
781
782 /* Hard restart */
783 log(L_INFO "%s: Restarting interface %s", p->p.name, ifa->iface->name);
784 rip_remove_iface(p, ifa);
785 rip_add_iface(p, iface, ic);
786 }
787
788 if (ifa && !ic)
789 rip_remove_iface(p, ifa);
790
791 if (!ifa && ic)
792 rip_add_iface(p, iface, ic);
793 }
794 }
795
796 static void
797 rip_if_notify(struct proto *P, unsigned flags, struct iface *iface)
798 {
799 struct rip_proto *p = (void *) P;
800 struct rip_config *cf = (void *) P->cf;
801 struct rip_iface *ifa = rip_find_iface(p, iface);
802
803 if (iface->flags & IF_IGNORE)
804 return;
805
806 /* Add, remove or restart interface */
807 if (flags & (IF_CHANGE_UPDOWN | (rip_is_v2(p) ? IF_CHANGE_ADDR4 : IF_CHANGE_LLV6)))
808 {
809 if (ifa)
810 rip_remove_iface(p, ifa);
811
812 if (!(iface->flags & IF_UP))
813 return;
814
815 /* Ignore ifaces without appropriate address */
816 if (rip_is_v2(p) ? !iface->addr4 : !iface->llv6)
817 return;
818
819 struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
820 if (ic)
821 rip_add_iface(p, iface, ic);
822
823 return;
824 }
825
826 if (!ifa)
827 return;
828
829 if (flags & IF_CHANGE_MTU)
830 rip_iface_update_buffers(ifa);
831
832 if (flags & IF_CHANGE_LINK)
833 rip_iface_update_state(ifa);
834 }
835
836
837 /*
838 * RIP timer events
839 */
840
841 /**
842 * rip_timer - RIP main timer hook
843 * @t: timer
844 *
845 * The RIP main timer is responsible for routing table maintenance. Invalid or
846 * expired routes (&rip_rte) are removed and garbage collection of stale routing
847 * table entries (&rip_entry) is done. Changes are propagated to core tables,
848 * table entries (&rip_entry) is done. Changes are propagated to core tables;
849 * GC time, while interfaces maintain an illusion of per-interface GC times in
850 * rip_send_response().
851 *
852 * Keeping incoming routes and the selected outgoing route are two independent
853 * functions; therefore, after garbage collection some entries now considered
854 * invalid (RIP_ENTRY_DUMMY) may still have a non-empty list of incoming routes,
855 * while some valid entries (representing an outgoing route) may have that list
856 * empty.
857 *
858 * The main timer is not scheduled periodically but it uses the time of the
859 * current next event and the minimal interval of any possible event to compute
860 * the time of the next run.
861 */
862 static void
863 rip_timer(timer *t)
864 {
865 struct rip_proto *p = t->data;
866 struct rip_config *cf = (void *) (p->p.cf);
867 struct rip_iface *ifa;
868 struct rip_neighbor *n, *nn;
869 struct fib_iterator fit;
870 btime now_ = current_time();
871 btime next = now_ + MIN(cf->min_timeout_time, cf->max_garbage_time);
872 btime expires = 0;
873
874 TRACE(D_EVENTS, "Main timer fired");
875
876 FIB_ITERATE_INIT(&fit, &p->rtable);
877
878 loop:
879 FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
880 {
881 struct rip_rte *rt, **rp;
882 int changed = 0;
883
884 /* Checking received routes for timeout and for dead neighbors */
885 for (rp = &en->routes; rt = *rp; /* rp = &rt->next */)
886 {
887 if (!rip_valid_rte(rt) || (rt->expires <= now_))
888 {
889 rip_remove_rte(p, rp);
890 changed = 1;
891 continue;
892 }
893
894 next = MIN(next, rt->expires);
895 rp = &rt->next;
896 }
897
898 /* Propagating the change, if any */
899 if (changed || p->rt_reload)
900 {
901 /*
902 * We have to restart the iteration because there may be a cascade of
903 * synchronous events rip_announce_rte() -> nest table change ->
904 * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
905 */
906
907 FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
908 rip_announce_rte(p, en);
909 goto loop;
910 }
911
912 /* Checking stale entries for garbage collection timeout */
913 if (en->valid == RIP_ENTRY_STALE)
914 {
915 expires = en->changed + cf->max_garbage_time;
916
917 if (expires <= now_)
918 {
919 // TRACE(D_EVENTS, "entry is too old: %N", en->n.addr);
920 en->valid = 0;
921 }
922 else
923 next = MIN(next, expires);
924 }
925
926 /* Remove empty nodes */
927 if (!en->valid && !en->routes)
928 {
929 FIB_ITERATE_PUT(&fit);
930 fib_delete(&p->rtable, en);
931 goto loop;
932 }
933 }
934 FIB_ITERATE_END;
935
936 p->rt_reload = 0;
937
938 /* Handling neighbor expiration */
939 WALK_LIST(ifa, p->iface_list)
940 {
941 /* No expiration for demand circuit ifaces */
942 if (ifa->cf->demand_circuit)
943 continue;
944
945 WALK_LIST_DELSAFE(n, nn, ifa->neigh_list)
946 if (n->last_seen)
947 {
948 expires = n->last_seen + n->ifa->cf->timeout_time;
949
950 if (expires <= now_)
951 rip_remove_neighbor(p, n);
952 else
953 next = MIN(next, expires);
954 }
955 }
956
957 tm_start(p->timer, MAX(next - now_, 100 MS));
958 }
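/*
 * Rescheduling example for the timer above (made-up values): with the
 * earliest received route expiring in 7 s, a stale entry due for garbage
 * collection in 42 s and min_timeout_time/max_garbage_time of 60 s/120 s,
 * next ends up 7 s ahead of now_, so the timer is restarted for 7 s rather
 * than for a fixed period.
 */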
959
960 static inline void
961 rip_kick_timer(struct rip_proto *p)
962 {
963 if ((p->timer->expires > (current_time() + 100 MS)))
964 tm_start(p->timer, 100 MS);
965 }
966
967 /**
968 * rip_iface_timer - RIP interface timer hook
969 * @t: timer
970 *
971 * RIP interface timers are responsible for scheduling both regular and
972 * triggered updates. A fixed, delay-independent period is used for regular
973 * updates, while a minimal separating interval is enforced for triggered updates.
974 * The function also ensures that a new update is not started when the old one
975 * is still running.
976 */
977 static void
978 rip_iface_timer(timer *t)
979 {
980 struct rip_iface *ifa = t->data;
981 struct rip_proto *p = ifa->rip;
982 btime now_ = current_time();
983 btime period = ifa->cf->update_time;
984
985 if (ifa->cf->passive)
986 return;
987
988 TRACE(D_EVENTS, "Interface timer fired for %s", ifa->iface->name);
989
990 if (ifa->tx_active)
991 {
992 tm_start(ifa->timer, 100 MS);
993 return;
994 }
995
996 if (now_ >= ifa->next_regular)
997 {
998 /* Send regular update, set timer for the next period (or a following one if necessary) */
999 TRACE(D_EVENTS, "Sending regular updates for %s", ifa->iface->name);
1000 rip_send_table(p, ifa, ifa->addr, 0);
1001 ifa->next_regular += period * (1 + ((now_ - ifa->next_regular) / period));
1002 ifa->want_triggered = 0;
1003 p->triggered = 0;
1004 }
1005 else if (ifa->want_triggered && (now_ >= ifa->next_triggered))
1006 {
1007 /* Send triggered update, enforce interval between triggered updates */
1008 TRACE(D_EVENTS, "Sending triggered updates for %s", ifa->iface->name);
1009 rip_send_table(p, ifa, ifa->addr, ifa->want_triggered);
1010 ifa->next_triggered = now_ + MIN(5 S, period / 2);
1011 ifa->want_triggered = 0;
1012 p->triggered = 0;
1013 }
1014
1015 if (ifa->want_triggered && (ifa->next_triggered < ifa->next_regular))
1016 tm_set(ifa->timer, ifa->next_triggered);
1017 else if (ifa->next_regular != TIME_INFINITY)
1018 tm_set(ifa->timer, ifa->next_regular);
1019 }
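/*
 * Worked example of the catch-up arithmetic above (assuming a 30 s update
 * period): if the regular update is 65 s overdue, next_regular advances by
 * 30 * (1 + 65/30) = 90 s, i.e. to the first period boundary after now_,
 * instead of firing several updates back to back. The triggered-update
 * hold-down in the other branch is MIN(5 s, 30 s / 2) = 5 s.
 */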
1020
1021
1022 static inline void
1023 rip_iface_kick_timer(struct rip_iface *ifa)
1024 {
1025 if ((! tm_active(ifa->timer)) || (ifa->timer->expires > (current_time() + 100 MS)))
1026 tm_start(ifa->timer, 100 MS);
1027 }
1028
1029 static void
1030 rip_trigger_update(struct rip_proto *p)
1031 {
1032 if (p->triggered)
1033 return;
1034
1035 struct rip_iface *ifa;
1036 WALK_LIST(ifa, p->iface_list)
1037 {
1038 /* Interface not active */
1039 if (! ifa->up)
1040 continue;
1041
1042 /* Already scheduled */
1043 if (ifa->want_triggered)
1044 continue;
1045
1046 TRACE(D_EVENTS, "Scheduling triggered updates for %s", ifa->iface->name);
1047 ifa->want_triggered = current_time();
1048 rip_iface_kick_timer(ifa);
1049 p->triggered = 1;
1050 }
1051 }
1052
1053
1054 /*
1055 * RIP protocol glue
1056 */
1057
1058 static void
1059 rip_reload_routes(struct channel *C)
1060 {
1061 struct rip_proto *p = (struct rip_proto *) C->proto;
1062
1063 if (p->rt_reload)
1064 return;
1065
1066 TRACE(D_EVENTS, "Scheduling route reload");
1067 p->rt_reload = 1;
1068 rip_kick_timer(p);
1069 }
1070
1071 static void
1072 rip_make_tmp_attrs(struct rte *rt, struct linpool *pool)
1073 {
1074 rte_init_tmp_attrs(rt, pool, 2);
1075 rte_make_tmp_attr(rt, EA_RIP_METRIC, EAF_TYPE_INT, rt->u.rip.metric);
1076 rte_make_tmp_attr(rt, EA_RIP_TAG, EAF_TYPE_INT, rt->u.rip.tag);
1077 }
1078
1079 static void
1080 rip_store_tmp_attrs(struct rte *rt, struct linpool *pool)
1081 {
1082 rte_init_tmp_attrs(rt, pool, 2);
1083 rt->u.rip.metric = rte_store_tmp_attr(rt, EA_RIP_METRIC);
1084 rt->u.rip.tag = rte_store_tmp_attr(rt, EA_RIP_TAG);
1085 }
1086
1087 static int
1088 rip_rte_better(struct rte *new, struct rte *old)
1089 {
1090 return new->u.rip.metric < old->u.rip.metric;
1091 }
1092
1093 static int
1094 rip_rte_same(struct rte *new, struct rte *old)
1095 {
1096 return ((new->u.rip.metric == old->u.rip.metric) &&
1097 (new->u.rip.tag == old->u.rip.tag) &&
1098 (new->u.rip.from == old->u.rip.from));
1099 }
1100
1101
1102 static void
1103 rip_postconfig(struct proto_config *CF)
1104 {
1105 // struct rip_config *cf = (void *) CF;
1106
1107 /* Define default channel */
1108 if (! proto_cf_main_channel(CF))
1109 channel_config_new(NULL, net_label[CF->net_type], CF->net_type, CF);
1110 }
1111
1112 static struct proto *
1113 rip_init(struct proto_config *CF)
1114 {
1115 struct proto *P = proto_new(CF);
1116
1117 P->main_channel = proto_add_channel(P, proto_cf_main_channel(CF));
1118
1119 P->if_notify = rip_if_notify;
1120 P->rt_notify = rip_rt_notify;
1121 P->neigh_notify = rip_neigh_notify;
1122 P->reload_routes = rip_reload_routes;
1123 P->make_tmp_attrs = rip_make_tmp_attrs;
1124 P->store_tmp_attrs = rip_store_tmp_attrs;
1125 P->rte_better = rip_rte_better;
1126 P->rte_same = rip_rte_same;
1127
1128 return P;
1129 }
1130
1131 static int
1132 rip_start(struct proto *P)
1133 {
1134 struct rip_proto *p = (void *) P;
1135 struct rip_config *cf = (void *) (P->cf);
1136
1137 init_list(&p->iface_list);
1138 fib_init(&p->rtable, P->pool, cf->rip2 ? NET_IP4 : NET_IP6,
1139 sizeof(struct rip_entry), OFFSETOF(struct rip_entry, n), 0, NULL);
1140 p->rte_slab = sl_new(P->pool, sizeof(struct rip_rte));
1141 p->timer = tm_new_init(P->pool, rip_timer, p, 0, 0);
1142
1143 p->rip2 = cf->rip2;
1144 p->ecmp = cf->ecmp;
1145 p->infinity = cf->infinity;
1146 p->triggered = 0;
1147
1148 p->log_pkt_tbf = (struct tbf){ .rate = 1, .burst = 5 };
1149 p->log_rte_tbf = (struct tbf){ .rate = 4, .burst = 20 };
1150
1151 tm_start(p->timer, MIN(cf->min_timeout_time, cf->max_garbage_time));
1152
1153 return PS_UP;
1154 }
1155
1156 static int
1157 rip_shutdown(struct proto *P)
1158 {
1159 struct rip_proto *p = (void *) P;
1160
1161 TRACE(D_EVENTS, "Shutdown requested");
1162
1163 struct rip_iface *ifa;
1164 WALK_LIST(ifa, p->iface_list)
1165 rip_iface_stop(ifa);
1166
1167 return PS_DOWN;
1168 }
1169
1170 static int
1171 rip_reconfigure(struct proto *P, struct proto_config *CF)
1172 {
1173 struct rip_proto *p = (void *) P;
1174 struct rip_config *new = (void *) CF;
1175 // struct rip_config *old = (void *) (P->cf);
1176
1177 if (new->rip2 != p->rip2)
1178 return 0;
1179
1180 if (new->infinity != p->infinity)
1181 return 0;
1182
1183 if (!proto_configure_channel(P, &P->main_channel, proto_cf_main_channel(CF)))
1184 return 0;
1185
1186 TRACE(D_EVENTS, "Reconfiguring");
1187
1188 p->p.cf = CF;
1189 p->ecmp = new->ecmp;
1190 rip_reconfigure_ifaces(p, new);
1191
1192 p->rt_reload = 1;
1193 rip_kick_timer(p);
1194
1195 return 1;
1196 }
1197
1198 static void
1199 rip_get_route_info(rte *rte, byte *buf)
1200 {
1201 buf += bsprintf(buf, " (%d/%d)", rte->attrs->pref, rte->u.rip.metric);
1202
1203 if (rte->u.rip.tag)
1204 bsprintf(buf, " [%04x]", rte->u.rip.tag);
1205 }
1206
1207 static int
1208 rip_get_attr(const eattr *a, byte *buf, int buflen UNUSED)
1209 {
1210 switch (a->id)
1211 {
1212 case EA_RIP_METRIC:
1213 bsprintf(buf, "metric: %d", a->u.data);
1214 return GA_FULL;
1215
1216 case EA_RIP_TAG:
1217 bsprintf(buf, "tag: %04x", a->u.data);
1218 return GA_FULL;
1219
1220 default:
1221 return GA_UNKNOWN;
1222 }
1223 }
1224
1225 void
1226 rip_show_interfaces(struct proto *P, const char *iff)
1227 {
1228 struct rip_proto *p = (void *) P;
1229 struct rip_iface *ifa = NULL;
1230 struct rip_neighbor *n = NULL;
1231
1232 if (p->p.proto_state != PS_UP)
1233 {
1234 cli_msg(-1021, "%s: is not up", p->p.name);
1235 return;
1236 }
1237
1238 cli_msg(-1021, "%s:", p->p.name);
1239 cli_msg(-1021, "%-10s %-6s %6s %6s %7s",
1240 "Interface", "State", "Metric", "Nbrs", "Timer");
1241
1242 WALK_LIST(ifa, p->iface_list)
1243 {
1244 if (iff && !patmatch(iff, ifa->iface->name))
1245 continue;
1246
1247 int nbrs = 0;
1248 WALK_LIST(n, ifa->neigh_list)
1249 if (n->last_seen)
1250 nbrs++;
1251
1252 btime now_ = current_time();
1253 btime timer = ((ifa->next_regular < TIME_INFINITY) && (ifa->next_regular > now_)) ?
1254 (ifa->next_regular - now_) : 0;
1255 cli_msg(-1021, "%-10s %-6s %6u %6u %7t",
1256 ifa->iface->name, (ifa->up ? "Up" : "Down"), ifa->cf->metric, nbrs, timer);
1257 }
1258 }
1259
1260 void
1261 rip_show_neighbors(struct proto *P, const char *iff)
1262 {
1263 struct rip_proto *p = (void *) P;
1264 struct rip_iface *ifa = NULL;
1265 struct rip_neighbor *n = NULL;
1266
1267 if (p->p.proto_state != PS_UP)
1268 {
1269 cli_msg(-1022, "%s: is not up", p->p.name);
1270 return;
1271 }
1272
1273 cli_msg(-1022, "%s:", p->p.name);
1274 cli_msg(-1022, "%-25s %-10s %6s %6s %7s",
1275 "IP address", "Interface", "Metric", "Routes", "Seen");
1276
1277 WALK_LIST(ifa, p->iface_list)
1278 {
1279 if (iff && !patmatch(iff, ifa->iface->name))
1280 continue;
1281
1282 WALK_LIST(n, ifa->neigh_list)
1283 {
1284 if (!n->last_seen)
1285 continue;
1286
1287 btime timer = current_time() - n->last_seen;
1288 cli_msg(-1022, "%-25I %-10s %6u %6u %7t",
1289 n->nbr->addr, ifa->iface->name, ifa->cf->metric, n->uc, timer);
1290 }
1291 }
1292 }
1293
1294 static void
1295 rip_dump(struct proto *P)
1296 {
1297 struct rip_proto *p = (struct rip_proto *) P;
1298 struct rip_iface *ifa;
1299 int i;
1300
1301 i = 0;
1302 FIB_WALK(&p->rtable, struct rip_entry, en)
1303 {
1304 debug("RIP: entry #%d: %N via %I dev %s valid %d metric %d age %t\n",
1305 i++, en->n.addr, en->next_hop, en->iface ? en->iface->name : "(null)",
1306 en->valid, en->metric, current_time() - en->changed);
1307
1308 for (struct rip_rte *e = en->routes; e; e = e->next)
1309 debug("RIP: via %I metric %d expires %t\n",
1310 e->next_hop, e->metric, e->expires - current_time());
1311 }
1312 FIB_WALK_END;
1313
1314 i = 0;
1315 WALK_LIST(ifa, p->iface_list)
1316 {
1317 debug("RIP: interface #%d: %s, %I, up = %d, busy = %d\n",
1318 i++, ifa->iface->name, ifa->sk ? ifa->sk->daddr : IPA_NONE,
1319 ifa->up, ifa->tx_active);
1320 }
1321 }
1322
1323
1324 struct protocol proto_rip = {
1325 .name = "RIP",
1326 .template = "rip%d",
1327 .class = PROTOCOL_RIP,
1328 .preference = DEF_PREF_RIP,
1329 .channel_mask = NB_IP,
1330 .proto_size = sizeof(struct rip_proto),
1331 .config_size = sizeof(struct rip_config),
1332 .postconfig = rip_postconfig,
1333 .init = rip_init,
1334 .dump = rip_dump,
1335 .start = rip_start,
1336 .shutdown = rip_shutdown,
1337 .reconfigure = rip_reconfigure,
1338 .get_route_info = rip_get_route_info,
1339 .get_attr = rip_get_attr
1340 };