/* proto/rip/rip.c — from thirdparty/bird.git (git.ipfire.org mirror) */
1 /*
2 * BIRD -- Routing Information Protocol (RIP)
3 *
4 * (c) 1998--1999 Pavel Machek <pavel@ucw.cz>
5 * (c) 2004--2013 Ondrej Filip <feela@network.cz>
6 * (c) 2009--2015 Ondrej Zajicek <santiago@crfreenet.org>
7 * (c) 2009--2015 CZ.NIC z.s.p.o.
8 *
9 * Can be freely distributed and used under the terms of the GNU GPL.
10 */
11
12 /**
13 * DOC: Routing Information Protocol (RIP)
14 *
15 * The RIP protocol is implemented in two files: |rip.c| containing the protocol
16 * logic, route management and the protocol glue with BIRD core, and |packets.c|
17 * handling RIP packet processing, RX, TX and protocol sockets.
18 *
19 * Each instance of RIP is described by a structure &rip_proto, which contains
20 * an internal RIP routing table, a list of protocol interfaces and the main
21 * timer responsible for RIP routing table cleanup.
22 *
23 * RIP internal routing table contains incoming and outgoing routes. For each
24 * network (represented by structure &rip_entry) there is one outgoing route
25 * stored directly in &rip_entry and an one-way linked list of incoming routes
26 * (structures &rip_rte). The list contains incoming routes from different RIP
27 * neighbors, but only routes with the lowest metric are stored (i.e., all
28 * stored incoming routes have the same metric).
29 *
30 * Note that RIP itself does not select outgoing route, that is done by the core
31 * routing table. When a new incoming route is received, it is propagated to the
32 * RIP table by rip_update_rte() and possibly stored in the list of incoming
33 * routes. Then the change may be propagated to the core by rip_announce_rte().
34 * The core selects the best route and propagate it to RIP by rip_rt_notify(),
35 * which updates outgoing route part of &rip_entry and possibly triggers route
36 * propagation by rip_trigger_update().
37 *
38 * RIP interfaces are represented by structures &rip_iface. A RIP interface
39 * contains a per-interface socket, a list of associated neighbors, interface
40 * configuration, and state information related to scheduled interface events
41 * and running update sessions. RIP interfaces are added and removed based on
42 * core interface notifications.
43 *
44 * There are two RIP interface events - regular updates and triggered updates.
45 * Both are managed from the RIP interface timer (rip_iface_timer()). Regular
46 * updates are called at fixed interval and propagate the whole routing table,
47 * while triggered updates are scheduled by rip_trigger_update() due to some
48 * routing table change and propagate only the routes modified since the time
49 * they were scheduled. There are also unicast-destined requested updates, but
50 * these are sent directly as a reaction to received RIP request message. The
51 * update session is started by rip_send_table(). There may be at most one
52 * active update session per interface, as the associated state (including the
53 * fib iterator) is stored directly in &rip_iface structure.
54 *
55 * RIP neighbors are represented by structures &rip_neighbor. Compared to
56 * neighbor handling in other routing protocols, RIP does not have explicit
57 * neighbor discovery and adjacency maintenance, which makes the &rip_neighbor
58 * related code a bit peculiar. RIP neighbors are interlinked with core neighbor
59 * structures (&neighbor) and use core neighbor notifications to ensure that RIP
60 * neighbors are timely removed. RIP neighbors are added based on received route
61 * notifications and removed based on core neighbor and RIP interface events.
62 *
63 * RIP neighbors are linked by RIP routes and use counter to track the number of
64 * associated routes, but when these RIP routes timeout, associated RIP neighbor
65 * is still alive (with zero counter). When RIP neighbor is removed but still
66 * has some associated routes, it is not freed, just changed to detached state
67 * (core neighbors and RIP ifaces are unlinked), then during the main timer
68 * cleanup phase the associated routes are removed and the &rip_neighbor
69 * structure is finally freed.
70 *
71 * Supported standards:
72 * - RFC 1058 - RIPv1
73 * - RFC 2453 - RIPv2
74 * - RFC 2080 - RIPng
75 * - RFC 4822 - RIP cryptographic authentication
76 */
77
78 #include <stdlib.h>
79 #include "rip.h"
80
81
82 static inline void rip_lock_neighbor(struct rip_neighbor *n);
83 static inline void rip_unlock_neighbor(struct rip_neighbor *n);
84 static inline int rip_iface_link_up(struct rip_iface *ifa);
85 static inline void rip_kick_timer(struct rip_proto *p);
86 static inline void rip_iface_kick_timer(struct rip_iface *ifa);
87 static void rip_iface_timer(timer *timer);
88 static void rip_trigger_update(struct rip_proto *p);
89
90
91 /*
92 * RIP routes
93 */
94
95 static struct rip_rte *
96 rip_add_rte(struct rip_proto *p, struct rip_rte **rp, struct rip_rte *src)
97 {
98 struct rip_rte *rt = sl_alloc(p->rte_slab);
99
100 memcpy(rt, src, sizeof(struct rip_rte));
101 rt->next = *rp;
102 *rp = rt;
103
104 rip_lock_neighbor(rt->from);
105
106 return rt;
107 }
108
109 static inline void
110 rip_remove_rte(struct rip_proto *p, struct rip_rte **rp)
111 {
112 struct rip_rte *rt = *rp;
113
114 rip_unlock_neighbor(rt->from);
115
116 *rp = rt->next;
117 sl_free(p->rte_slab, rt);
118 }
119
120 static inline int rip_same_rte(struct rip_rte *a, struct rip_rte *b)
121 { return a->metric == b->metric && a->tag == b->tag && ipa_equal(a->next_hop, b->next_hop); }
122
123 static inline int rip_valid_rte(struct rip_rte *rt)
124 { return rt->from->ifa != NULL; }
125
126 /**
127 * rip_announce_rte - announce route from RIP routing table to the core
128 * @p: RIP instance
129 * @en: related network
130 *
131 * The function takes a list of incoming routes from @en, prepare appropriate
132 * &rte for the core and propagate it by rte_update().
133 */
static void
rip_announce_rte(struct rip_proto *p, struct rip_entry *en)
{
  struct rip_rte *rt = en->routes;

  /* Find first valid rte */
  while (rt && !rip_valid_rte(rt))
    rt = rt->next;

  if (rt)
  {
    /* Update */
    net *n = net_get(p->p.main_channel->table, en->n.addr);

    rta a0 = {
      .src = p->p.main_source,
      .source = RTS_RIP,
      .scope = SCOPE_UNIVERSE,
      .cast = RTC_UNICAST
    };

    u8 rt_metric = rt->metric;
    u16 rt_tag = rt->tag;
    struct rip_rte *rt2 = rt->next;

    /* Find second valid rte */
    while (rt2 && !rip_valid_rte(rt2))
      rt2 = rt2->next;

    if (p->ecmp && rt2)
    {
      /* ECMP route - merge up to p->ecmp valid incoming routes into one
	 multipath rta; all stored incoming routes share the same metric
	 (see the DOC comment at the top of this file) */
      struct mpnh *nhs = NULL;
      struct mpnh **nhp = &nhs;
      int num = 0;

      for (rt = en->routes; rt && (num < p->ecmp); rt = rt->next)
      {
	if (!rip_valid_rte(rt))
	  continue;

	/* Next hops live on the stack; presumably copied by rta_lookup()
	   below -- NOTE(review): confirm against rta internals */
	struct mpnh *nh = alloca(sizeof(struct mpnh));
	nh->gw = rt->next_hop;
	nh->iface = rt->from->nbr->iface;
	nh->weight = rt->from->ifa->cf->ecmp_weight;
	nh->next = NULL;
	*nhp = nh;
	nhp = &(nh->next);
	num++;

	/* If the merged routes disagree on the tag, announce tag 0 */
	if (rt->tag != rt_tag)
	  rt_tag = 0;
      }

      a0.dest = RTD_MULTIPATH;
      a0.nexthops = nhs;
    }
    else
    {
      /* Unipath route */
      a0.dest = RTD_ROUTER;
      a0.gw = rt->next_hop;
      a0.iface = rt->from->nbr->iface;
      a0.from = rt->from->nbr->addr;
    }

    rta *a = rta_lookup(&a0);
    rte *e = rte_get_temp(a);

    e->u.rip.from = a0.iface;
    e->u.rip.metric = rt_metric;
    e->u.rip.tag = rt_tag;

    e->net = n;
    e->pflags = 0;

    rte_update(&p->p, n, e);
  }
  else
  {
    /* Withdraw - no valid incoming route remains for this network */
    net *n = net_find(p->p.main_channel->table, en->n.addr);
    rte_update(&p->p, n, NULL);
  }
}
219
220 /**
221 * rip_update_rte - enter a route update to RIP routing table
222 * @p: RIP instance
223 * @addr: network address
224 * @new: a &rip_rte representing the new route
225 *
226 * The function is called by the RIP packet processing code whenever it receives
227 * a reachable route. The appropriate routing table entry is found and the list
228 * of incoming routes is updated. Eventually, the change is also propagated to
229 * the core by rip_announce_rte(). Note that for unreachable routes,
230 * rip_withdraw_rte() should be called instead of rip_update_rte().
231 */
void
rip_update_rte(struct rip_proto *p, net_addr *n, struct rip_rte *new)
{
  struct rip_entry *en = fib_get(&p->rtable, n);
  struct rip_rte *rt, **rp;
  int changed = 0;

  /* If the new route is better, remove all current routes */
  if (en->routes && new->metric < en->routes->metric)
    while (en->routes)
      rip_remove_rte(p, &en->routes);

  /* Find the old route (also set rp for later) */
  for (rp = &en->routes; rt = *rp; rp = &rt->next)
    if (rt->from == new->from)
    {
      if (rip_same_rte(rt, new))
      {
	/* Identical route from the same neighbor - just refresh timeout */
	rt->expires = new->expires;
	return;
      }

      /* Remove the old route */
      rip_remove_rte(p, rp);
      changed = 1;
      break;
    }

  /* If the new route is optimal, add it to the list (rp now points either
     to the old route's slot or to the list end) */
  if (!en->routes || new->metric == en->routes->metric)
  {
    rt = rip_add_rte(p, rp, new);
    changed = 1;
  }

  /* Announce change if on relevant position (the first or any for ECMP) */
  if (changed && (rp == &en->routes || p->ecmp))
    rip_announce_rte(p, en);
}
271
272 /**
273 * rip_withdraw_rte - enter a route withdraw to RIP routing table
274 * @p: RIP instance
275 * @addr: network address
276 * @from: a &rip_neighbor propagating the withdraw
277 *
278 * The function is called by the RIP packet processing code whenever it receives
279 * an unreachable route. The incoming route for given network from nbr @from is
280 * removed. Eventually, the change is also propagated by rip_announce_rte().
281 */
void
rip_withdraw_rte(struct rip_proto *p, net_addr *n, struct rip_neighbor *from)
{
  struct rip_entry *en = fib_find(&p->rtable, n);
  struct rip_rte *rt, **rp;

  /* Nothing to withdraw if we never had this network */
  if (!en)
    return;

  /* Find the old route */
  for (rp = &en->routes; rt = *rp; rp = &rt->next)
    if (rt->from == from)
      break;

  if (!rt)
    return;

  /* Remove the old route */
  rip_remove_rte(p, rp);

  /* Announce change if on relevant position (the first or any for ECMP) */
  if (rp == &en->routes || p->ecmp)
    rip_announce_rte(p, en);
}
306
307 /*
308 * rip_rt_notify - core tells us about new route, so store
309 * it into our data structures.
310 */
/*
 * rip_rt_notify - the core announces its selected route for a network;
 * store it as the outgoing route in the matching &rip_entry and schedule
 * a triggered update if the advertised metric changed.
 */
static void
rip_rt_notify(struct proto *P, struct rtable *table UNUSED, struct network *net, struct rte *new,
	      struct rte *old UNUSED, struct ea_list *attrs)
{
  struct rip_proto *p = (struct rip_proto *) P;
  struct rip_entry *en;
  int old_metric;

  if (new)
  {
    /* Update */
    u32 rt_metric = ea_get_int(attrs, EA_RIP_METRIC, 1);
    u32 rt_tag = ea_get_int(attrs, EA_RIP_TAG, 0);

    /* Clamp filter-supplied metric to the configured infinity */
    if (rt_metric > p->infinity)
    {
      log(L_WARN "%s: Invalid rip_metric value %u for route %N",
	  p->p.name, rt_metric, net->n.addr);
      rt_metric = p->infinity;
    }

    /* Tag is a 16-bit field on the wire; poison the route on overflow */
    if (rt_tag > 0xffff)
    {
      log(L_WARN "%s: Invalid rip_tag value %u for route %N",
	  p->p.name, rt_tag, net->n.addr);
      rt_metric = p->infinity;
      rt_tag = 0;
    }

    /*
     * Note that we accept exported routes with infinity metric (this could
     * happen if rip_metric is modified in filters). Such entry has infinity
     * metric but is RIP_ENTRY_VALID and therefore is not subject to garbage
     * collection.
     */

    en = fib_get(&p->rtable, net->n.addr);

    old_metric = en->valid ? en->metric : -1;

    en->valid = RIP_ENTRY_VALID;
    en->metric = rt_metric;
    en->tag = rt_tag;
    en->from = (new->attrs->src->proto == P) ? new->u.rip.from : NULL;
    en->iface = new->attrs->iface;
    en->next_hop = new->attrs->gw;
  }
  else
  {
    /* Withdraw - mark the entry stale; rip_timer() garbage-collects it */
    en = fib_find(&p->rtable, net->n.addr);

    if (!en || en->valid != RIP_ENTRY_VALID)
      return;

    old_metric = en->metric;

    en->valid = RIP_ENTRY_STALE;
    en->metric = p->infinity;
    en->tag = 0;
    en->from = NULL;
    en->iface = NULL;
    en->next_hop = IPA_NONE;
  }

  /* Activate triggered updates */
  if (en->metric != old_metric)
  {
    en->changed = now;
    rip_trigger_update(p);
  }
}
383
384
385 /*
386 * RIP neighbors
387 */
388
/*
 * rip_get_neighbor - find or create the RIP neighbor for address @a on
 * interface @ifa. Returns NULL when the core neighbor cannot be resolved,
 * is our own address, or the iface link is down.
 */
struct rip_neighbor *
rip_get_neighbor(struct rip_proto *p, ip_addr *a, struct rip_iface *ifa)
{
  neighbor *nbr = neigh_find2(&p->p, a, ifa->iface, 0);

  if (!nbr || (nbr->scope == SCOPE_HOST) || !rip_iface_link_up(ifa))
    return NULL;

  /* Existing RIP neighbor is linked via the core neighbor's data field */
  if (nbr->data)
    return nbr->data;

  TRACE(D_EVENTS, "New neighbor %I on %s", *a, ifa->iface->name);

  struct rip_neighbor *n = mb_allocz(p->p.pool, sizeof(struct rip_neighbor));
  n->ifa = ifa;
  n->nbr = nbr;
  nbr->data = n;
  /* Restore sequence number saved in nbr->aux by rip_remove_neighbor() */
  n->csn = nbr->aux;

  add_tail(&ifa->neigh_list, NODE n);

  return n;
}
412
/*
 * rip_remove_neighbor - detach a RIP neighbor from its iface and core
 * neighbor. If routes still reference it (n->uc > 0), the structure stays
 * allocated in detached state and is freed by rip_unlock_neighbor() once
 * the last route is gone.
 */
static void
rip_remove_neighbor(struct rip_proto *p, struct rip_neighbor *n)
{
  neighbor *nbr = n->nbr;

  TRACE(D_EVENTS, "Removing neighbor %I on %s", nbr->addr, nbr->iface->name);

  rem_node(NODE n);
  n->ifa = NULL;
  n->nbr = NULL;
  nbr->data = NULL;
  /* Preserve the sequence number for a possible later re-creation */
  nbr->aux = n->csn;

  rfree(n->bfd_req);
  n->bfd_req = NULL;
  n->last_seen = 0;

  if (!n->uc)
    mb_free(n);

  /* Related routes are removed in rip_timer() */
  rip_kick_timer(p);
}
436
437 static inline void
438 rip_lock_neighbor(struct rip_neighbor *n)
439 {
440 n->uc++;
441 }
442
443 static inline void
444 rip_unlock_neighbor(struct rip_neighbor *n)
445 {
446 n->uc--;
447
448 if (!n->nbr && !n->uc)
449 mb_free(n);
450 }
451
/* Core neighbor-change hook; drops the RIP neighbor when it goes away */
static void
rip_neigh_notify(struct neighbor *nbr)
{
  struct rip_proto *p = (struct rip_proto *) nbr->proto;
  struct rip_neighbor *n = nbr->data;

  /* Core neighbor not associated with any RIP neighbor */
  if (!n)
    return;

  /*
   * We assume that rip_neigh_notify() is called before rip_if_notify() for
   * IF_CHANGE_DOWN and therefore n->ifa is still valid. We have no such
   * ordering assumption for IF_CHANGE_LINK, so we test link state of the
   * underlying iface instead of just rip_iface state.
   */
  if ((nbr->scope <= 0) || !rip_iface_link_up(n->ifa))
    rip_remove_neighbor(p, n);
}
470
471 static void
472 rip_bfd_notify(struct bfd_request *req)
473 {
474 struct rip_neighbor *n = req->data;
475 struct rip_proto *p = n->ifa->rip;
476
477 if (req->down)
478 {
479 TRACE(D_EVENTS, "BFD session down for nbr %I on %s",
480 n->nbr->addr, n->ifa->iface->name);
481 rip_remove_neighbor(p, n);
482 }
483 }
484
/*
 * rip_update_bfd - open or close the BFD session for neighbor @n according
 * to the iface's bfd option and whether the neighbor has been seen.
 */
void
rip_update_bfd(struct rip_proto *p, struct rip_neighbor *n)
{
  int use_bfd = n->ifa->cf->bfd && n->last_seen;

  if (use_bfd && !n->bfd_req)
  {
    /*
     * For RIPv2, use the same address as rip_open_socket(). For RIPng, neighbor
     * should contain an address from the same prefix, thus also link-local. It
     * may cause problems if two link-local addresses are assigned to one iface.
     */
    ip_addr saddr = rip_is_v2(p) ? n->ifa->sk->saddr : n->nbr->ifa->ip;
    n->bfd_req = bfd_request_session(p->p.pool, n->nbr->addr, saddr,
				     n->nbr->iface, rip_bfd_notify, n);
  }

  if (!use_bfd && n->bfd_req)
  {
    /* BFD no longer wanted - tear the session down */
    rfree(n->bfd_req);
    n->bfd_req = NULL;
  }
}
508
509
510 /*
511 * RIP interfaces
512 */
513
/* Bring a RIP iface up: schedule updates and solicit routes from peers */
static void
rip_iface_start(struct rip_iface *ifa)
{
  struct rip_proto *p = ifa->rip;

  TRACE(D_EVENTS, "Starting interface %s", ifa->iface->name);

  /* Randomize the first regular update to desynchronize routers */
  ifa->next_regular = now + (random() % ifa->cf->update_time) + 1;
  ifa->next_triggered = now;	/* Available immediately */
  ifa->want_triggered = 1;	/* All routes in triggered update */
  tm_start(ifa->timer, 1);	/* Or 100 ms */
  ifa->up = 1;

  if (!ifa->cf->passive)
    rip_send_request(ifa->rip, ifa);
}
530
/* Bring a RIP iface down: abort TX, drop its neighbors, stop its timer */
static void
rip_iface_stop(struct rip_iface *ifa)
{
  struct rip_proto *p = ifa->rip;
  struct rip_neighbor *n;

  TRACE(D_EVENTS, "Stopping interface %s", ifa->iface->name);

  rip_reset_tx_session(p, ifa);

  /* WALK_LIST_FIRST because rip_remove_neighbor() unlinks the node */
  WALK_LIST_FIRST(n, ifa->neigh_list)
    rip_remove_neighbor(p, n);

  tm_stop(ifa->timer);
  ifa->up = 0;
}
547
548 static inline int
549 rip_iface_link_up(struct rip_iface *ifa)
550 {
551 return !ifa->cf->check_link || (ifa->iface->flags & IF_LINK_UP);
552 }
553
554 static void
555 rip_iface_update_state(struct rip_iface *ifa)
556 {
557 int up = ifa->sk && rip_iface_link_up(ifa);
558
559 if (up == ifa->up)
560 return;
561
562 if (up)
563 rip_iface_start(ifa);
564 else
565 rip_iface_stop(ifa);
566 }
567
/* Recompute socket buffer sizes and usable TX payload from config / MTU */
static void
rip_iface_update_buffers(struct rip_iface *ifa)
{
  if (!ifa->sk)
    return;

  /* Explicit config wins; otherwise fall back to the iface MTU */
  uint rbsize = ifa->cf->rx_buffer ?: ifa->iface->mtu;
  uint tbsize = ifa->cf->tx_length ?: ifa->iface->mtu;
  /* RX buffer must be able to hold anything we might send */
  rbsize = MAX(rbsize, tbsize);

  sk_set_rbsize(ifa->sk, rbsize);
  sk_set_tbsize(ifa->sk, tbsize);

  /* Payload available for RIP records = TX buffer minus IP+UDP headers */
  uint headers = (rip_is_v2(ifa->rip) ? IP4_HEADER_LENGTH : IP6_HEADER_LENGTH) + UDP_HEADER_LENGTH;
  ifa->tx_plen = tbsize - headers;

  /* Cryptographic auth appends a trailer, shrinking the payload further */
  if (ifa->cf->auth_type == RIP_AUTH_CRYPTO)
    ifa->tx_plen -= RIP_AUTH_TAIL_LENGTH;
}
587
588 static inline void
589 rip_iface_update_bfd(struct rip_iface *ifa)
590 {
591 struct rip_proto *p = ifa->rip;
592 struct rip_neighbor *n;
593
594 WALK_LIST(n, ifa->neigh_list)
595 rip_update_bfd(p, n);
596 }
597
598
/* Object-lock hook: the UDP port is ours now, open the socket and go up */
static void
rip_iface_locked(struct object_lock *lock)
{
  struct rip_iface *ifa = lock->data;
  struct rip_proto *p = ifa->rip;

  if (!rip_open_socket(ifa))
  {
    /* Iface stays down (ifa->sk remains unset); no retry is scheduled here */
    log(L_ERR "%s: Cannot open socket for %s", p->p.name, ifa->iface->name);
    return;
  }

  rip_iface_update_buffers(ifa);
  rip_iface_update_state(ifa);
}
614
615
616 static struct rip_iface *
617 rip_find_iface(struct rip_proto *p, struct iface *what)
618 {
619 struct rip_iface *ifa;
620
621 WALK_LIST(ifa, p->iface_list)
622 if (ifa->iface == what)
623 return ifa;
624
625 return NULL;
626 }
627
/*
 * rip_add_iface - allocate a new RIP iface for @iface with config @ic and
 * request the UDP port lock; the socket is opened from rip_iface_locked().
 */
static void
rip_add_iface(struct rip_proto *p, struct iface *iface, struct rip_iface_config *ic)
{
  struct rip_iface *ifa;

  TRACE(D_EVENTS, "Adding interface %s", iface->name);

  ifa = mb_allocz(p->p.pool, sizeof(struct rip_iface));
  ifa->rip = p;
  ifa->iface = iface;
  ifa->cf = ic;

  /* Destination address: explicit config, RIP multicast group, or brd */
  if (ipa_nonzero(ic->address))
    ifa->addr = ic->address;
  else if (ic->mode == RIP_IM_MULTICAST)
    ifa->addr = rip_is_v2(p) ? IP4_RIP_ROUTERS : IP6_RIP_ROUTERS;
  else /* Broadcast */
    ifa->addr = iface->addr->brd;

  init_list(&ifa->neigh_list);

  add_tail(&p->iface_list, NODE ifa);

  ifa->timer = tm_new_set(p->p.pool, rip_iface_timer, ifa, 0, 0);

  /* Serialize access to the UDP port among protocols via an object lock */
  struct object_lock *lock = olock_new(p->p.pool);
  lock->type = OBJLOCK_UDP;
  lock->port = ic->port;
  lock->iface = iface;
  lock->data = ifa;
  lock->hook = rip_iface_locked;
  ifa->lock = lock;

  olock_acquire(lock);
}
663
/* Tear down a RIP iface: stop it, unlink it, release its resources */
static void
rip_remove_iface(struct rip_proto *p, struct rip_iface *ifa)
{
  rip_iface_stop(ifa);

  TRACE(D_EVENTS, "Removing interface %s", ifa->iface->name);

  rem_node(NODE ifa);

  rfree(ifa->sk);
  rfree(ifa->lock);
  rfree(ifa->timer);

  mb_free(ifa);
}
679
/*
 * rip_reconfigure_iface - apply new config @new to iface @ifa in place.
 * Returns 0 when the change requires a socket reset (caller then does a
 * hard restart of the iface), 1 on success.
 */
static int
rip_reconfigure_iface(struct rip_proto *p, struct rip_iface *ifa, struct rip_iface_config *new)
{
  struct rip_iface_config *old = ifa->cf;

  /* Change of these options would require to reset the iface socket */
  if ((new->mode != old->mode) ||
      (new->port != old->port) ||
      (new->tx_tos != old->tx_tos) ||
      (new->tx_priority != old->tx_priority) ||
      (new->ttl_security != old->ttl_security))
    return 0;

  TRACE(D_EVENTS, "Reconfiguring interface %s", ifa->iface->name);

  ifa->cf = new;

  /* Reschedule the regular update if the new period would be missed */
  if (ifa->next_regular > (now + new->update_time))
    ifa->next_regular = now + (random() % new->update_time) + 1;

  if ((new->tx_length != old->tx_length) || (new->rx_buffer != old->rx_buffer))
    rip_iface_update_buffers(ifa);

  if (new->check_link != old->check_link)
    rip_iface_update_state(ifa);

  if (new->bfd != old->bfd)
    rip_iface_update_bfd(ifa);

  if (ifa->up)
    rip_iface_kick_timer(ifa);

  return 1;
}
714
715 static void
716 rip_reconfigure_ifaces(struct rip_proto *p, struct rip_config *cf)
717 {
718 struct iface *iface;
719
720 WALK_LIST(iface, iface_list)
721 {
722 if (! (iface->flags & IF_UP))
723 continue;
724
725 struct rip_iface *ifa = rip_find_iface(p, iface);
726 struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);
727
728 if (ifa && ic)
729 {
730 if (rip_reconfigure_iface(p, ifa, ic))
731 continue;
732
733 /* Hard restart */
734 log(L_INFO "%s: Restarting interface %s", p->p.name, ifa->iface->name);
735 rip_remove_iface(p, ifa);
736 rip_add_iface(p, iface, ic);
737 }
738
739 if (ifa && !ic)
740 rip_remove_iface(p, ifa);
741
742 if (!ifa && ic)
743 rip_add_iface(p, iface, ic);
744 }
745 }
746
/* Core interface-change hook: track iface up/down, MTU and link changes */
static void
rip_if_notify(struct proto *P, unsigned flags, struct iface *iface)
{
  struct rip_proto *p = (void *) P;
  struct rip_config *cf = (void *) P->cf;

  if (iface->flags & IF_IGNORE)
    return;

  if (flags & IF_CHANGE_UP)
  {
    /* Add the iface only if it matches some configured pattern */
    struct rip_iface_config *ic = (void *) iface_patt_find(&cf->patt_list, iface, NULL);

    if (ic)
      rip_add_iface(p, iface, ic);

    return;
  }

  struct rip_iface *ifa = rip_find_iface(p, iface);

  /* Remaining events only matter for ifaces we track */
  if (!ifa)
    return;

  if (flags & IF_CHANGE_DOWN)
  {
    rip_remove_iface(p, ifa);
    return;
  }

  if (flags & IF_CHANGE_MTU)
    rip_iface_update_buffers(ifa);

  if (flags & IF_CHANGE_LINK)
    rip_iface_update_state(ifa);
}
783
784
785 /*
786 * RIP timer events
787 */
788
789 /**
790 * rip_timer - RIP main timer hook
791 * @t: timer
792 *
793 * The RIP main timer is responsible for routing table maintenance. Invalid or
794 * expired routes (&rip_rte) are removed and garbage collection of stale routing
795 * table entries (&rip_entry) is done. Changes are propagated to core tables,
796 * route reload is also done here. Note that garbage collection uses a maximal
797 * GC time, while interfaces maintain an illusion of per-interface GC times in
798 * rip_send_response().
799 *
800 * Keeping incoming routes and the selected outgoing route are two independent
801 * functions, therefore after garbage collection some entries now considered
802 * invalid (RIP_ENTRY_DUMMY) still may have non-empty list of incoming routes,
803 * while some valid entries (representing an outgoing route) may have that list
804 * empty.
805 *
806 * The main timer is not scheduled periodically but it uses the time of the
807 * current next event and the minimal interval of any possible event to compute
808 * the time of the next run.
809 */
static void
rip_timer(timer *t)
{
  struct rip_proto *p = t->data;
  struct rip_config *cf = (void *) (p->p.cf);
  struct rip_iface *ifa;
  struct rip_neighbor *n, *nn;
  struct fib_iterator fit;
  /* 'next' tracks the earliest time any event could need processing */
  bird_clock_t next = now + MIN(cf->min_timeout_time, cf->max_garbage_time);
  bird_clock_t expires = 0;

  TRACE(D_EVENTS, "Main timer fired");

  FIB_ITERATE_INIT(&fit, &p->rtable);

  loop:
  FIB_ITERATE_START(&p->rtable, &fit, struct rip_entry, en)
  {
    struct rip_rte *rt, **rp;
    int changed = 0;

    /* Checking received routes for timeout and for dead neighbors */
    for (rp = &en->routes; rt = *rp; /* rp = &rt->next */)
    {
      if (!rip_valid_rte(rt) || (rt->expires <= now))
      {
	/* rp stays in place; removal relinks *rp to the next route */
	rip_remove_rte(p, rp);
	changed = 1;
	continue;
      }

      next = MIN(next, rt->expires);
      rp = &rt->next;
    }

    /* Propagating eventual change */
    if (changed || p->rt_reload)
    {
      /*
       * We have to restart the iteration because there may be a cascade of
       * synchronous events rip_announce_rte() -> nest table change ->
       * rip_rt_notify() -> p->rtable change, invalidating hidden variables.
       */

      FIB_ITERATE_PUT_NEXT(&fit, &p->rtable);
      rip_announce_rte(p, en);
      goto loop;
    }

    /* Checking stale entries for garbage collection timeout */
    if (en->valid == RIP_ENTRY_STALE)
    {
      expires = en->changed + cf->max_garbage_time;

      if (expires <= now)
      {
	// TRACE(D_EVENTS, "entry is too old: %N", en->n.addr);
	en->valid = 0;
      }
      else
	next = MIN(next, expires);
    }

    /* Remove empty nodes */
    if (!en->valid && !en->routes)
    {
      FIB_ITERATE_PUT(&fit);
      fib_delete(&p->rtable, en);
      goto loop;
    }
  }
  FIB_ITERATE_END;

  p->rt_reload = 0;

  /* Handling neighbor expiration */
  WALK_LIST(ifa, p->iface_list)
    WALK_LIST_DELSAFE(n, nn, ifa->neigh_list)
      if (n->last_seen)
      {
	expires = n->last_seen + n->ifa->cf->timeout_time;

	if (expires <= now)
	  rip_remove_neighbor(p, n);
	else
	  next = MIN(next, expires);
      }

  /* Re-arm for the nearest future event, at least one second away */
  tm_start(p->timer, MAX(next - now, 1));
}
900
901 static inline void
902 rip_kick_timer(struct rip_proto *p)
903 {
904 if (p->timer->expires > (now + 1))
905 tm_start(p->timer, 1); /* Or 100 ms */
906 }
907
908 /**
909 * rip_iface_timer - RIP interface timer hook
910 * @t: timer
911 *
912 * RIP interface timers are responsible for scheduling both regular and
913 * triggered updates. Fixed, delay-independent period is used for regular
914 * updates, while minimal separating interval is enforced for triggered updates.
915 * The function also ensures that a new update is not started when the old one
916 * is still running.
917 */
static void
rip_iface_timer(timer *t)
{
  struct rip_iface *ifa = t->data;
  struct rip_proto *p = ifa->rip;
  bird_clock_t period = ifa->cf->update_time;

  /* Passive ifaces never originate updates */
  if (ifa->cf->passive)
    return;

  TRACE(D_EVENTS, "Interface timer fired for %s", ifa->iface->name);

  if (ifa->tx_active)
  {
    /* Previous update session still running; retry shortly */
    if (now < (ifa->next_regular + period))
    { tm_start(ifa->timer, 1); return; }

    /* We are too late, reset is done by rip_send_table() */
    log(L_WARN "%s: Too slow update on %s, resetting", p->p.name, ifa->iface->name);
  }

  if (now >= ifa->next_regular)
  {
    /* Send regular update, set timer for next period (or following one if necessay) */
    TRACE(D_EVENTS, "Sending regular updates for %s", ifa->iface->name);
    rip_send_table(p, ifa, ifa->addr, 0);
    ifa->next_regular += period * (1 + ((now - ifa->next_regular) / period));
    ifa->want_triggered = 0;
    p->triggered = 0;
  }
  else if (ifa->want_triggered && (now >= ifa->next_triggered))
  {
    /* Send triggered update, enforce interval between triggered updates */
    TRACE(D_EVENTS, "Sending triggered updates for %s", ifa->iface->name);
    rip_send_table(p, ifa, ifa->addr, ifa->want_triggered);
    ifa->next_triggered = now + MIN(5, period / 2 + 1);
    ifa->want_triggered = 0;
    p->triggered = 0;
  }

  /* Re-arm: soon if a triggered update is pending, else at the next period */
  tm_start(ifa->timer, ifa->want_triggered ? 1 : (ifa->next_regular - now));
}
960
961 static inline void
962 rip_iface_kick_timer(struct rip_iface *ifa)
963 {
964 if (ifa->timer->expires > (now + 1))
965 tm_start(ifa->timer, 1); /* Or 100 ms */
966 }
967
/*
 * rip_trigger_update - schedule triggered updates on all active ifaces;
 * p->triggered coalesces repeated calls until the updates are sent.
 */
static void
rip_trigger_update(struct rip_proto *p)
{
  if (p->triggered)
    return;

  struct rip_iface *ifa;
  WALK_LIST(ifa, p->iface_list)
  {
    /* Interface not active */
    if (! ifa->up)
      continue;

    /* Already scheduled */
    if (ifa->want_triggered)
      continue;

    TRACE(D_EVENTS, "Scheduling triggered updates for %s", ifa->iface->name);
    /* want_triggered records the schedule time; routes changed since then
       are included in the triggered update */
    ifa->want_triggered = now;
    rip_iface_kick_timer(ifa);
  }

  p->triggered = 1;
}
992
993
994 /*
995 * RIP protocol glue
996 */
997
998 static struct ea_list *
999 rip_prepare_attrs(struct linpool *pool, ea_list *next, u8 metric, u16 tag)
1000 {
1001 struct ea_list *l = lp_alloc(pool, sizeof(struct ea_list) + 2 * sizeof(eattr));
1002
1003 l->next = next;
1004 l->flags = EALF_SORTED;
1005 l->count = 2;
1006
1007 l->attrs[0].id = EA_RIP_METRIC;
1008 l->attrs[0].flags = 0;
1009 l->attrs[0].type = EAF_TYPE_INT | EAF_TEMP;
1010 l->attrs[0].u.data = metric;
1011
1012 l->attrs[1].id = EA_RIP_TAG;
1013 l->attrs[1].flags = 0;
1014 l->attrs[1].type = EAF_TYPE_INT | EAF_TEMP;
1015 l->attrs[1].u.data = tag;
1016
1017 return l;
1018 }
1019
1020 static int
1021 rip_import_control(struct proto *P, struct rte **rt, struct ea_list **attrs, struct linpool *pool)
1022 {
1023 /* Prepare attributes with initial values */
1024 if ((*rt)->attrs->source != RTS_RIP)
1025 *attrs = rip_prepare_attrs(pool, *attrs, 1, 0);
1026
1027 return 0;
1028 }
1029
1030 static void
1031 rip_reload_routes(struct channel *C)
1032 {
1033 struct rip_proto *p = (struct rip_proto *) C->proto;
1034
1035 if (p->rt_reload)
1036 return;
1037
1038 TRACE(D_EVENTS, "Scheduling route reload");
1039 p->rt_reload = 1;
1040 rip_kick_timer(p);
1041 }
1042
1043 static struct ea_list *
1044 rip_make_tmp_attrs(struct rte *rt, struct linpool *pool)
1045 {
1046 return rip_prepare_attrs(pool, NULL, rt->u.rip.metric, rt->u.rip.tag);
1047 }
1048
1049 static void
1050 rip_store_tmp_attrs(struct rte *rt, struct ea_list *attrs)
1051 {
1052 rt->u.rip.metric = ea_get_int(attrs, EA_RIP_METRIC, 1);
1053 rt->u.rip.tag = ea_get_int(attrs, EA_RIP_TAG, 0);
1054 }
1055
1056 static int
1057 rip_rte_better(struct rte *new, struct rte *old)
1058 {
1059 return new->u.rip.metric < old->u.rip.metric;
1060 }
1061
1062 static int
1063 rip_rte_same(struct rte *new, struct rte *old)
1064 {
1065 return ((new->u.rip.metric == old->u.rip.metric) &&
1066 (new->u.rip.tag == old->u.rip.tag) &&
1067 (new->u.rip.from == old->u.rip.from));
1068 }
1069
1070
1071 static void
1072 rip_postconfig(struct proto_config *CF)
1073 {
1074 // struct rip_config *cf = (void *) CF;
1075
1076 /* Define default channel */
1077 if (EMPTY_LIST(CF->channels))
1078 channel_config_new(NULL, CF->net_type, CF);
1079 }
1080
/* Allocate the protocol instance and register all RIP hooks with the core */
static struct proto *
rip_init(struct proto_config *CF)
{
  struct proto *P = proto_new(CF);

  P->main_channel = proto_add_channel(P, proto_cf_main_channel(CF));

  P->if_notify = rip_if_notify;
  P->rt_notify = rip_rt_notify;
  P->neigh_notify = rip_neigh_notify;
  P->import_control = rip_import_control;
  P->reload_routes = rip_reload_routes;
  P->make_tmp_attrs = rip_make_tmp_attrs;
  P->store_tmp_attrs = rip_store_tmp_attrs;
  P->rte_better = rip_rte_better;
  P->rte_same = rip_rte_same;

  return P;
}
1100
/* Initialize runtime state (internal table, slab, timers) and go up */
static int
rip_start(struct proto *P)
{
  struct rip_proto *p = (void *) P;
  struct rip_config *cf = (void *) (P->cf);

  init_list(&p->iface_list);
  /* Internal table keyed by IPv4 nets for RIPv2, IPv6 nets for RIPng */
  fib_init(&p->rtable, P->pool, cf->rip2 ? NET_IP4 : NET_IP6,
	   sizeof(struct rip_entry), OFFSETOF(struct rip_entry, n), 0, NULL);
  p->rte_slab = sl_new(P->pool, sizeof(struct rip_rte));
  p->timer = tm_new_set(P->pool, rip_timer, p, 0, 0);

  p->rip2 = cf->rip2;
  p->ecmp = cf->ecmp;
  p->infinity = cf->infinity;
  p->triggered = 0;

  /* Token buckets rate-limiting packet and route log messages */
  p->log_pkt_tbf = (struct tbf){ .rate = 1, .burst = 5 };
  p->log_rte_tbf = (struct tbf){ .rate = 4, .burst = 20 };

  tm_start(p->timer, MIN(cf->min_timeout_time, cf->max_garbage_time));

  return PS_UP;
}
1125
/*
 * rip_reconfigure - apply new config in place where possible. Returns 0
 * (forcing a protocol restart) when an option that cannot change at
 * runtime (RIP version, infinity) differs, 1 on success.
 */
static int
rip_reconfigure(struct proto *P, struct proto_config *CF)
{
  struct rip_proto *p = (void *) P;
  struct rip_config *new = (void *) CF;
  // struct rip_config *old = (void *) (P->cf);

  if (new->rip2 != p->rip2)
    return 0;

  if (new->infinity != p->infinity)
    return 0;

  if (!proto_configure_channel(P, &P->main_channel, proto_cf_main_channel(CF)))
    return 0;

  TRACE(D_EVENTS, "Reconfiguring");

  p->p.cf = CF;
  p->ecmp = new->ecmp;
  rip_reconfigure_ifaces(p, new);

  /* Reload all routes so new filters/options take effect */
  p->rt_reload = 1;
  rip_kick_timer(p);

  return 1;
}
1153
/* Append "(pref/metric) [tag]" route info for `show route` output */
static void
rip_get_route_info(rte *rte, byte *buf, ea_list *attrs)
{
  buf += bsprintf(buf, " (%d/%d)", rte->pref, rte->u.rip.metric);

  /* Tag shown only when nonzero */
  if (rte->u.rip.tag)
    bsprintf(buf, " [%04x]", rte->u.rip.tag);
}
1162
1163 static int
1164 rip_get_attr(eattr *a, byte *buf, int buflen UNUSED)
1165 {
1166 switch (a->id)
1167 {
1168 case EA_RIP_METRIC:
1169 bsprintf(buf, "metric: %d", a->u.data);
1170 return GA_FULL;
1171
1172 case EA_RIP_TAG:
1173 bsprintf(buf, "tag: %04x", a->u.data);
1174 return GA_FULL;
1175
1176 default:
1177 return GA_UNKNOWN;
1178 }
1179 }
1180
/*
 * rip_show_interfaces - CLI handler for `show rip interfaces`; @iff is an
 * optional interface name pattern filter (NULL means all).
 */
void
rip_show_interfaces(struct proto *P, char *iff)
{
  struct rip_proto *p = (void *) P;
  struct rip_iface *ifa = NULL;
  struct rip_neighbor *n = NULL;

  if (p->p.proto_state != PS_UP)
  {
    cli_msg(-1021, "%s: is not up", p->p.name);
    cli_msg(0, "");
    return;
  }

  cli_msg(-1021, "%s:", p->p.name);
  cli_msg(-1021, "%-10s %-6s %6s %6s %6s",
	  "Interface", "State", "Metric", "Nbrs", "Timer");

  WALK_LIST(ifa, p->iface_list)
  {
    if (iff && !patmatch(iff, ifa->iface->name))
      continue;

    /* Count only neighbors we have actually heard from */
    int nbrs = 0;
    WALK_LIST(n, ifa->neigh_list)
      if (n->last_seen)
	nbrs++;

    /* Seconds until the next regular update */
    int timer = MAX(ifa->next_regular - now, 0);
    cli_msg(-1021, "%-10s %-6s %6u %6u %6u",
	    ifa->iface->name, (ifa->up ? "Up" : "Down"), ifa->cf->metric, nbrs, timer);
  }

  cli_msg(0, "");
}
1216
/*
 * rip_show_neighbors - CLI handler for `show rip neighbors`; @iff is an
 * optional interface name pattern filter (NULL means all).
 */
void
rip_show_neighbors(struct proto *P, char *iff)
{
  struct rip_proto *p = (void *) P;
  struct rip_iface *ifa = NULL;
  struct rip_neighbor *n = NULL;

  if (p->p.proto_state != PS_UP)
  {
    cli_msg(-1022, "%s: is not up", p->p.name);
    cli_msg(0, "");
    return;
  }

  cli_msg(-1022, "%s:", p->p.name);
  cli_msg(-1022, "%-25s %-10s %6s %6s %6s",
	  "IP address", "Interface", "Metric", "Routes", "Seen");

  WALK_LIST(ifa, p->iface_list)
  {
    if (iff && !patmatch(iff, ifa->iface->name))
      continue;

    WALK_LIST(n, ifa->neigh_list)
    {
      /* Skip neighbors we have not actually heard from yet */
      if (!n->last_seen)
	continue;

      /* Seconds since the neighbor was last heard; n->uc = route count */
      int timer = now - n->last_seen;
      cli_msg(-1022, "%-25I %-10s %6u %6u %6u",
	      n->nbr->addr, ifa->iface->name, ifa->cf->metric, n->uc, timer);
    }
  }

  cli_msg(0, "");
}
1253
1254 static void
1255 rip_dump(struct proto *P)
1256 {
1257 struct rip_proto *p = (struct rip_proto *) P;
1258 struct rip_iface *ifa;
1259 int i;
1260
1261 i = 0;
1262 FIB_WALK(&p->rtable, struct rip_entry, en)
1263 {
1264 debug("RIP: entry #%d: %N via %I dev %s valid %d metric %d age %d s\n",
1265 i++, en->n.addr, en->next_hop, en->iface->name,
1266 en->valid, en->metric, now - en->changed);
1267 }
1268 FIB_WALK_END;
1269
1270 i = 0;
1271 WALK_LIST(ifa, p->iface_list)
1272 {
1273 debug("RIP: interface #%d: %s, %I, up = %d, busy = %d\n",
1274 i++, ifa->iface->name, ifa->sk ? ifa->sk->daddr : IPA_NONE,
1275 ifa->up, ifa->tx_active);
1276 }
1277 }
1278
1279
/* Protocol descriptor registered with the BIRD core; hooks defined above */
struct protocol proto_rip = {
  .name = "RIP",
  .template = "rip%d",
  .attr_class = EAP_RIP,
  .preference = DEF_PREF_RIP,
  .channel_mask = NB_IP,
  .proto_size = sizeof(struct rip_proto),
  .config_size = sizeof(struct rip_config),
  .postconfig = rip_postconfig,
  .init = rip_init,
  .dump = rip_dump,
  .start = rip_start,
  .reconfigure = rip_reconfigure,
  .get_route_info = rip_get_route_info,
  .get_attr = rip_get_attr
};