]> git.ipfire.org Git - thirdparty/bird.git/blob - nest/rt-table.c
1f84e975f7add465236a085b767e4db01af40106
[thirdparty/bird.git] / nest / rt-table.c
1 /*
2 * BIRD -- Routing Tables
3 *
4 * (c) 1998--2000 Martin Mares <mj@ucw.cz>
5 *
6 * Can be freely distributed and used under the terms of the GNU GPL.
7 */
8
9 /**
10 * DOC: Routing tables
11 *
12 * Routing tables are probably the most important structures BIRD uses. They
13 * hold all the information about known networks, the associated routes and
14 * their attributes.
15 *
16 * There are multiple routing tables (a primary one together with any
17 * number of secondary ones if requested by the configuration). Each table
18 * is basically a FIB containing entries describing the individual
19 * destination networks. For each network (represented by structure &net),
20 * there is a one-way linked list of route entries (&rte), the first entry
21 * on the list being the best one (i.e., the one we currently use
22 * for routing), the order of the other ones is undetermined.
23 *
24 * The &rte contains information specific to the route (preference, protocol
25 * metrics, time of last modification etc.) and a pointer to a &rta structure
26 * (see the route attribute module for a precise explanation) holding the
27 * remaining route attributes which are expected to be shared by multiple
28 * routes in order to conserve memory.
29 */
30
31 #undef LOCAL_DEBUG
32
33 #include "nest/bird.h"
34 #include "nest/route.h"
35 #include "nest/protocol.h"
36 #include "nest/cli.h"
37 #include "nest/iface.h"
38 #include "lib/resource.h"
39 #include "lib/event.h"
40 #include "lib/string.h"
41 #include "conf/conf.h"
42 #include "filter/filter.h"
43 #include "lib/string.h"
44 #include "lib/alloca.h"
45
/* Resource pool from which all routing tables and their helper
   structures are allocated. */
pool *rt_table_pool;

static slab *rte_slab;			/* Allocator for &rte entries */
static linpool *rte_update_pool;	/* Scratch pool for one rte_update() pass */

static list routing_tables;		/* All configured tables (list of &rtable) */

/* Forward declarations for helpers defined later in this file. */
static void rt_format_via(rte *e, byte *via);
static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
static void rt_update_hostcache(rtable *tab);
static void rt_next_hop_update(rtable *tab);
static void rt_prune(rtable *tab);

static inline void rt_schedule_gc(rtable *tab);
62 static void
63 rte_init(struct fib_node *N)
64 {
65 net *n = (net *) N;
66
67 N->flags = 0;
68 n->routes = NULL;
69 }
70
71 /**
72 * rte_find - find a route
73 * @net: network node
74 * @p: protocol
75 *
76 * The rte_find() function returns a route for destination @net
77 * which belongs has been defined by protocol @p.
78 */
79 rte *
80 rte_find(net *net, struct proto *p)
81 {
82 rte *e = net->routes;
83
84 while (e && e->attrs->proto != p)
85 e = e->next;
86 return e;
87 }
88
89 /**
90 * rte_get_temp - get a temporary &rte
91 * @a: attributes to assign to the new route (a &rta; in case it's
92 * un-cached, rte_update() will create a cached copy automatically)
93 *
94 * Create a temporary &rte and bind it with the attributes @a.
95 * Also set route preference to the default preference set for
96 * the protocol.
97 */
98 rte *
99 rte_get_temp(rta *a)
100 {
101 rte *e = sl_alloc(rte_slab);
102
103 e->attrs = a;
104 e->flags = 0;
105 e->pref = a->proto->preference;
106 return e;
107 }
108
109 rte *
110 rte_do_cow(rte *r)
111 {
112 rte *e = sl_alloc(rte_slab);
113
114 memcpy(e, r, sizeof(rte));
115 e->attrs = rta_clone(r->attrs);
116 e->flags = 0;
117 return e;
118 }
119
120 static int /* Actually better or at least as good as */
121 rte_better(rte *new, rte *old)
122 {
123 int (*better)(rte *, rte *);
124
125 if (!old)
126 return 1;
127 if (new->pref > old->pref)
128 return 1;
129 if (new->pref < old->pref)
130 return 0;
131 if (new->attrs->proto->proto != old->attrs->proto->proto)
132 {
133 /*
134 * If the user has configured protocol preferences, so that two different protocols
135 * have the same preference, try to break the tie by comparing addresses. Not too
136 * useful, but keeps the ordering of routes unambiguous.
137 */
138 return new->attrs->proto->proto > old->attrs->proto->proto;
139 }
140 if (better = new->attrs->proto->rte_better)
141 return better(new, old);
142 return 0;
143 }
144
145 static void
146 rte_trace(struct proto *p, rte *e, int dir, char *msg)
147 {
148 byte via[STD_ADDRESS_P_LENGTH+32];
149
150 rt_format_via(e, via);
151 log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, via);
152 }
153
154 static inline void
155 rte_trace_in(unsigned int flag, struct proto *p, rte *e, char *msg)
156 {
157 if (p->debug & flag)
158 rte_trace(p, e, '>', msg);
159 }
160
161 static inline void
162 rte_trace_out(unsigned int flag, struct proto *p, rte *e, char *msg)
163 {
164 if (p->debug & flag)
165 rte_trace(p, e, '<', msg);
166 }
167
/*
 * do_rte_announce - deliver one route change to one announcement hook
 *
 * Runs the protocol's import_control() and export filter on @new (and,
 * for non-refeed withdraws, on @old) and finally calls the protocol's
 * rt_notify() hook. @new or @old may be replaced by filtered copies;
 * such temporary copies are freed before returning. @refeed is set when
 * re-announcing all routes after a filter change.
 */
static inline void
do_rte_announce(struct announce_hook *a, int type UNUSED, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
{
  struct proto *p = a->proto;
  struct filter *filter = p->out_filter;
  struct proto_stats *stats = &p->stats;
  rte *new0 = new;	/* Originals kept so we can detect filter-made copies */
  rte *old0 = old;
  int ok;

#ifdef CONFIG_PIPE
  /* The secondary direction of the pipe */
  if (proto_is_pipe(p) && (p->table != a->table))
    {
      filter = p->in_filter;
      stats = pipe_get_peer_stats(p);
    }
#endif

  if (new)
    {
      stats->exp_updates_received++;

      char *drop_reason = NULL;
      /* import_control() may veto (<0), force accept (>0), or stay neutral (0) */
      if ((ok = p->import_control ? p->import_control(p, &new, &tmpa, rte_update_pool) : 0) < 0)
	{
	  stats->exp_updates_rejected++;
	  drop_reason = "rejected by protocol";
	}
      else if (ok)
	rte_trace_out(D_FILTERS, p, new, "forced accept by protocol");
      else if ((filter == FILTER_REJECT) ||
	       (filter && f_run(filter, &new, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT))
	{
	  stats->exp_updates_filtered++;
	  drop_reason = "filtered out";
	}
      if (drop_reason)
	{
	  rte_trace_out(D_FILTERS, p, new, drop_reason);
	  if (new != new0)	/* Free the copy the filter made for us */
	    rte_free(new);
	  new = NULL;
	}
    }
  else
    stats->exp_withdraws_received++;

  /*
   * This is a tricky part - we don't know whether route 'old' was
   * exported to protocol 'p' or was filtered by the export filter.
   * We try to run the export filter to know this to have a correct
   * value in 'old' argument of rte_update (and proper filter value)
   *
   * FIXME - this is broken because 'configure soft' may change
   * filters but keep routes. Refeed is expected to be called after
   * change of the filters and with old == new, therefore we do not
   * even try to run the filter on an old route, This may lead to
   * 'spurious withdraws' but ensure that there are no 'missing
   * withdraws'.
   *
   * This is not completely safe as there is a window between
   * reconfiguration and the end of refeed - if a newly filtered
   * route disappears during this period, proper withdraw is not
   * sent (because old would be also filtered) and the route is
   * not refeeded (because it disappeared before that).
   */

  if (old && !refeed)
    {
      if (filter == FILTER_REJECT)
	old = NULL;
      else
	{
	  /* Rebuild the temporary attributes @old had when it was exported */
	  ea_list *tmpb = p->make_tmp_attrs ? p->make_tmp_attrs(old, rte_update_pool) : NULL;
	  ok = p->import_control ? p->import_control(p, &old, &tmpb, rte_update_pool) : 0;
	  if (ok < 0 || (!ok && filter && f_run(filter, &old, &tmpb, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT))
	    {
	      if (old != old0)
		rte_free(old);
	      old = NULL;
	    }
	}
    }

  /* FIXME - This is broken because of incorrect 'old' value (see above) */
  if (!new && !old)
    return;

  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  /* Hack: We do not decrease exp_routes during refeed, we instead
     reset exp_routes at the start of refeed. */
  if (new)
    stats->exp_routes++;
  if (old && !refeed)
    stats->exp_routes--;

  if (p->debug & D_ROUTES)
    {
      if (new && old)
	rte_trace_out(D_ROUTES, p, new, "replaced");
      else if (new)
	rte_trace_out(D_ROUTES, p, new, "added");
      else if (old)
	rte_trace_out(D_ROUTES, p, old, "removed");
    }
  if (!new)
    p->rt_notify(p, a->table, net, NULL, old, NULL);
  else if (tmpa)
    {
      /* Temporarily splice @tmpa in front of the route's own eattrs
	 so rt_notify() sees one combined attribute list; undo after. */
      ea_list *t = tmpa;
      while (t->next)
	t = t->next;
      t->next = new->attrs->eattrs;
      p->rt_notify(p, a->table, net, new, old, tmpa);
      t->next = NULL;
    }
  else
    p->rt_notify(p, a->table, net, new, old, new->attrs->eattrs);
  if (new && new != new0)	/* Discard temporary rte's */
    rte_free(new);
  if (old && old != old0)
    rte_free(old);
}
296
297 /**
298 * rte_announce - announce a routing table change
299 * @tab: table the route has been added to
300 * @type: type of route announcement (RA_OPTIMAL or RA_ANY)
301 * @net: network in question
302 * @new: the new route to be announced
303 * @old: the previous route for the same network
304 * @tmpa: a list of temporary attributes belonging to the new route
305 *
306 * This function gets a routing table update and announces it
307 * to all protocols that acccepts given type of route announcement
308 * and are connected to the same table by their announcement hooks.
309 *
310 * Route announcement of type RA_OPTIMAL si generated when optimal
311 * route (in routing table @tab) changes. In that case @old stores the
312 * old optimal route.
313 *
314 * Route announcement of type RA_ANY si generated when any route (in
315 * routing table @tab) changes In that case @old stores the old route
316 * from the same protocol.
317 *
318 * For each appropriate protocol, we first call its import_control()
319 * hook which performs basic checks on the route (each protocol has a
320 * right to veto or force accept of the route before any filter is
321 * asked) and adds default values of attributes specific to the new
322 * protocol (metrics, tags etc.). Then it consults the protocol's
323 * export filter and if it accepts the route, the rt_notify() hook of
324 * the protocol gets called.
325 */
326 static void
327 rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old, ea_list *tmpa)
328 {
329 struct announce_hook *a;
330
331 if (type == RA_OPTIMAL)
332 {
333 if (new)
334 new->attrs->proto->stats.pref_routes++;
335 if (old)
336 old->attrs->proto->stats.pref_routes--;
337
338 if (tab->hostcache)
339 rt_notify_hostcache(tab, net);
340 }
341
342 WALK_LIST(a, tab->hooks)
343 {
344 ASSERT(a->proto->core_state == FS_HAPPY || a->proto->core_state == FS_FEEDING);
345 if (a->proto->accept_ra_types == type)
346 do_rte_announce(a, type, net, new, old, tmpa, 0);
347 }
348 }
349
350
351 static inline int
352 rte_validate(rte *e)
353 {
354 int c;
355 net *n = e->net;
356
357 if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
358 {
359 log(L_BUG "Ignoring bogus prefix %I/%d received via %s",
360 n->n.prefix, n->n.pxlen, e->sender->name);
361 return 0;
362 }
363
364 c = ipa_classify_net(n->n.prefix);
365 if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
366 {
367 log(L_WARN "Ignoring bogus route %I/%d received via %s",
368 n->n.prefix, n->n.pxlen, e->sender->name);
369 return 0;
370 }
371
372 return 1;
373 }
374
/**
 * rte_free - delete a &rte
 * @e: &rte to be deleted
 *
 * rte_free() releases the given &rte: it drops the reference to its
 * attributes (only if they are cached -- un-cached attributes are not
 * owned by the rte) and returns the entry to the rte slab.
 */
void
rte_free(rte *e)
{
  if (e->attrs->aflags & RTAF_CACHED)
    rta_free(e->attrs);
  sl_free(rte_slab, e);
}
388
/* Fast-path variant of rte_free() for entries whose attributes are
   known to be cached: unconditionally drop the rta reference and
   return the entry to the slab. */
static inline void
rte_free_quick(rte *e)
{
  rta_free(e->attrs);
  sl_free(rte_slab, e);
}
395
396 static int
397 rte_same(rte *x, rte *y)
398 {
399 return
400 x->attrs == y->attrs &&
401 x->flags == y->flags &&
402 x->pflags == y->pflags &&
403 x->pref == y->pref &&
404 (!x->attrs->proto->rte_same || x->attrs->proto->rte_same(x, y));
405 }
406
/*
 * rte_recalculate - apply one route change to a network node
 *
 * Removes the previous route from @src (if any) from @net's route list,
 * inserts @new (if non-NULL), re-elects the optimal route and announces
 * the change via rte_announce(). Takes ownership of @new: it is either
 * linked into the table or freed here. @p is the sending protocol,
 * @src the originating one (they differ for pipes).
 */
static void
rte_recalculate(rtable *table, net *net, struct proto *p, struct proto *src, rte *new, ea_list *tmpa)
{
  struct proto_stats *stats = &p->stats;
  rte *old_best = net->routes;	/* List head is always the current optimum */
  rte *old = NULL;
  rte **k, *r, *s;

#ifdef CONFIG_PIPE
  if (proto_is_pipe(p) && (p->table == table))
    stats = pipe_get_peer_stats(p);
#endif

  k = &net->routes;			/* Find and remove original route from the same protocol */
  while (old = *k)
    {
      if (old->attrs->proto == src)
	{
	  /* If there is the same route in the routing table but from
	   * a different sender, then there are two paths from the
	   * source protocol to this routing table through transparent
	   * pipes, which is not allowed.
	   *
	   * We log that and ignore the route. If it is withdraw, we
	   * ignore it completely (there might be 'spurious withdraws',
	   * see FIXME in do_rte_announce())
	   */
	  if (old->sender != p)
	    {
	      if (new)
		{
		  log(L_ERR "Pipe collision detected when sending %I/%d to table %s",
		      net->n.prefix, net->n.pxlen, table->name);
		  rte_free_quick(new);
		}
	      return;
	    }

	  if (new && rte_same(old, new))
	    {
	      /* No changes, ignore the new route */
	      stats->imp_updates_ignored++;
	      rte_trace_in(D_ROUTES, p, new, "ignored");
	      rte_free_quick(new);
#ifdef CONFIG_RIP
	      /* lastmod is used internally by RIP as the last time
		 when the route was received. */
	      if (src->proto == &proto_rip)
		old->lastmod = now;
#endif
	      return;
	    }
	  *k = old->next;	/* Unlink the old route; freed at the end */
	  break;
	}
      k = &old->next;
    }

  /* Withdraw of a route we never had -- nothing to do */
  if (!old && !new)
    {
      stats->imp_withdraws_ignored++;
      return;
    }

  if (new)
    stats->imp_updates_accepted++;
  else
    stats->imp_withdraws_accepted++;

  if (new)
    stats->imp_routes++;
  if (old)
    stats->imp_routes--;

  /* RA_ANY listeners hear about every route change, optimal or not */
  rte_announce(table, RA_ANY, net, new, old, tmpa);

  if (new && rte_better(new, old_best))
    {
      /* The first case - the new route is cleary optimal, we link it
	 at the first position and announce it */

      rte_trace_in(D_ROUTES, p, new, "added [best]");
      rte_announce(table, RA_OPTIMAL, net, new, old_best, tmpa);
      new->next = net->routes;
      net->routes = new;
    }
  else if (old == old_best)
    {
      /* The second case - the old best route disappeared, we add the
	 new route (if we have any) to the list (we don't care about
	 position) and then we elect the new optimal route and relink
	 that route at the first position and announce it. New optimal
	 route might be NULL if there is no more routes */

      /* Add the new route to the list */
      if (new)
	{
	  rte_trace_in(D_ROUTES, p, new, "added");
	  new->next = net->routes;
	  net->routes = new;
	}

      /* Find new optimal route */
      r = NULL;
      for (s=net->routes; s; s=s->next)
	if (rte_better(s, r))
	  r = s;

      /* Announce optimal route */
      rte_announce(table, RA_OPTIMAL, net, r, old_best, tmpa);

      /* And relink it (if there is any) */
      if (r)
	{
	  k = &net->routes;
	  while (s = *k)
	    {
	      if (s == r)
		{
		  *k = r->next;
		  break;
		}
	      k = &s->next;
	    }
	  r->next = net->routes;
	  net->routes = r;
	}
      else if (table->gc_counter++ >= table->config->gc_max_ops &&
	       table->gc_time + table->config->gc_min_time <= now)
	rt_schedule_gc(table);	/* Node may now be empty -- consider pruning */
    }
  else if (new)
    {
      /* The third case - the new route is not better than the old
	 best route (therefore old_best != NULL) and the old best
	 route was not removed (therefore old_best == net->routes).
	 We just link the new route after the old best route. */

      ASSERT(net->routes != NULL);
      new->next = net->routes->next;
      net->routes->next = new;
      rte_trace_in(D_ROUTES, p, new, "added");
    }

  /* Log the route removal */
  if (!new && old && (p->debug & D_ROUTES))
    {
      if (old != old_best)
	rte_trace_in(D_ROUTES, p, old, "removed");
      else if (net->routes)
	rte_trace_in(D_ROUTES, p, old, "removed [replaced]");
      else
	rte_trace_in(D_ROUTES, p, old, "removed [sole]");
    }

  if (old)
    {
      if (p->rte_remove)
	p->rte_remove(net, old);
      rte_free_quick(old);
    }
  if (new)
    {
      new->lastmod = now;
      if (p->rte_insert)
	p->rte_insert(net, new);
    }
}
575
static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */

/* Enter a (possibly nested) route-update section. The linear pool
   rte_update_pool stays valid until the matching unlock. */
static inline void
rte_update_lock(void)
{
  rte_update_nest_cnt++;
}

/* Leave a route-update section; when the outermost one ends, all
   temporary allocations made from rte_update_pool are released. */
static inline void
rte_update_unlock(void)
{
  if (!--rte_update_nest_cnt)
    lp_flush(rte_update_pool);
}
590
/**
 * rte_update - enter a new update to a routing table
 * @table: table to be updated
 * @net: network node
 * @p: protocol submitting the update
 * @src: protocol originating the update
 * @new: a &rte representing the new route or %NULL for route removal.
 *
 * This function is called by the routing protocols whenever they discover
 * a new route or wish to update/remove an existing route. The right announcement
 * sequence is to build route attributes first (either un-cached with @aflags set
 * to zero or a cached one using rta_lookup(); in this case please note that
 * you need to increase the use count of the attributes yourself by calling
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 * the appropriate data and finally submit the new &rte by calling rte_update().
 *
 * @src specifies the protocol that originally created the route and the meaning
 * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 * same value as @new->attrs->proto. @p specifies the protocol that called
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
 * stores @p in @new->sender.
 *
 * When rte_update() gets any route, it automatically validates it (checks,
 * whether the network and next hop address are valid IP addresses and also
 * whether a normal routing protocol doesn't try to smuggle a host or link
 * scope route to the table), converts all protocol dependent attributes stored
 * in the &rte to temporary extended attributes, consults import filters of the
 * protocol to see if the route should be accepted and/or its attributes modified,
 * stores the temporary attributes back to the &rte.
 *
 * Now, having a "public" version of the route, we
 * automatically find any old route defined by the protocol @src
 * for network @n, replace it by the new one (or removing it if @new is %NULL),
 * recalculate the optimal route for this destination and finally broadcast
 * the change (if any) to all routing protocols by calling rte_announce().
 *
 * All memory used for attribute lists and other temporary allocations is taken
 * from a special linear pool @rte_update_pool and freed when rte_update()
 * finishes.
 */

void
rte_update(rtable *table, net *net, struct proto *p, struct proto *src, rte *new)
{
  ea_list *tmpa = NULL;
  struct proto_stats *stats = &p->stats;

#ifdef CONFIG_PIPE
  /* Updates flowing back through a pipe are accounted to its peer side */
  if (proto_is_pipe(p) && (p->table == table))
    stats = pipe_get_peer_stats(p);
#endif

  rte_update_lock();
  if (new)
    {
      new->sender = p;
      struct filter *filter = p->in_filter;

      /* Do not filter routes going through the pipe,
	 they are filtered in the export filter only. */
#ifdef CONFIG_PIPE
      if (proto_is_pipe(p))
	filter = FILTER_ACCEPT;
#endif

      stats->imp_updates_received++;
      if (!rte_validate(new))
	{
	  rte_trace_in(D_FILTERS, p, new, "invalid");
	  stats->imp_updates_invalid++;
	  goto drop;
	}
      if (filter == FILTER_REJECT)
	{
	  stats->imp_updates_filtered++;
	  rte_trace_in(D_FILTERS, p, new, "filtered out");
	  goto drop;
	}
      /* Expose protocol-private data as temporary eattrs for the filter */
      if (src->make_tmp_attrs)
	tmpa = src->make_tmp_attrs(new, rte_update_pool);
      if (filter)
	{
	  ea_list *old_tmpa = tmpa;
	  int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
	  if (fr > F_ACCEPT)
	    {
	      stats->imp_updates_filtered++;
	      rte_trace_in(D_FILTERS, p, new, "filtered out");
	      goto drop;
	    }
	  /* The filter rebuilt the tmp attrs -- write them back to the rte */
	  if (tmpa != old_tmpa && src->store_tmp_attrs)
	    src->store_tmp_attrs(new, tmpa);
	}
      if (!(new->attrs->aflags & RTAF_CACHED)) /* Need to copy attributes */
	new->attrs = rta_lookup(new->attrs);
      new->flags |= REF_COW;
    }
  else
    stats->imp_withdraws_received++;

  rte_recalculate(table, net, p, src, new, tmpa);
  rte_update_unlock();
  return;

 drop:
  /* Rejected update: free the rte and process it as a plain withdraw */
  rte_free(new);
  rte_recalculate(table, net, p, src, NULL, NULL);
  rte_update_unlock();
}
700
701 /* Independent call to rte_announce(), used from next hop
702 recalculation, outside of rte_update(). new must be non-NULL */
703 static inline void
704 rte_announce_i(rtable *tab, unsigned type, net *n, rte *new, rte *old)
705 {
706 struct proto *src;
707 ea_list *tmpa;
708
709 rte_update_lock();
710 src = new->attrs->proto;
711 tmpa = src->make_tmp_attrs ? src->make_tmp_attrs(new, rte_update_pool) : NULL;
712 rte_announce(tab, type, n, new, old, tmpa);
713 rte_update_unlock();
714 }
715
/* Non-filtered route deletion, used during garbage collection.
   Drops @old from table @t by running a withdraw through
   rte_recalculate(), bypassing all import filters. */
void
rte_discard(rtable *t, rte *old)
{
  rte_update_lock();
  rte_recalculate(t, old->net, old->sender, old->attrs->proto, NULL, NULL);
  rte_update_unlock();
}
723
724 /**
725 * rte_dump - dump a route
726 * @e: &rte to be dumped
727 *
728 * This functions dumps contents of a &rte to debug output.
729 */
730 void
731 rte_dump(rte *e)
732 {
733 net *n = e->net;
734 if (n)
735 debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
736 else
737 debug("??? ");
738 debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
739 rta_dump(e->attrs);
740 if (e->attrs->proto->proto->dump_attrs)
741 e->attrs->proto->proto->dump_attrs(e);
742 debug("\n");
743 }
744
745 /**
746 * rt_dump - dump a routing table
747 * @t: routing table to be dumped
748 *
749 * This function dumps contents of a given routing table to debug output.
750 */
751 void
752 rt_dump(rtable *t)
753 {
754 rte *e;
755 net *n;
756 struct announce_hook *a;
757
758 debug("Dump of routing table <%s>\n", t->name);
759 #ifdef DEBUGGING
760 fib_check(&t->fib);
761 #endif
762 FIB_WALK(&t->fib, fn)
763 {
764 n = (net *) fn;
765 for(e=n->routes; e; e=e->next)
766 rte_dump(e);
767 }
768 FIB_WALK_END;
769 WALK_LIST(a, t->hooks)
770 debug("\tAnnounces routes to protocol %s\n", a->proto->name);
771 debug("\n");
772 }
773
774 /**
775 * rt_dump_all - dump all routing tables
776 *
777 * This function dumps contents of all routing tables to debug output.
778 */
779 void
780 rt_dump_all(void)
781 {
782 rtable *t;
783
784 WALK_LIST(t, routing_tables)
785 rt_dump(t);
786 }
787
788 static inline void
789 rt_schedule_gc(rtable *tab)
790 {
791 if (tab->gc_scheduled)
792 return;
793
794 tab->gc_scheduled = 1;
795 ev_schedule(tab->rt_event);
796 }
797
798 static inline void
799 rt_schedule_hcu(rtable *tab)
800 {
801 if (tab->hcu_scheduled)
802 return;
803
804 tab->hcu_scheduled = 1;
805 ev_schedule(tab->rt_event);
806 }
807
808 static inline void
809 rt_schedule_nhu(rtable *tab)
810 {
811 if (tab->nhu_state == 0)
812 ev_schedule(tab->rt_event);
813
814 /* state change 0->1, 2->3 */
815 tab->nhu_state |= 1;
816 }
817
818 static void
819 rt_event(void *ptr)
820 {
821 rtable *tab = ptr;
822
823 if (tab->hcu_scheduled)
824 rt_update_hostcache(tab);
825
826 if (tab->nhu_state)
827 rt_next_hop_update(tab);
828
829 if (tab->gc_scheduled)
830 rt_prune(tab);
831 }
832
833 void
834 rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
835 {
836 bzero(t, sizeof(*t));
837 fib_init(&t->fib, p, sizeof(net), 0, rte_init);
838 t->name = name;
839 t->config = cf;
840 init_list(&t->hooks);
841 if (cf)
842 {
843 t->rt_event = ev_new(p);
844 t->rt_event->hook = rt_event;
845 t->rt_event->data = t;
846 t->gc_time = now;
847 }
848 }
849
850 /**
851 * rt_init - initialize routing tables
852 *
853 * This function is called during BIRD startup. It initializes the
854 * routing table module.
855 */
856 void
857 rt_init(void)
858 {
859 rta_init();
860 rt_table_pool = rp_new(&root_pool, "Routing tables");
861 rte_update_pool = lp_new(rt_table_pool, 4080);
862 rte_slab = sl_new(rt_table_pool, sizeof(rte));
863 init_list(&routing_tables);
864 }
865
/**
 * rt_prune - prune a routing table
 * @tab: routing table to be pruned
 *
 * This function is called whenever a protocol shuts down. It scans
 * the routing table and removes all routes belonging to inactive
 * protocols and also stale network entries.
 */
static void
rt_prune(rtable *tab)
{
  struct fib_iterator fit;
  int rcnt = 0, rdel = 0, ncnt = 0, ndel = 0;	/* Counters for the debug summary */

  DBG("Pruning route table %s\n", tab->name);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif
  FIB_ITERATE_INIT(&fit, &tab->fib);
again:
  FIB_ITERATE_START(&tab->fib, &fit, f)
    {
      net *n = (net *) f;
      rte *e;
      ncnt++;
    rescan:
      /* Restart the scan after every removal -- rte_discard() may
	 rearrange the route list under us. */
      for (e=n->routes; e; e=e->next, rcnt++)
	if (e->sender->core_state != FS_HAPPY &&
	    e->sender->core_state != FS_FEEDING)
	  {
	    rte_discard(tab, e);
	    rdel++;
	    goto rescan;
	  }
      if (!n->routes)		/* Orphaned FIB entry? */
	{
	  /* Deleting invalidates the iterator, so park it and restart */
	  FIB_ITERATE_PUT(&fit, f);
	  fib_delete(&tab->fib, f);
	  ndel++;
	  goto again;
	}
    }
  FIB_ITERATE_END(f);
  DBG("Pruned %d of %d routes and %d of %d networks\n", rdel, rcnt, ndel, ncnt);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif
  /* Reset the GC bookkeeping so the next cycle starts fresh */
  tab->gc_counter = 0;
  tab->gc_time = now;
  tab->gc_scheduled = 0;
}
917
918 /**
919 * rt_prune_all - prune all routing tables
920 *
921 * This function calls rt_prune() for all known routing tables.
922 */
923 void
924 rt_prune_all(void)
925 {
926 rtable *t;
927
928 WALK_LIST(t, routing_tables)
929 rt_prune(t);
930 }
931
932 void
933 rt_preconfig(struct config *c)
934 {
935 struct symbol *s = cf_find_symbol("master");
936
937 init_list(&c->tables);
938 c->master_rtc = rt_new_table(s);
939 }
940
941
942 /*
943 * Some functions for handing internal next hop updates
944 * triggered by rt_schedule_nhu().
945 */
946
947 static inline int
948 hostentry_diff(struct hostentry *he, struct iface *iface, ip_addr gw, byte dest)
949 {
950 return (he->iface != iface) || !ipa_equal(he->gw, gw) || (he->dest != dest);
951 }
952
953 static inline int
954 rta_next_hop_outdated(rta *a)
955 {
956 struct hostentry *he = a->hostentry;
957 return he && hostentry_diff(he, a->iface, a->gw, a->dest);
958 }
959
960 static inline void
961 rta_apply_hostentry(rta *a, struct hostentry *he)
962 {
963 a->hostentry = he;
964 a->iface = he->iface;
965 a->gw = he->gw;
966 a->dest = he->dest;
967 }
968
969 static inline rte *
970 rt_next_hop_update_rte(rtable *tab, rte *old)
971 {
972 rta a;
973 memcpy(&a, old->attrs, sizeof(rta));
974 rta_apply_hostentry(&a, old->attrs->hostentry);
975 a.aflags = 0;
976
977 rte *e = sl_alloc(rte_slab);
978 memcpy(e, old, sizeof(rte));
979 e->attrs = rta_lookup(&a);
980
981 return e;
982 }
983
/*
 * rt_next_hop_update_net - refresh recursive next hops on one network
 *
 * Replaces every route on @n whose host entry changed with an updated
 * copy, re-elects and relinks the best route, and announces all
 * changes. Returns the number of routes rewritten (used by the caller
 * to meter work per event invocation).
 */
static inline int
rt_next_hop_update_net(rtable *tab, net *n)
{
  rte **k, *e, *new, *old_best, **new_best;
  int count = 0;
  int free_old_best = 0;

  old_best = n->routes;
  if (!old_best)
    return 0;

  new_best = NULL;

  for (k = &n->routes; e = *k; k = &e->next)
    {
      if (rta_next_hop_outdated(e->attrs))
	{
	  /* Swap the stale route for a refreshed copy in place */
	  new = rt_next_hop_update_rte(tab, e);
	  *k = new;

	  rte_announce_i(tab, RA_ANY, n, new, e);
	  rte_trace_in(D_ROUTES, new->sender, new, "updated");

	  if (e != old_best)
	    rte_free_quick(e);
	  else /* Freeing of the old best rte is postponed */
	    free_old_best = 1;	/* Still needed for the RA_OPTIMAL announce below */

	  e = new;
	  count++;
	}

      /* Track the position of the best route seen so far */
      if (!new_best || rte_better(e, *new_best))
	new_best = k;
    }

  /* Relink the new best route to the first position */
  new = *new_best;
  if (new != n->routes)
    {
      *new_best = new->next;
      new->next = n->routes;
      n->routes = new;
    }

  /* Announce the new best route */
  if (new != old_best)
    {
      rte_announce_i(tab, RA_OPTIMAL, n, new, old_best);
      rte_trace_in(D_ROUTES, new->sender, new, "updated [best]");
    }

  if (free_old_best)
    rte_free_quick(old_best);

  return count;
}
1041
/*
 * rt_next_hop_update - incremental next hop refresh for a table
 *
 * Walks the FIB in bounded chunks (max_feed networks per invocation),
 * suspending itself via the table event when the budget is exhausted.
 * nhu_state: 0 = idle, 1 = scheduled, 2 = running, 3 = running with
 * another pass requested (see rt_schedule_nhu()).
 */
static void
rt_next_hop_update(rtable *tab)
{
  struct fib_iterator *fit = &tab->nhu_fit;
  int max_feed = 32;	/* Per-invocation work budget */

  if (tab->nhu_state == 0)
    return;

  if (tab->nhu_state == 1)
    {
      /* Fresh pass: position the iterator at the start of the FIB */
      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->nhu_state = 2;
    }

  FIB_ITERATE_START(&tab->fib, fit, fn)
    {
      if (max_feed <= 0)
	{
	  /* Budget exhausted -- park the iterator and resume later */
	  FIB_ITERATE_PUT(fit, fn);
	  ev_schedule(tab->rt_event);
	  return;
	}
      max_feed -= rt_next_hop_update_net(tab, (net *) fn);
    }
  FIB_ITERATE_END(fn);

  /* state change 2->0, 3->1 */
  tab->nhu_state &= 1;

  /* Another pass was requested while we were running -- go again */
  if (tab->nhu_state > 0)
    ev_schedule(tab->rt_event);
}
1075
1076
1077 struct rtable_config *
1078 rt_new_table(struct symbol *s)
1079 {
1080 struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));
1081
1082 cf_define_symbol(s, SYM_TABLE, c);
1083 c->name = s->name;
1084 add_tail(&new_config->tables, &c->n);
1085 c->gc_max_ops = 1000;
1086 c->gc_min_time = 5;
1087 return c;
1088 }
1089
/**
 * rt_lock_table - lock a routing table
 * @r: routing table to be locked
 *
 * Lock a routing table, because it's in use by a protocol,
 * preventing it from being freed when it gets undefined in a new
 * configuration.
 */
void
rt_lock_table(rtable *r)
{
  r->use_count++;
}
1103
/**
 * rt_unlock_table - unlock a routing table
 * @r: routing table to be unlocked
 *
 * Unlock a routing table formerly locked by rt_lock_table(),
 * that is decrease its use count and delete it if it's scheduled
 * for deletion by configuration changes.
 */
void
rt_unlock_table(rtable *r)
{
  if (!--r->use_count && r->deleted)
    {
      struct config *conf = r->deleted;
      DBG("Deleting routing table %s\n", r->name);
      /* Tear down in dependency order: caches, list membership,
	 FIB contents, event, then the table itself. */
      if (r->hostcache)
	rt_free_hostcache(r);
      rem_node(&r->n);
      fib_free(&r->fib);
      rfree(r->rt_event);
      mb_free(r);
      /* The old configuration may now be released */
      config_del_obstacle(conf);
    }
}
1128
/**
 * rt_commit - commit new routing table configuration
 * @new: new configuration
 * @old: original configuration or %NULL if it's boot time config
 *
 * Scan differences between @old and @new configuration and modify
 * the routing tables according to these changes. If @new defines a
 * previously unknown table, create it, if it omits a table existing
 * in @old, schedule it for deletion (it gets deleted when all protocols
 * disconnect from it by calling rt_unlock_table()), if it exists
 * in both configurations, leave it unchanged.
 */
void
rt_commit(struct config *new, struct config *old)
{
  struct rtable_config *o, *r;

  DBG("rt_commit:\n");
  if (old)
    {
      WALK_LIST(o, old->tables)
	{
	  rtable *ot = o->table;
	  if (!ot->deleted)
	    {
	      struct symbol *sym = cf_find_symbol(o->name);
	      if (sym && sym->class == SYM_TABLE && !new->shutdown)
		{
		  /* Table survives -- rebind it to the new config object */
		  DBG("\t%s: same\n", o->name);
		  r = sym->def;
		  r->table = ot;
		  ot->name = r->name;
		  ot->config = r;
		}
	      else
		{
		  /* Table dropped from the config: mark it deleted and keep
		     the old config alive until everyone unlocks the table */
		  DBG("\t%s: deleted\n", o->name);
		  ot->deleted = old;
		  config_add_obstacle(old);
		  /* Lock+unlock triggers immediate deletion if nobody uses it */
		  rt_lock_table(ot);
		  rt_unlock_table(ot);
		}
	    }
	}
    }

  /* Instantiate tables that are new in this configuration */
  WALK_LIST(r, new->tables)
    if (!r->table)
      {
	rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
	DBG("\t%s: created\n", r->name);
	rt_setup(rt_table_pool, t, r->name, r);
	add_tail(&routing_tables, &t->n);
	r->table = t;
      }
  DBG("\tdone\n");
}
1186
1187 static inline void
1188 do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
1189 {
1190 struct proto *q = e->attrs->proto;
1191 ea_list *tmpa;
1192
1193 rte_update_lock();
1194 tmpa = q->make_tmp_attrs ? q->make_tmp_attrs(e, rte_update_pool) : NULL;
1195 do_rte_announce(h, type, n, e, p->refeeding ? e : NULL, tmpa, p->refeeding);
1196 rte_update_unlock();
1197 }
1198
1199 /**
1200 * rt_feed_baby - advertise routes to a new protocol
1201 * @p: protocol to be fed
1202 *
1203 * This function performs one pass of advertisement of routes to a newly
1204 * initialized protocol. It's called by the protocol code as long as it
1205 * has something to do. (We avoid transferring all the routes in single
1206 * pass in order not to monopolize CPU time.)
1207 */
int
rt_feed_baby(struct proto *p)
{
  struct announce_hook *h;
  struct fib_iterator *fit;
  int max_feed = 256;		/* Route announcement budget per pass */

  if (!p->feed_ahook)			/* Need to initialize first */
    {
      if (!p->ahooks)
	return 1;
      DBG("Announcing routes to new protocol %s\n", p->name);
      p->feed_ahook = p->ahooks;
      fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
      goto next_hook;
    }
  fit = p->feed_iterator;

again:
  h = p->feed_ahook;
  FIB_ITERATE_START(&h->table->fib, fit, fn)
    {
      net *n = (net *) fn;
      rte *e = n->routes;
      if (max_feed <= 0)
	{
	  /* Budget exhausted: park the iterator in the FIB and resume
	     here on the next call. */
	  FIB_ITERATE_PUT(fit, fn);
	  return 0;
	}

      /* RA_OPTIMAL: feed only the best route of each network */
      if (p->accept_ra_types == RA_OPTIMAL)
	if (e)
	  {
	    if (p->core_state != FS_FEEDING)
	      return 1;	/* In the meantime, the protocol fell down. */
	    do_feed_baby(p, RA_OPTIMAL, h, n, e);
	    max_feed--;
	  }

      /* RA_ANY: feed every route of each network */
      if (p->accept_ra_types == RA_ANY)
	for(e = n->routes; e != NULL; e = e->next)
	  {
	    if (p->core_state != FS_FEEDING)
	      return 1;	/* In the meantime, the protocol fell down. */
	    do_feed_baby(p, RA_ANY, h, n, e);
	    max_feed--;
	  }
    }
  FIB_ITERATE_END(fn);
  /* Current table fully fed; advance to the next announce hook */
  p->feed_ahook = h->next;
  if (!p->feed_ahook)
    {
      mb_free(p->feed_iterator);
      p->feed_iterator = NULL;
      return 1;
    }

next_hook:
  h = p->feed_ahook;
  FIB_ITERATE_INIT(fit, &h->table->fib);
  goto again;
}
1270
1271 /**
1272 * rt_feed_baby_abort - abort protocol feeding
1273 * @p: protocol
1274 *
1275 * This function is called by the protocol code when the protocol
1276 * stops or ceases to exist before the last iteration of rt_feed_baby()
1277 * has finished.
1278 */
1279 void
1280 rt_feed_baby_abort(struct proto *p)
1281 {
1282 if (p->feed_ahook)
1283 {
1284 /* Unlink the iterator and exit */
1285 fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
1286 p->feed_ahook = NULL;
1287 }
1288 }
1289
1290
static inline unsigned
ptr_hash(void *ptr)
{
  /* Mix pointer bits so that pointers differing only in high or low
     bits still land in different hash buckets. */
  uintptr_t v = (uintptr_t) ptr;
  uintptr_t mixed = v ^ (v << 8) ^ (v >> 16);

  return mixed;
}
1297
1298 static inline unsigned
1299 hc_hash(ip_addr a, rtable *dep)
1300 {
1301 return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
1302 }
1303
1304 static inline void
1305 hc_insert(struct hostcache *hc, struct hostentry *he)
1306 {
1307 unsigned int k = he->hash_key >> hc->hash_shift;
1308 he->next = hc->hash_table[k];
1309 hc->hash_table[k] = he;
1310 }
1311
1312 static inline void
1313 hc_remove(struct hostcache *hc, struct hostentry *he)
1314 {
1315 struct hostentry **hep;
1316 unsigned int k = he->hash_key >> hc->hash_shift;
1317
1318 for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
1319 *hep = he->next;
1320 }
1321
/* Hostcache hash table sizing. Note that HC_HI_MARK and HC_LO_MARK are
   textual operator macros: "hsize HC_HI_MARK" expands to "hsize *4" and
   "hsize HC_LO_MARK" to "hsize /5" (see hc_alloc_table()). */
#define HC_DEF_ORDER 10		/* Initial table order (1 << 10 buckets) */
#define HC_HI_MARK *4		/* Grow when items exceed 4 * size... */
#define HC_HI_STEP 2		/* ...by increasing the order by 2 */
#define HC_HI_ORDER 16 /* Must be at most 16 */
#define HC_LO_MARK /5		/* Shrink when items drop below size / 5... */
#define HC_LO_STEP 2		/* ...by decreasing the order by 2 */
#define HC_LO_ORDER 10		/* Never shrink below this order */
1329
/* Allocate a zeroed hash table of 2^order buckets for @hc and set the
   grow/shrink thresholds accordingly. Hash keys are 16-bit, so a bucket
   index is the key shifted right by (16 - order). */
static void
hc_alloc_table(struct hostcache *hc, unsigned order)
{
  unsigned hsize = 1 << order;
  hc->hash_order = order;
  hc->hash_shift = 16 - order;
  /* HC_HI_MARK / HC_LO_MARK are textual "*4" and "/5" operators — see
     the macro definitions above. At the extreme orders the thresholds
     are pinned so the table never grows past HC_HI_ORDER or shrinks
     below HC_LO_ORDER. */
  hc->hash_max = (order >= HC_HI_ORDER) ? ~0 : (hsize HC_HI_MARK);
  hc->hash_min = (order <= HC_LO_ORDER) ? 0 : (hsize HC_LO_MARK);

  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
}
1341
1342 static void
1343 hc_resize(struct hostcache *hc, unsigned new_order)
1344 {
1345 unsigned old_size = 1 << hc->hash_order;
1346 struct hostentry **old_table = hc->hash_table;
1347 struct hostentry *he, *hen;
1348 int i;
1349
1350 hc_alloc_table(hc, new_order);
1351 for (i = 0; i < old_size; i++)
1352 for (he = old_table[i]; he != NULL; he=hen)
1353 {
1354 hen = he->next;
1355 hc_insert(hc, he);
1356 }
1357 mb_free(old_table);
1358 }
1359
1360 static struct hostentry *
1361 hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
1362 {
1363 struct hostentry *he = sl_alloc(hc->slab);
1364
1365 he->addr = a;
1366 he->link = ll;
1367 he->tab = dep;
1368 he->hash_key = k;
1369 he->uc = 0;
1370
1371 add_tail(&hc->hostentries, &he->ln);
1372 hc_insert(hc, he);
1373
1374 hc->hash_items++;
1375 if (hc->hash_items > hc->hash_max)
1376 hc_resize(hc, hc->hash_order + HC_HI_STEP);
1377
1378 return he;
1379 }
1380
1381 static void
1382 hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
1383 {
1384 rem_node(&he->ln);
1385 hc_remove(hc, he);
1386 sl_free(hc->slab, he);
1387
1388 hc->hash_items--;
1389 if (hc->hash_items < hc->hash_min)
1390 hc_resize(hc, hc->hash_order - HC_LO_STEP);
1391 }
1392
1393 static void
1394 rt_init_hostcache(rtable *tab)
1395 {
1396 struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
1397 init_list(&hc->hostentries);
1398
1399 hc->hash_items = 0;
1400 hc_alloc_table(hc, HC_DEF_ORDER);
1401 hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));
1402
1403 hc->lp = lp_new(rt_table_pool, 1008);
1404 hc->trie = f_new_trie(hc->lp);
1405
1406 tab->hostcache = hc;
1407 }
1408
/* Release all hostcache resources of @tab. Entries that are still in
   use indicate a bug elsewhere; we complain but free everything anyway
   (the whole table is going away). */
static void
rt_free_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;

  node *n;
  WALK_LIST(n, hc->hostentries)
    {
      struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
      if (he->uc)
	log(L_ERR "Hostcache is not empty in table %s", tab->name);
    }

  /* Freeing the slab releases all hostentries at once */
  rfree(hc->slab);
  rfree(hc->lp);
  mb_free(hc->hash_table);
  mb_free(hc);
}
1427
1428 static void
1429 rt_notify_hostcache(rtable *tab, net *net)
1430 {
1431 struct hostcache *hc = tab->hostcache;
1432
1433 if (tab->hcu_scheduled)
1434 return;
1435
1436 if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
1437 rt_schedule_hcu(tab);
1438 }
1439
1440 static int
1441 if_local_addr(ip_addr a, struct iface *i)
1442 {
1443 struct ifa *b;
1444
1445 WALK_LIST(b, i->addrs)
1446 if (ipa_equal(a, b->ip))
1447 return 1;
1448
1449 return 0;
1450 }
1451
/* Recompute the resolved next hop of hostentry @he by a longest-prefix
   lookup in @tab. Returns nonzero when the result differs from the
   previous state, so callers can schedule next-hop updates in the
   dependent tables. */
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  struct iface *old_iface = he->iface;
  ip_addr old_gw = he->gw;
  byte old_dest = he->dest;
  int pxlen = 0;

  /* Longest-prefix match for the host address */
  net *n = fib_route(&tab->fib, he->addr, MAX_PREFIX_LENGTH);
  if (n && n->routes)
    {
      rta *a = n->routes->attrs;
      pxlen = n->n.pxlen;

      if (a->dest == RTD_DEVICE)
	{
	  if (if_local_addr(he->addr, a->iface))
	    {
	      /* The host address is a local address, this is not valid */
	      log(L_WARN "Next hop address %I is a local address of iface %s",
		  he->addr, a->iface->name);
	      he->iface = NULL;
	      he->gw = IPA_NONE;
	      he->dest = RTD_UNREACHABLE;
	    }
	  else
	    {
	      /* The host is directly reachable, use link as a gateway */
	      he->iface = a->iface;
	      he->gw = he->link;
	      he->dest = RTD_ROUTER;
	    }
	}
      else
	{
	  /* The host is reachable through some route entry */
	  he->iface = a->iface;
	  he->gw = a->gw;
	  he->dest = a->dest;
	}
    }
  else
    {
      /* The host is unreachable */
      he->iface = NULL;
      he->gw = IPA_NONE;
      he->dest = RTD_UNREACHABLE;
    }

  /* Add a prefix range to the trie so that changes of any covering
     route wake up the hostcache — see rt_notify_hostcache() */
  trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);

  return hostentry_diff(he, old_iface, old_gw, old_dest);
}
1506
/* Refresh all hostentries of @tab's hostcache after the table changed.
   Unused entries are garbage-collected; dependent tables whose next
   hops changed get a next-hop update scheduled. */
static void
rt_update_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;
  struct hostentry *he;
  node *n, *x;

  /* Reset the trie; it is rebuilt by rt_update_hostentry() below */
  lp_flush(hc->lp);
  hc->trie = f_new_trie(hc->lp);

  /* DELSAFE walk: entries may be deleted while iterating */
  WALK_LIST_DELSAFE(n, x, hc->hostentries)
    {
      he = SKIP_BACK(struct hostentry, ln, n);
      if (!he->uc)
	{
	  /* No route references this entry any more — drop it */
	  hc_delete_hostentry(hc, he);
	  continue;
	}

      if (rt_update_hostentry(tab, he))
	rt_schedule_nhu(he->tab);
    }

  tab->hcu_scheduled = 0;
}
1533
1534 static struct hostentry *
1535 rt_find_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
1536 {
1537 struct hostentry *he;
1538
1539 if (!tab->hostcache)
1540 rt_init_hostcache(tab);
1541
1542 unsigned int k = hc_hash(a, dep);
1543 struct hostcache *hc = tab->hostcache;
1544 for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
1545 if (ipa_equal(he->addr, a) && (he->tab == dep))
1546 return he;
1547
1548 he = hc_new_hostentry(hc, a, ll, dep, k);
1549 rt_update_hostentry(tab, he);
1550 return he;
1551 }
1552
1553 void
1554 rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
1555 {
1556 rta_apply_hostentry(a, rt_find_hostentry(tab, *gw, *ll, dep));
1557 }
1558
1559 /*
1560 * CLI commands
1561 */
1562
/* Format the next-hop part of route @e ("via ... on ...", "dev ...",
   or one of the special destinations) into buffer @via. The buffer
   must hold at least STD_ADDRESS_P_LENGTH+32 bytes — see rt_show_rte(). */
static void
rt_format_via(rte *e, byte *via)
{
  rta *a = e->attrs;

  switch (a->dest)
    {
    case RTD_ROUTER: bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
    case RTD_DEVICE: bsprintf(via, "dev %s", a->iface->name); break;
    case RTD_BLACKHOLE: bsprintf(via, "blackhole"); break;
    case RTD_UNREACHABLE: bsprintf(via, "unreachable"); break;
    case RTD_PROHIBIT: bsprintf(via, "prohibited"); break;
    default: bsprintf(via, "???");
    }
}
1578
/* Print one route @e (with optional prefix string @ia, empty after the
   first route of a network) to CLI @c according to show options @d.
   @tmpa holds the protocol's temporary attributes for the route. */
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
{
  byte via[STD_ADDRESS_P_LENGTH+32], from[STD_ADDRESS_P_LENGTH+6];
  byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
  rta *a = e->attrs;
  int primary = (e->net->routes == e);	/* First route on the list is the best one */

  rt_format_via(e, via);
  tm_format_datetime(tm, &config->tf_route, e->lastmod);
  /* Show "from" only when it differs from the gateway */
  if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
    bsprintf(from, " from %I", a->from);
  else
    from[0] = 0;
  if (a->proto->proto->get_route_info || d->verbose)
    {
      /* Need to normalize the extended attributes */
      ea_list *t = tmpa;
      t = ea_append(t, a->eattrs);
      /* NOTE(review): alloca'd normalized list lives only for this call */
      tmpa = alloca(ea_scan(t));
      ea_merge(t, tmpa);
      ea_sort(tmpa);
    }
  if (a->proto->proto->get_route_info)
    a->proto->proto->get_route_info(e, info, tmpa);
  else
    bsprintf(info, " (%d)", e->pref);
  cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, via, a->proto->name,
	     tm, from, primary ? " *" : "", info);
  if (d->verbose)
    rta_show(c, a, tmpa);
}
1611
/* Show all routes of network @n on CLI @c, applying the filters and
   export simulation requested in @d. Uses the rte update buffer, so
   each route is processed under the update lock. */
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
  rte *e, *ee;
  byte ia[STD_ADDRESS_P_LENGTH+8];
  int ok;

  bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);
  if (n->routes)
    d->net_counter++;
  for(e=n->routes; e; e=e->next)
    {
      struct ea_list *tmpa, *old_tmpa;
      struct proto *p0 = e->attrs->proto;	/* Originating protocol */
      struct proto *p1 = d->export_protocol;	/* 'export' target */
      struct proto *p2 = d->show_protocol;	/* 'protocol' restriction */
      d->rt_counter++;
      ee = e;	/* Remember the original; filters may COW the rte */
      rte_update_lock();		/* We use the update buffer for filtering */
      old_tmpa = tmpa = p0->make_tmp_attrs ? p0->make_tmp_attrs(e, rte_update_pool) : NULL;
      ok = (d->filter == FILTER_ACCEPT || f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);
      if (p2 && p2 != p0) ok = 0;
      if (ok && d->export_mode)
	{
	  /* Simulate what the export protocol would do with the route */
	  int ic;
	  if ((ic = p1->import_control ? p1->import_control(p1, &e, &tmpa, rte_update_pool) : 0) < 0)
	    ok = 0;
	  else if (!ic && d->export_mode > 1)
	    {
	      /* FIXME - this shows what should be exported according
		 to current filters, but not what was really exported.
		 'configure soft' command may change the export filter
		 and do not update routes */

	      if ((p1->out_filter == FILTER_REJECT) ||
		  (p1->out_filter && f_run(p1->out_filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT))
		ok = 0;
	    }
	}
      if (ok)
	{
	  d->show_counter++;
	  if (d->stats < 2)
	    rt_show_rte(c, ia, e, d, tmpa);
	  ia[0] = 0;	/* Print the prefix only before the first shown route */
	}
      if (e != ee)
	rte_free(ee);	/* NOTE(review): frees the COW copy, e is the original — confirm against rte_cow() */
      rte_update_unlock();
      if (d->primary_only)
	break;
    }
}
1665
/* CLI continuation for 'show route' on a whole table: show a bounded
   batch of networks per call, parking the FIB iterator between calls
   so the CLI never blocks on a huge table. */
static void
rt_show_cont(struct cli *c)
{
  struct rt_show_data *d = c->rover;
#ifdef DEBUGGING
  unsigned max = 4;		/* Small batches to exercise resumption */
#else
  unsigned max = 64;		/* Networks shown per continuation */
#endif
  struct fib *fib = &d->table->fib;
  struct fib_iterator *it = &d->fit;

  FIB_ITERATE_START(fib, it, f)
    {
      net *n = (net *) f;
      /* Abort if the config we started under was replaced meanwhile */
      if (d->running_on_config && d->running_on_config != config)
	{
	  cli_printf(c, 8004, "Stopped due to reconfiguration");
	  goto done;
	}
      if (d->export_protocol &&
	  d->export_protocol->core_state != FS_HAPPY &&
	  d->export_protocol->core_state != FS_FEEDING)
	{
	  cli_printf(c, 8005, "Protocol is down");
	  goto done;
	}
      if (!max--)
	{
	  /* Batch done — park the iterator, resume on next call */
	  FIB_ITERATE_PUT(it, f);
	  return;
	}
      rt_show_net(c, n, d);
    }
  FIB_ITERATE_END(f);
  if (d->stats)
    cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
  else
    cli_printf(c, 0, "");
done:
  c->cont = c->cleanup = NULL;
}
1708
/* CLI cleanup hook: the session is going away in the middle of a table
   walk, so detach our iterator from the FIB. */
static void
rt_show_cleanup(struct cli *c)
{
  struct rt_show_data *d = c->rover;

  /* Unlink the iterator */
  fit_get(&d->table->fib, &d->fit);
}
1717
1718 void
1719 rt_show(struct rt_show_data *d)
1720 {
1721 net *n;
1722
1723 if (d->pxlen == 256)
1724 {
1725 FIB_ITERATE_INIT(&d->fit, &d->table->fib);
1726 this_cli->cont = rt_show_cont;
1727 this_cli->cleanup = rt_show_cleanup;
1728 this_cli->rover = d;
1729 }
1730 else
1731 {
1732 if (d->show_for)
1733 n = fib_route(&d->table->fib, d->prefix, d->pxlen);
1734 else
1735 n = fib_find(&d->table->fib, &d->prefix, d->pxlen);
1736 if (n)
1737 {
1738 rt_show_net(this_cli, n, d);
1739 cli_msg(0, "");
1740 }
1741 else
1742 cli_msg(8001, "Network not in table");
1743 }
1744 }
1745
/*
 *  Documentation for functions declared inline in route.h
 */
#if 0
/* NOTE: This block is never compiled. The dummy definitions exist only
   so the documentation extractor picks up the DOC comments for the
   inline functions actually defined in route.h. */

/**
 * net_find - find a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_find() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry or %NULL if no such network
 * exists.
 */
static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * net_get - obtain a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_get() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry. If no such entry exists, it's
 * created.
 */
static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * rte_cow - copy a route for writing
 * @r: a route entry to be copied
 *
 * rte_cow() takes a &rte and prepares it for modification. The exact action
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 * just returned unchanged, else a new temporary entry with the same contents
 * is created.
 *
 * The primary use of this function is inside the filter machinery -- when
 * a filter wants to modify &rte contents (to change the preference or to
 * attach another set of attributes), it must ensure that the &rte is not
 * shared with anyone else (and especially that it isn't stored in any routing
 * table).
 *
 * Result: a pointer to the new writable &rte.
 */
static inline rte * rte_cow(rte *r)
{ DUMMY; }

#endif