]> git.ipfire.org Git - thirdparty/bird.git/blob - nest/rt-table.c
Fixes subtle bug in temporary attribute handling
[thirdparty/bird.git] / nest / rt-table.c
1 /*
2 * BIRD -- Routing Tables
3 *
4 * (c) 1998--2000 Martin Mares <mj@ucw.cz>
5 *
6 * Can be freely distributed and used under the terms of the GNU GPL.
7 */
8
9 /**
10 * DOC: Routing tables
11 *
12 * Routing tables are probably the most important structures BIRD uses. They
13 * hold all the information about known networks, the associated routes and
14 * their attributes.
15 *
16 * There are multiple routing tables (a primary one together with any
17 * number of secondary ones if requested by the configuration). Each table
18 * is basically a FIB containing entries describing the individual
19 * destination networks. For each network (represented by structure &net),
20 * there is a one-way linked list of route entries (&rte), the first entry
21 * on the list being the best one (i.e., the one we currently use
22 * for routing), the order of the other ones is undetermined.
23 *
24 * The &rte contains information specific to the route (preference, protocol
25 * metrics, time of last modification etc.) and a pointer to a &rta structure
26 * (see the route attribute module for a precise explanation) holding the
27 * remaining route attributes which are expected to be shared by multiple
28 * routes in order to conserve memory.
29 */
30
31 #undef LOCAL_DEBUG
32
33 #include "nest/bird.h"
34 #include "nest/route.h"
35 #include "nest/protocol.h"
36 #include "nest/cli.h"
37 #include "nest/iface.h"
38 #include "lib/resource.h"
39 #include "lib/event.h"
40 #include "lib/string.h"
41 #include "conf/conf.h"
42 #include "filter/filter.h"
43 #include "lib/string.h"
44 #include "lib/alloca.h"
45
46 pool *rt_table_pool;
47
48 static slab *rte_slab;
49 static linpool *rte_update_pool;
50
51 static list routing_tables;
52
53 static void rt_format_via(rte *e, byte *via);
54 static void rt_free_hostcache(rtable *tab);
55 static void rt_notify_hostcache(rtable *tab, net *net);
56 static void rt_update_hostcache(rtable *tab);
57 static void rt_next_hop_update(rtable *tab);
58 static inline int rt_prune_table(rtable *tab);
59 static inline void rt_schedule_gc(rtable *tab);
60 static inline void rt_schedule_prune(rtable *tab);
61
62
63 static inline struct ea_list *
64 make_tmp_attrs(struct rte *rt, struct linpool *pool)
65 {
66 struct ea_list *(*mta)(struct rte *rt, struct linpool *pool);
67 mta = rt->attrs->src->proto->make_tmp_attrs;
68 return mta ? mta(rt, rte_update_pool) : NULL;
69 }
70
71 /* Like fib_route(), but skips empty net entries */
72 static net *
73 net_route(rtable *tab, ip_addr a, int len)
74 {
75 ip_addr a0;
76 net *n;
77
78 while (len >= 0)
79 {
80 a0 = ipa_and(a, ipa_mkmask(len));
81 n = fib_find(&tab->fib, &a0, len);
82 if (n && rte_is_valid(n->routes))
83 return n;
84 len--;
85 }
86 return NULL;
87 }
88
89 static void
90 rte_init(struct fib_node *N)
91 {
92 net *n = (net *) N;
93
94 N->flags = 0;
95 n->routes = NULL;
96 }
97
98 /**
99 * rte_find - find a route
100 * @net: network node
101 * @src: route source
102 *
103 * The rte_find() function returns a route for destination @net
104 * which is from route source @src.
105 */
106 rte *
107 rte_find(net *net, struct rte_src *src)
108 {
109 rte *e = net->routes;
110
111 while (e && e->attrs->src != src)
112 e = e->next;
113 return e;
114 }
115
116 /**
117 * rte_get_temp - get a temporary &rte
118 * @a: attributes to assign to the new route (a &rta; in case it's
119 * un-cached, rte_update() will create a cached copy automatically)
120 *
121 * Create a temporary &rte and bind it with the attributes @a.
122 * Also set route preference to the default preference set for
123 * the protocol.
124 */
125 rte *
126 rte_get_temp(rta *a)
127 {
128 rte *e = sl_alloc(rte_slab);
129
130 e->attrs = a;
131 e->flags = 0;
132 e->pref = a->src->proto->preference;
133 return e;
134 }
135
136 rte *
137 rte_do_cow(rte *r)
138 {
139 rte *e = sl_alloc(rte_slab);
140
141 memcpy(e, r, sizeof(rte));
142 e->attrs = rta_clone(r->attrs);
143 e->flags = 0;
144 return e;
145 }
146
147 static int /* Actually better or at least as good as */
148 rte_better(rte *new, rte *old)
149 {
150 int (*better)(rte *, rte *);
151
152 if (!rte_is_valid(old))
153 return 1;
154 if (!rte_is_valid(new))
155 return 0;
156
157 if (new->pref > old->pref)
158 return 1;
159 if (new->pref < old->pref)
160 return 0;
161 if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
162 {
163 /*
164 * If the user has configured protocol preferences, so that two different protocols
165 * have the same preference, try to break the tie by comparing addresses. Not too
166 * useful, but keeps the ordering of routes unambiguous.
167 */
168 return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
169 }
170 if (better = new->attrs->src->proto->rte_better)
171 return better(new, old);
172 return 0;
173 }
174
/* Log one route event for protocol @p: direction character @dir
   ('>' for import side, '<' for export side), message @msg, the
   route's prefix and its formatted next-hop information. */
static void
rte_trace(struct proto *p, rte *e, int dir, char *msg)
{
  byte via[STD_ADDRESS_P_LENGTH+32];

  rt_format_via(e, via);
  log(L_TRACE "%s %c %s %I/%d %s", p->name, dir, msg, e->net->n.prefix, e->net->n.pxlen, via);
}
183
/* Trace a route received by protocol @p, if the given debug flag is enabled */
static inline void
rte_trace_in(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '>', msg);
}
190
/* Trace a route sent out by protocol @p, if the given debug flag is enabled */
static inline void
rte_trace_out(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '<', msg);
}
197
/*
 * export_filter - run the export path (import_control hook + export
 * filter) of announce hook @ah on route @rt0.
 *
 * Returns the route to be exported (possibly a modified private copy)
 * or NULL when the route is rejected.  If a private copy was created,
 * it is also stored in *rt_free so the caller can dispose of it after
 * the announcement.  @tmpa passes the temporary attribute list in and
 * out (a local one is used when the caller passes NULL).  With @silent
 * set, statistics and trace messages are suppressed - used when probing
 * whether an 'old' route would have been exported.
 */
static rte *
export_filter(struct announce_hook *ah, rte *rt0, rte **rt_free, ea_list **tmpa, int silent)
{
  struct proto *p = ah->proto;
  struct filter *filter = ah->out_filter;
  struct proto_stats *stats = ah->stats;
  ea_list *tmpb = NULL;
  rte *rt;
  int v;

  rt = rt0;
  *rt_free = NULL;

  if (!tmpa)
    tmpa = &tmpb;

  *tmpa = make_tmp_attrs(rt, rte_update_pool);

  /* Protocol may veto (v < 0) or force-accept (v > 0) before filtering */
  v = p->import_control ? p->import_control(p, &rt, tmpa, rte_update_pool) : 0;
  if (v < 0)
    {
      if (silent)
	goto reject;

      stats->exp_updates_rejected++;
      if (v == RIC_REJECT)
	rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
      goto reject;
    }
  if (v > 0)
    {
      if (!silent)
	rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
      goto accept;
    }

  v = filter && ((filter == FILTER_REJECT) ||
		 (f_run(filter, &rt, tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT));
  if (v)
    {
      if (silent)
	goto reject;

      stats->exp_updates_filtered++;
      rte_trace_out(D_FILTERS, p, rt, "filtered out");
      goto reject;
    }

 accept:
  /* f_run()/import_control may have replaced rt with a private copy */
  if (rt != rt0)
    *rt_free = rt;
  return rt;

 reject:
  /* Discard temporary rte */
  if (rt != rt0)
    rte_free(rt);
  return NULL;
}
257
/*
 * do_rt_notify - deliver one (new, old) route pair to the protocol's
 * rt_notify() hook, applying the export limit and updating export
 * statistics.  @tmpa is the temporary attribute list of @new (may be
 * NULL); @refeed marks deliveries that are part of a refeed cycle.
 */
static void
do_rt_notify(struct announce_hook *ah, net *net, rte *new, rte *old, ea_list *tmpa, int refeed)
{
  struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;


  /*
   * First, apply export limit.
   *
   * Export route limits has several problems. Because exp_routes
   * counter is reset before refeed, we don't really know whether
   * limit is breached and whether the update is new or not. Therefore
   * the number of really exported routes may exceed the limit
   * temporarily (routes exported before and new routes in refeed).
   *
   * Minor advantage is that if the limit is decreased and refeed is
   * requested, the number of exported routes really decrease.
   *
   * Second problem is that with export limits, we don't know whether
   * old was really exported (it might be blocked by limit). When a
   * withdraw is exported, we announce it even when the previous
   * update was blocked. This is not a big issue, but the same problem
   * is in updating exp_routes counter. Therefore, to be consistent in
   * increases and decreases of exp_routes, we count exported routes
   * regardless of blocking by limits.
   *
   * Similar problem is in handling updates - when a new route is
   * received and blocking is active, the route would be blocked, but
   * when an update for the route will be received later, the update
   * would be propagated (as old != NULL). Therefore, we have to block
   * also non-new updates (contrary to import blocking).
   */

  struct proto_limit *l = ah->out_limit;
  if (l && new)
    {
      if ((!old || refeed) && (stats->exp_routes >= l->limit))
	proto_notify_limit(ah, l, PLD_OUT, stats->exp_routes);

      if (l->state == PLS_BLOCKED)
	{
	  stats->exp_routes++;	/* see note above */
	  stats->exp_updates_rejected++;
	  rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
	  new = NULL;

	  /* Blocked update with no old route - nothing to announce */
	  if (!old)
	    return;
	}
    }


  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  /* Hack: We do not decrease exp_routes during refeed, we instead
     reset exp_routes at the start of refeed. */
  if (new)
    stats->exp_routes++;
  if (old && !refeed)
    stats->exp_routes--;

  if (p->debug & D_ROUTES)
    {
      if (new && old)
	rte_trace_out(D_ROUTES, p, new, "replaced");
      else if (new)
	rte_trace_out(D_ROUTES, p, new, "added");
      else if (old)
	rte_trace_out(D_ROUTES, p, old, "removed");
    }
  if (!new)
    p->rt_notify(p, ah->table, net, NULL, old, NULL);
  else if (tmpa)
    {
      /* Temporarily splice the route's own eattrs after the temporary
	 list for the duration of the hook call, then unsplice. */
      ea_list *t = tmpa;
      while (t->next)
	t = t->next;
      t->next = new->attrs->eattrs;
      p->rt_notify(p, ah->table, net, new, old, tmpa);
      t->next = NULL;
    }
  else
    p->rt_notify(p, ah->table, net, new, old, new->attrs->eattrs);
}
346
/*
 * rt_notify_basic - announce a route change to one hook for RA_OPTIMAL /
 * RA_ANY announcements.  Both @new0 and @old0 are passed through the
 * export filter; temporary filtered copies are freed afterwards.
 * @refeed marks refeed deliveries (old route is then not re-filtered).
 */
static void
rt_notify_basic(struct announce_hook *ah, net *net, rte *new0, rte *old0, int refeed)
{
  struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;

  rte *new = new0;
  rte *old = old0;
  rte *new_free = NULL;
  rte *old_free = NULL;
  ea_list *tmpa = NULL;

  if (new)
    stats->exp_updates_received++;
  else
    stats->exp_withdraws_received++;

  /*
   * This is a tricky part - we don't know whether route 'old' was
   * exported to protocol 'p' or was filtered by the export filter.
   * We try to run the export filter to know this to have a correct
   * value in 'old' argument of rte_update (and proper filter value)
   *
   * FIXME - this is broken because 'configure soft' may change
   * filters but keep routes. Refeed is expected to be called after
   * change of the filters and with old == new, therefore we do not
   * even try to run the filter on an old route, This may lead to
   * 'spurious withdraws' but ensure that there are no 'missing
   * withdraws'.
   *
   * This is not completely safe as there is a window between
   * reconfiguration and the end of refeed - if a newly filtered
   * route disappears during this period, proper withdraw is not
   * sent (because old would be also filtered) and the route is
   * not refeeded (because it disappeared before that).
   */

  if (new)
    new = export_filter(ah, new, &new_free, &tmpa, 0);

  /* Old route is filtered silently - no stats, no traces */
  if (old && !refeed)
    old = export_filter(ah, old, &old_free, NULL, 1);

  if (!new && !old)
    {
      /*
       * As mentioned above, 'old' value may be incorrect in some race conditions.
       * We generally ignore it with the exception of withdraw to pipe protocol.
       * In that case we rather propagate unfiltered withdraws regardless of
       * export filters to ensure that when a protocol is flushed, its routes are
       * removed from all tables. Possible spurious unfiltered withdraws are not
       * problem here as they are ignored if there is no corresponding route at
       * the other end of the pipe. We directly call rt_notify() hook instead of
       * do_rt_notify() to avoid logging and stat counters.
       */

#ifdef CONFIG_PIPE
      if ((p->proto == &proto_pipe) && !new0 && (p != old0->sender->proto))
	p->rt_notify(p, ah->table, net, NULL, old0, NULL);
#endif

      return;
    }

  do_rt_notify(ah, net, new, old, tmpa, refeed);

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
419
/*
 * rt_notify_accepted - announce a route change to one hook in RA_ACCEPTED
 * mode (used for sorted tables): the exported route is the first route
 * in the net's list accepted by the export filter, so both the new and
 * the old "first accepted" route must be determined here.
 * @feed: 0 for a regular update, 1 for initial feed, 2 for refeed.
 */
static void
rt_notify_accepted(struct announce_hook *ah, net *net, rte *new_changed, rte *old_changed, rte *before_old, int feed)
{
  // struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;

  rte *r;
  rte *new_best = NULL;
  rte *old_best = NULL;
  rte *new_free = NULL;
  rte *old_free = NULL;
  ea_list *tmpa = NULL;

  /* Used to track whether we met old_changed position. If before_old is NULL
     old_changed was the first and we met it implicitly before current best route. */
  int old_meet = old_changed && !before_old;

  /* Note that before_old is either NULL or valid (not rejected) route.
     If old_changed is valid, before_old have to be too. If old changed route
     was not valid, caller must use NULL for both old_changed and before_old. */

  if (new_changed)
    stats->exp_updates_received++;
  else
    stats->exp_withdraws_received++;

  /* First, find the new_best route - first accepted by filters */
  for (r=net->routes; rte_is_valid(r); r=r->next)
    {
      if (new_best = export_filter(ah, r, &new_free, &tmpa, 0))
	break;

      /* Note if we walked around the position of old_changed route */
      if (r == before_old)
	old_meet = 1;
    }

  /*
   * Second, handle the feed case. That means we do not care for
   * old_best. It is NULL for feed, and the new_best for refeed.
   * For refeed, there is a hack similar to one in rt_notify_basic()
   * to ensure withdraws in case of changed filters
   */
  if (feed)
    {
      if (feed == 2)	/* refeed */
	old_best = new_best ? new_best :
	  (rte_is_valid(net->routes) ? net->routes : NULL);
      else
	old_best = NULL;

      if (!new_best && !old_best)
	return;

      goto found;
    }

  /*
   * Now, we find the old_best route. Generally, it is the same as the
   * new_best, unless new_best is the same as new_changed or
   * old_changed is accepted before new_best.
   *
   * There are four cases:
   *
   * - We would find and accept old_changed before new_best, therefore
   *   old_changed is old_best. In remaining cases we suppose this
   *   is not true.
   *
   * - We found no new_best, therefore there is also no old_best and
   *   we ignore this withdraw.
   *
   * - We found new_best different than new_changed, therefore
   *   old_best is the same as new_best and we ignore this update.
   *
   * - We found new_best the same as new_changed, therefore it cannot
   *   be old_best and we have to continue search for old_best.
   */

  /* First case */
  if (old_meet)
    if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
      goto found;

  /* Second case */
  if (!new_best)
    return;

  /* Third case, we use r instead of new_best, because export_filter() could change it */
  if (r != new_changed)
    {
      if (new_free)
	rte_free(new_free);
      return;
    }

  /* Fourth case */
  for (r=r->next; rte_is_valid(r); r=r->next)
    {
      if (old_best = export_filter(ah, r, &old_free, NULL, 1))
	goto found;

      if (r == before_old)
	if (old_best = export_filter(ah, old_changed, &old_free, NULL, 1))
	  goto found;
    }

  /* Implicitly, old_best is NULL and new_best is non-NULL */

 found:
  do_rt_notify(ah, net, new_best, old_best, tmpa, (feed == 2));

  /* Discard temporary rte's */
  if (new_free)
    rte_free(new_free);
  if (old_free)
    rte_free(old_free);
}
537
538 /**
539 * rte_announce - announce a routing table change
540 * @tab: table the route has been added to
541 * @type: type of route announcement (RA_OPTIMAL or RA_ANY)
542 * @net: network in question
543 * @new: the new route to be announced
544 * @old: the previous route for the same network
545 *
546 * This function gets a routing table update and announces it
547 * to all protocols that acccepts given type of route announcement
548 * and are connected to the same table by their announcement hooks.
549 *
550 * Route announcement of type RA_OPTIMAL si generated when optimal
551 * route (in routing table @tab) changes. In that case @old stores the
552 * old optimal route.
553 *
554 * Route announcement of type RA_ANY si generated when any route (in
555 * routing table @tab) changes In that case @old stores the old route
556 * from the same protocol.
557 *
558 * For each appropriate protocol, we first call its import_control()
559 * hook which performs basic checks on the route (each protocol has a
560 * right to veto or force accept of the route before any filter is
561 * asked) and adds default values of attributes specific to the new
562 * protocol (metrics, tags etc.). Then it consults the protocol's
563 * export filter and if it accepts the route, the rt_notify() hook of
564 * the protocol gets called.
565 */
566 static void
567 rte_announce(rtable *tab, unsigned type, net *net, rte *new, rte *old, rte *before_old)
568 {
569 if (!rte_is_valid(old))
570 old = before_old = NULL;
571
572 if (!rte_is_valid(new))
573 new = NULL;
574
575 if (!old && !new)
576 return;
577
578 if (type == RA_OPTIMAL)
579 {
580 if (new)
581 new->attrs->src->proto->stats.pref_routes++;
582 if (old)
583 old->attrs->src->proto->stats.pref_routes--;
584
585 if (tab->hostcache)
586 rt_notify_hostcache(tab, net);
587 }
588
589 struct announce_hook *a;
590 WALK_LIST(a, tab->hooks)
591 {
592 ASSERT(a->proto->export_state != ES_DOWN);
593 if (a->proto->accept_ra_types == type)
594 if (type == RA_ACCEPTED)
595 rt_notify_accepted(a, net, new, old, before_old, 0);
596 else
597 rt_notify_basic(a, net, new, old, 0);
598 }
599 }
600
601 static inline int
602 rte_validate(rte *e)
603 {
604 int c;
605 net *n = e->net;
606
607 if ((n->n.pxlen > BITS_PER_IP_ADDRESS) || !ip_is_prefix(n->n.prefix,n->n.pxlen))
608 {
609 log(L_WARN "Ignoring bogus prefix %I/%d received via %s",
610 n->n.prefix, n->n.pxlen, e->sender->proto->name);
611 return 0;
612 }
613
614 c = ipa_classify_net(n->n.prefix);
615 if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
616 {
617 log(L_WARN "Ignoring bogus route %I/%d received via %s",
618 n->n.prefix, n->n.pxlen, e->sender->proto->name);
619 return 0;
620 }
621
622 return 1;
623 }
624
/**
 * rte_free - delete a &rte
 * @e: &rte to be deleted
 *
 * rte_free() deletes the given &rte from the routing table it's linked to.
 * The attribute block is released only when it is cached (reference
 * counted); un-cached attributes are owned by the caller.
 */
void
rte_free(rte *e)
{
  if (rta_is_cached(e->attrs))
    rta_free(e->attrs);
  sl_free(rte_slab, e);
}
638
/* Fast path of rte_free() for routes whose attributes are known to be
   cached - skips the rta_is_cached() test. */
static inline void
rte_free_quick(rte *e)
{
  rta_free(e->attrs);
  sl_free(rte_slab, e);
}
645
/* Compare two routes for equality: identical attribute block, flags,
   protocol flags and preference, plus the protocol-specific rte_same()
   hook when the originating protocol defines one. */
static int
rte_same(rte *x, rte *y)
{
  return
    x->attrs == y->attrs &&
    x->flags == y->flags &&
    x->pflags == y->pflags &&
    x->pref == y->pref &&
    (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y));
}
656
657 static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
658
/*
 * rte_recalculate - core of the import path.  Replace (or remove, when
 * @new is NULL) the route from source @src in network @net, apply
 * receive/import limits, maintain the sorted or best-first route list,
 * and propagate the resulting change via rte_announce().  Takes
 * ownership of @new (it is freed or linked into the table).
 */
static void
rte_recalculate(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = ah->proto;
  struct rtable *table = ah->table;
  struct proto_stats *stats = ah->stats;
  static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  rte *before_old = NULL;
  rte *old_best = net->routes;
  rte *old = NULL;
  rte **k;

  k = &net->routes;			/* Find and remove original route from the same protocol */
  while (old = *k)
    {
      if (old->attrs->src == src)
	{
	  /* If there is the same route in the routing table but from
	   * a different sender, then there are two paths from the
	   * source protocol to this routing table through transparent
	   * pipes, which is not allowed.
	   *
	   * We log that and ignore the route. If it is withdraw, we
	   * ignore it completely (there might be 'spurious withdraws',
	   * see FIXME in do_rte_announce())
	   */
	  if (old->sender->proto != p)
	    {
	      if (new)
		{
		  log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %I/%d to table %s",
		      net->n.prefix, net->n.pxlen, table->name);
		  rte_free_quick(new);
		}
	      return;
	    }

	  if (new && rte_same(old, new))
	    {
	      /* No changes, ignore the new route */

	      if (!rte_is_filtered(new))
		{
		  stats->imp_updates_ignored++;
		  rte_trace_in(D_ROUTES, p, new, "ignored");
		}

	      rte_free_quick(new);
#ifdef CONFIG_RIP
	      /* lastmod is used internally by RIP as the last time
		 when the route was received. */
	      if (src->proto->proto == &proto_rip)
		old->lastmod = now;
#endif
	      return;
	    }
	  *k = old->next;
	  break;
	}
      k = &old->next;
      before_old = old;
    }

  /* before_old is meaningful only when an old route was actually found */
  if (!old)
    before_old = NULL;

  if (!old && !new)
    {
      stats->imp_withdraws_ignored++;
      return;
    }

  int new_ok = rte_is_ok(new);
  int old_ok = rte_is_ok(old);

  /* Receive limit counts all routes, including filtered ones */
  struct proto_limit *l = ah->rx_limit;
  if (l && !old && new)
    {
      u32 all_routes = stats->imp_routes + stats->filt_routes;

      if (all_routes >= l->limit)
	proto_notify_limit(ah, l, PLD_RX, all_routes);

      if (l->state == PLS_BLOCKED)
	{
	  /* In receive limit the situation is simple, old is NULL so
	     we just free new and exit like nothing happened */

	  stats->imp_updates_ignored++;
	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
	  rte_free_quick(new);
	  return;
	}
    }

  l = ah->in_limit;
  if (l && !old_ok && new_ok)
    {
      if (stats->imp_routes >= l->limit)
	proto_notify_limit(ah, l, PLD_IN, stats->imp_routes);

      if (l->state == PLS_BLOCKED)
	{
	  /* In import limit the situation is more complicated. We
	     shouldn't just drop the route, we should handle it like
	     it was filtered. We also have to continue the route
	     processing if old or new is non-NULL, but we should exit
	     if both are NULL as this case is probably assumed to be
	     already handled. */

	  stats->imp_updates_ignored++;
	  rte_trace_in(D_FILTERS, p, new, "ignored [limit]");

	  if (ah->in_keep_filtered)
	    new->flags |= REF_FILTERED;
	  else
	    { rte_free_quick(new); new = NULL; }

	  /* Note that old && !new could be possible when
	     ah->in_keep_filtered changed in the recent past. */

	  if (!old && !new)
	    return;

	  new_ok = 0;
	  goto skip_stats1;
	}
    }

  if (new_ok)
    stats->imp_updates_accepted++;
  else if (old_ok)
    stats->imp_withdraws_accepted++;
  else
    stats->imp_withdraws_ignored++;

 skip_stats1:

  if (new)
    rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
  if (old)
    rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;

  if (table->config->sorted)
    {
      /* If routes are sorted, just insert new route to appropriate position */
      if (new)
	{
	  if (before_old && !rte_better(new, before_old))
	    k = &before_old->next;
	  else
	    k = &net->routes;

	  for (; *k; k=&(*k)->next)
	    if (rte_better(new, *k))
	      break;

	  new->next = *k;
	  *k = new;
	}
    }
  else
    {
      /* If routes are not sorted, find the best route and move it on
	 the first position. There are several optimized cases. */

      if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
	goto do_recalculate;

      if (new && rte_better(new, old_best))
	{
	  /* The first case - the new route is clearly optimal,
	     we link it at the first position */

	  new->next = net->routes;
	  net->routes = new;
	}
      else if (old == old_best)
	{
	  /* The second case - the old best route disappeared, we add the
	     new route (if we have any) to the list (we don't care about
	     position) and then we elect the new optimal route and relink
	     that route at the first position and announce it. New optimal
	     route might be NULL if there is no more routes */

	do_recalculate:
	  /* Add the new route to the list */
	  if (new)
	    {
	      new->next = net->routes;
	      net->routes = new;
	    }

	  /* Find a new optimal route (if there is any) */
	  if (net->routes)
	    {
	      rte **bp = &net->routes;
	      for (k=&(*bp)->next; *k; k=&(*k)->next)
		if (rte_better(*k, *bp))
		  bp = k;

	      /* And relink it */
	      rte *best = *bp;
	      *bp = best->next;
	      best->next = net->routes;
	      net->routes = best;
	    }
	}
      else if (new)
	{
	  /* The third case - the new route is not better than the old
	     best route (therefore old_best != NULL) and the old best
	     route was not removed (therefore old_best == net->routes).
	     We just link the new route after the old best route. */

	  ASSERT(net->routes != NULL);
	  new->next = net->routes->next;
	  net->routes->next = new;
	}
      /* The fourth (empty) case - suboptimal route was removed, nothing to do */
    }

  if (new)
    new->lastmod = now;

  /* Log the route change */
  if (p->debug & D_ROUTES)
    {
      if (new_ok)
	rte_trace(p, new, '>', new == net->routes ? "added [best]" : "added");
      else if (old_ok)
	{
	  if (old != old_best)
	    rte_trace(p, old, '>', "removed");
	  else if (rte_is_ok(net->routes))
	    rte_trace(p, old, '>', "removed [replaced]");
	  else
	    rte_trace(p, old, '>', "removed [sole]");
	}
    }

  /* Propagate the route change */
  rte_announce(table, RA_ANY, net, new, old, NULL);
  if (net->routes != old_best)
    rte_announce(table, RA_OPTIMAL, net, net->routes, old_best, NULL);
  if (table->config->sorted)
    rte_announce(table, RA_ACCEPTED, net, new, old, before_old);

  /* Schedule garbage collection when the net became empty and the
     configured operation/time thresholds were reached */
  if (!net->routes &&
      (table->gc_counter++ >= table->config->gc_max_ops) &&
      (table->gc_time + table->config->gc_min_time <= now))
    rt_schedule_gc(table);

  if (old_ok && p->rte_remove)
    p->rte_remove(net, old);
  if (new_ok && p->rte_insert)
    p->rte_insert(net, new);

  if (old)
    rte_free_quick(old);
}
920
static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */

/* Enter an update section; nested calls just bump the counter */
static inline void
rte_update_lock(void)
{
  rte_update_nest_cnt++;
}

/* Leave an update section; the temporary linpool is flushed only when
   the outermost update finishes */
static inline void
rte_update_unlock(void)
{
  if (!--rte_update_nest_cnt)
    lp_flush(rte_update_pool);
}
935
936 static inline void
937 rte_hide_dummy_routes(net *net, rte **dummy)
938 {
939 if (net->routes && net->routes->attrs->source == RTS_DUMMY)
940 {
941 *dummy = net->routes;
942 net->routes = (*dummy)->next;
943 }
944 }
945
946 static inline void
947 rte_unhide_dummy_routes(net *net, rte **dummy)
948 {
949 if (*dummy)
950 {
951 (*dummy)->next = net->routes;
952 net->routes = *dummy;
953 }
954 }
955
/**
 * rte_update - enter a new update to a routing table
 * @table: table to be updated
 * @ah: pointer to table announce hook
 * @net: network node
 * @p: protocol submitting the update
 * @src: protocol originating the update
 * @new: a &rte representing the new route or %NULL for route removal.
 *
 * This function is called by the routing protocols whenever they discover
 * a new route or wish to update/remove an existing route. The right announcement
 * sequence is to build route attributes first (either un-cached with @aflags set
 * to zero or a cached one using rta_lookup(); in this case please note that
 * you need to increase the use count of the attributes yourself by calling
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 * the appropriate data and finally submit the new &rte by calling rte_update().
 *
 * @src specifies the protocol that originally created the route and the meaning
 * of protocol-dependent data of @new. If @new is not %NULL, @src have to be the
 * same value as @new->attrs->proto. @p specifies the protocol that called
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
 * stores @p in @new->sender;
 *
 * When rte_update() gets any route, it automatically validates it (checks,
 * whether the network and next hop address are valid IP addresses and also
 * whether a normal routing protocol doesn't try to smuggle a host or link
 * scope route to the table), converts all protocol dependent attributes stored
 * in the &rte to temporary extended attributes, consults import filters of the
 * protocol to see if the route should be accepted and/or its attributes modified,
 * stores the temporary attributes back to the &rte.
 *
 * Now, having a "public" version of the route, we
 * automatically find any old route defined by the protocol @src
 * for network @n, replace it by the new one (or removing it if @new is %NULL),
 * recalculate the optimal route for this destination and finally broadcast
 * the change (if any) to all routing protocols by calling rte_announce().
 *
 * All memory used for attribute lists and other temporary allocations is taken
 * from a special linear pool @rte_update_pool and freed when rte_update()
 * finishes.
 */

void
rte_update2(struct announce_hook *ah, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = ah->proto;
  struct proto_stats *stats = ah->stats;
  struct filter *filter = ah->in_filter;
  ea_list *tmpa = NULL;
  rte *dummy = NULL;

  rte_update_lock();
  if (new)
    {
      new->sender = ah;

      stats->imp_updates_received++;
      if (!rte_validate(new))
	{
	  rte_trace_in(D_FILTERS, p, new, "invalid");
	  stats->imp_updates_invalid++;
	  goto drop;
	}

      if (filter == FILTER_REJECT)
	{
	  stats->imp_updates_filtered++;
	  rte_trace_in(D_FILTERS, p, new, "filtered out");

	  if (! ah->in_keep_filtered)
	    goto drop;

	  /* new is a private copy, we can modify it */
	  new->flags |= REF_FILTERED;
	}
      else
	{
	  tmpa = make_tmp_attrs(new, rte_update_pool);
	  if (filter && (filter != FILTER_REJECT))
	    {
	      ea_list *old_tmpa = tmpa;
	      int fr = f_run(filter, &new, &tmpa, rte_update_pool, 0);
	      if (fr > F_ACCEPT)
		{
		  stats->imp_updates_filtered++;
		  rte_trace_in(D_FILTERS, p, new, "filtered out");

		  if (! ah->in_keep_filtered)
		    goto drop;

		  new->flags |= REF_FILTERED;
		}
	      /* The filter may have modified tmp attrs - write them back
		 into the route's protocol-dependent data */
	      if (tmpa != old_tmpa && src->proto->store_tmp_attrs)
		src->proto->store_tmp_attrs(new, tmpa);
	    }
	}
      if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
	new->attrs = rta_lookup(new->attrs);
      new->flags |= REF_COW;
    }
  else
    {
      stats->imp_withdraws_received++;

      if (!net || !src)
	{
	  stats->imp_withdraws_ignored++;
	  rte_update_unlock();
	  return;
	}
    }

 recalc:
  rte_hide_dummy_routes(net, &dummy);
  rte_recalculate(ah, net, new, src);
  rte_unhide_dummy_routes(net, &dummy);
  rte_update_unlock();
  return;

 drop:
  /* Rejected update is processed as a withdraw of the old route */
  rte_free(new);
  new = NULL;
  goto recalc;
}
1080
/* Independent call to rte_announce(), used from next hop
   recalculation, outside of rte_update(). new must be non-NULL */
static inline void
rte_announce_i(rtable *tab, unsigned type, net *n, rte *new, rte *old)
{
  rte_update_lock();
  rte_announce(tab, type, n, new, old, NULL);
  rte_update_unlock();
}
1090
/* Non-filtered route deletion, used during garbage collection -
   withdraws @old from table @t on behalf of its original sender */
void
rte_discard(rtable *t, rte *old)
{
  rte_update_lock();
  rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
  rte_update_unlock();
}
1098
/* Check rtable for best route to given net whether it would be exported to p.
   Returns 1 when the best route would pass the protocol's import_control
   hook and the supplied export @filter, 0 otherwise. */
int
rt_examine(rtable *t, ip_addr prefix, int pxlen, struct proto *p, struct filter *filter)
{
  net *n = net_find(t, prefix, pxlen);
  rte *rt = n ? n->routes : NULL;

  if (!rte_is_valid(rt))
    return 0;

  rte_update_lock();

  /* Rest is stripped down export_filter() */
  ea_list *tmpa = make_tmp_attrs(rt, rte_update_pool);
  int v = p->import_control ? p->import_control(p, &rt, &tmpa, rte_update_pool) : 0;
  if (v == RIC_PROCESS)
    v = (f_run(filter, &rt, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);

  /* Discard temporary rte */
  if (rt != n->routes)
    rte_free(rt);

  rte_update_unlock();

  return v > 0;
}
1125
1126
/**
 * rt_refresh_begin - start a refresh cycle
 * @t: related routing table
 * @ah: related announce hook
 *
 * This function starts a refresh cycle for given routing table and announce
 * hook. The refresh cycle is a sequence where the protocol sends all its valid
 * routes to the routing table (by rte_update()). After that, all protocol
 * routes (more precisely routes with @ah as @sender) not sent during the
 * refresh cycle but still in the table from the past are pruned. This is
 * implemented by marking all related routes as stale by REF_STALE flag in
 * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
 * flag in rt_refresh_end() and then removing such routes in the prune loop.
 */
void
rt_refresh_begin(rtable *t, struct announce_hook *ah)
{
  net *n;
  rte *e;

  /* Mark every route sent by @ah as stale; rte_update() of a fresh copy
     clears the flag, so whatever stays stale was not re-announced. */
  FIB_WALK(&t->fib, fn)
    {
      n = (net *) fn;
      for (e = n->routes; e; e = e->next)
	if (e->sender == ah)
	  e->flags |= REF_STALE;
    }
  FIB_WALK_END;
}
1156
/**
 * rt_refresh_end - end a refresh cycle
 * @t: related routing table
 * @ah: related announce hook
 *
 * This function ends a refresh cycle for given routing table and announce
 * hook. See rt_refresh_begin() for description of refresh cycles.
 */
void
rt_refresh_end(rtable *t, struct announce_hook *ah)
{
  int prune = 0;
  net *n;
  rte *e;

  /* Any route still carrying REF_STALE was not re-announced during the
     refresh cycle; mark it for removal by the prune loop. */
  FIB_WALK(&t->fib, fn)
    {
      n = (net *) fn;
      for (e = n->routes; e; e = e->next)
	if ((e->sender == ah) && (e->flags & REF_STALE))
	  {
	    e->flags |= REF_DISCARD;
	    prune = 1;
	  }
    }
  FIB_WALK_END;

  /* Schedule pruning only if something was actually marked */
  if (prune)
    rt_schedule_prune(t);
}
1187
1188
/**
 * rte_dump - dump a route
 * @e: &rte to be dumped
 *
 * This function dumps contents of a &rte to debug output.
 */
void
rte_dump(rte *e)
{
  net *n = e->net;
  debug("%-1I/%2d ", n->n.prefix, n->n.pxlen);
  debug("KF=%02x PF=%02x pref=%d lm=%d ", n->n.flags, e->pflags, e->pref, now-e->lastmod);
  rta_dump(e->attrs);
  /* Let the originating protocol dump its protocol-specific attributes,
     if it implements the hook */
  if (e->attrs->src->proto->proto->dump_attrs)
    e->attrs->src->proto->proto->dump_attrs(e);
  debug("\n");
}
1206
/**
 * rt_dump - dump a routing table
 * @t: routing table to be dumped
 *
 * This function dumps contents of a given routing table to debug output.
 */
void
rt_dump(rtable *t)
{
  rte *e;
  net *n;
  struct announce_hook *a;

  debug("Dump of routing table <%s>\n", t->name);
#ifdef DEBUGGING
  fib_check(&t->fib);
#endif
  /* Dump every route of every network in the FIB */
  FIB_WALK(&t->fib, fn)
    {
      n = (net *) fn;
      for(e=n->routes; e; e=e->next)
	rte_dump(e);
    }
  FIB_WALK_END;
  /* List the protocols subscribed to this table */
  WALK_LIST(a, t->hooks)
    debug("\tAnnounces routes to protocol %s\n", a->proto->name);
  debug("\n");
}
1235
/**
 * rt_dump_all - dump all routing tables
 *
 * This function dumps contents of all routing tables to debug output.
 */
void
rt_dump_all(void)
{
  rtable *t;

  WALK_LIST(t, routing_tables)
    rt_dump(t);
}
1249
/* Mark the table for pruning and make sure its maintenance event runs. */
static inline void
rt_schedule_prune(rtable *tab)
{
  rt_mark_for_prune(tab);
  ev_schedule(tab->rt_event);
}
1256
/* Schedule garbage collection (rt_prune_nets) for the table,
   coalescing repeated requests via the gc_scheduled flag. */
static inline void
rt_schedule_gc(rtable *tab)
{
  if (tab->gc_scheduled)
    return;

  tab->gc_scheduled = 1;
  ev_schedule(tab->rt_event);
}
1266
/* Schedule a hostcache update for the table, coalescing repeated
   requests via the hcu_scheduled flag. */
static inline void
rt_schedule_hcu(rtable *tab)
{
  if (tab->hcu_scheduled)
    return;

  tab->hcu_scheduled = 1;
  ev_schedule(tab->rt_event);
}
1276
/* Schedule a next hop update. nhu_state bit 0 means "scheduled",
   bit 1 means "update in progress" (see rt_next_hop_update()). */
static inline void
rt_schedule_nhu(rtable *tab)
{
  /* Only fire the event when going from fully idle */
  if (tab->nhu_state == 0)
    ev_schedule(tab->rt_event);

  /* state change 0->1, 2->3 */
  tab->nhu_state |= 1;
}
1286
1287
/*
 * Garbage-collect the FIB: delete network entries that no longer hold
 * any route. Resets the table's GC counters when done.
 */
static void
rt_prune_nets(rtable *tab)
{
  struct fib_iterator fit;
  int ncnt = 0, ndel = 0;

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  FIB_ITERATE_INIT(&fit, &tab->fib);
again:
  FIB_ITERATE_START(&tab->fib, &fit, f)
    {
      net *n = (net *) f;
      ncnt++;
      if (!n->routes)		/* Orphaned FIB entry */
	{
	  /* Park the iterator before mutating the FIB, then restart
	     the walk from the saved position */
	  FIB_ITERATE_PUT(&fit, f);
	  fib_delete(&tab->fib, f);
	  ndel++;
	  goto again;
	}
    }
  FIB_ITERATE_END(f);
  DBG("Pruned %d of %d networks\n", ndel, ncnt);

  tab->gc_counter = 0;
  tab->gc_time = now;
  tab->gc_scheduled = 0;
}
1319
/*
 * Main maintenance event of a routing table. Dispatches whatever work
 * has been scheduled: hostcache update, next hop update, table pruning
 * and network garbage collection.
 */
static void
rt_event(void *ptr)
{
  rtable *tab = ptr;

  if (tab->hcu_scheduled)
    rt_update_hostcache(tab);

  if (tab->nhu_state)
    rt_next_hop_update(tab);

  if (tab->prune_state)
    if (!rt_prune_table(tab))
      {
	/* Table prune unfinished -- reschedule ourselves and retry later */
	ev_schedule(tab->rt_event);
	return;
      }

  if (tab->gc_scheduled)
    {
      rt_prune_nets(tab);
      rt_prune_sources(); // FIXME this should be moved to independent event
    }
}
1345
/*
 * Initialize the routing table structure @t: zero it, set up its FIB,
 * name, config and hook list. The maintenance event is created only for
 * configured tables (@cf non-NULL).
 */
void
rt_setup(pool *p, rtable *t, char *name, struct rtable_config *cf)
{
  bzero(t, sizeof(*t));
  fib_init(&t->fib, p, sizeof(net), 0, rte_init);
  t->name = name;
  t->config = cf;
  init_list(&t->hooks);
  if (cf)
    {
      t->rt_event = ev_new(p);
      t->rt_event->hook = rt_event;
      t->rt_event->data = t;
      t->gc_time = now;
    }
}
1362
/**
 * rt_init - initialize routing tables
 *
 * This function is called during BIRD startup. It initializes the
 * routing table module: the attribute cache, the shared resource pool,
 * the update linpool, the &rte slab and the global table list.
 */
void
rt_init(void)
{
  rta_init();
  rt_table_pool = rp_new(&root_pool, "Routing tables");
  rte_update_pool = lp_new(rt_table_pool, 4080);
  rte_slab = sl_new(rt_table_pool, sizeof(rte));
  init_list(&routing_tables);
}
1378
1379
/*
 * One bounded step of table pruning: discard routes belonging to
 * flushing protocols or marked REF_DISCARD, and delete FIB entries left
 * without routes. At most *@limit routes are discarded per call;
 * @limit is decremented in place. Returns 1 when the table is fully
 * pruned, 0 when interrupted by the limit (the iterator position is
 * saved in tab->prune_fit for the next call).
 */
static int
rt_prune_step(rtable *tab, int *limit)
{
  struct fib_iterator *fit = &tab->prune_fit;

  DBG("Pruning route table %s\n", tab->name);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  if (tab->prune_state == RPS_NONE)
    return 1;

  /* First step of a new prune cycle -- initialize the iterator */
  if (tab->prune_state == RPS_SCHEDULED)
    {
      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->prune_state = RPS_RUNNING;
    }

again:
  FIB_ITERATE_START(&tab->fib, fit, fn)
    {
      net *n = (net *) fn;
      rte *e;

    rescan:
      for (e=n->routes; e; e=e->next)
	if (e->sender->proto->flushing || (e->flags & REF_DISCARD))
	  {
	    if (*limit <= 0)
	      {
		/* Out of budget -- park the iterator and resume later */
		FIB_ITERATE_PUT(fit, fn);
		return 0;
	      }

	    rte_discard(tab, e);
	    (*limit)--;

	    /* The route list changed under us, restart the scan of @n */
	    goto rescan;
	  }
      if (!n->routes)		/* Orphaned FIB entry */
	{
	  /* Park the iterator before deleting the entry, then restart */
	  FIB_ITERATE_PUT(fit, fn);
	  fib_delete(&tab->fib, fn);
	  goto again;
	}
    }
  FIB_ITERATE_END(fn);

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  tab->prune_state = RPS_NONE;
  return 1;
}
1436
/**
 * rt_prune_table - prune a routing table
 *
 * This function scans the routing table @tab and removes routes belonging to
 * flushing protocols, discarded routes and also stale network entries, in a
 * similar fashion like rt_prune_loop(). Returns 1 when all such routes are
 * pruned. Contrary to rt_prune_loop(), this function is not a part of the
 * protocol flushing loop, but it is called from rt_event() for just one routing
 * table.
 *
 * Note that rt_prune_table() and rt_prune_loop() share (for each table) the
 * prune state (@prune_state) and also the pruning iterator (@prune_fit).
 */
static inline int
rt_prune_table(rtable *tab)
{
  /* Per-invocation budget of routes to discard; see rt_prune_step() */
  int limit = 512;
  return rt_prune_step(tab, &limit);
}
1456
/**
 * rt_prune_loop - prune routing tables
 *
 * The prune loop scans routing tables and removes routes belonging to flushing
 * protocols, discarded routes and also stale network entries. Returns 1 when
 * all such routes are pruned. It is a part of the protocol flushing loop.
 */
int
rt_prune_loop(void)
{
  /* The budget is shared across all tables in one pass */
  int limit = 512;
  rtable *t;

  WALK_LIST(t, routing_tables)
    if (! rt_prune_step(t, &limit))
      return 0;

  return 1;
}
1476
/*
 * Pre-configuration hook of the table module: initialize the config's
 * table list and create the default "master" table.
 */
void
rt_preconfig(struct config *c)
{
  struct symbol *s = cf_find_symbol("master");

  init_list(&c->tables);
  c->master_rtc = rt_new_table(s);
}
1485
1486
1487 /*
1488 * Some functions for handing internal next hop updates
1489 * triggered by rt_schedule_nhu().
1490 */
1491
/*
 * Test whether the next-hop-related fields of @a no longer match its
 * hostentry, i.e. whether the route needs a next hop update.
 * Non-recursive routes (no hostentry) are never outdated.
 */
static inline int
rta_next_hop_outdated(rta *a)
{
  struct hostentry *he = a->hostentry;

  if (!he)
    return 0;

  /* Host unresolved -- route should be unreachable; outdated otherwise */
  if (!he->src)
    return a->dest != RTD_UNREACHABLE;

  /* Compare every field rta_apply_hostentry() would set */
  return (a->iface != he->src->iface) || !ipa_equal(a->gw, he->gw) ||
    (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
    !mpnh_same(a->nexthops, he->src->nexthops);
}
1507
/*
 * Copy the resolved next hop information from hostentry @he into @a.
 * If the host is unresolved (he->src == NULL), iface and nexthops are
 * cleared; he->dest is expected to reflect the unresolved state.
 */
static inline void
rta_apply_hostentry(rta *a, struct hostentry *he)
{
  a->hostentry = he;
  a->iface = he->src ? he->src->iface : NULL;
  a->gw = he->gw;
  a->dest = he->dest;
  a->igp_metric = he->igp_metric;
  a->nexthops = he->src ? he->src->nexthops : NULL;
}
1518
/*
 * Create a copy of @old with its next hop fields refreshed from the
 * hostentry. The caller owns the returned rte; @old is left untouched.
 */
static inline rte *
rt_next_hop_update_rte(rtable *tab, rte *old)
{
  rta a;
  memcpy(&a, old->attrs, sizeof(rta));
  rta_apply_hostentry(&a, old->attrs->hostentry);
  /* Reset attribute flags on the private copy before interning it */
  a.aflags = 0;

  rte *e = sl_alloc(rte_slab);
  memcpy(e, old, sizeof(rte));
  e->attrs = rta_lookup(&a);

  return e;
}
1533
/*
 * Update next hops of all outdated routes for network @n: replace each
 * outdated rte with a refreshed copy, re-announce it, then recompute
 * and re-announce the best route. Returns the number of updated routes
 * (0 when nothing changed).
 */
static inline int
rt_next_hop_update_net(rtable *tab, net *n)
{
  rte **k, *e, *new, *old_best, **new_best;
  int count = 0;
  int free_old_best = 0;

  old_best = n->routes;
  if (!old_best)
    return 0;

  /* Replace outdated routes in place, keeping list order */
  for (k = &n->routes; e = *k; k = &e->next)
    if (rta_next_hop_outdated(e->attrs))
      {
	new = rt_next_hop_update_rte(tab, e);
	*k = new;

	rte_announce_i(tab, RA_ANY, n, new, e);
	rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");

	/* Call a pre-comparison hook */
	/* Not really an efficient way to compute this */
	if (e->attrs->src->proto->rte_recalculate)
	  e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);

	if (e != old_best)
	  rte_free_quick(e);
	else /* Freeing of the old best rte is postponed */
	  free_old_best = 1;

	e = new;
	count++;
      }

  if (!count)
    return 0;

  /* Find the new best route */
  new_best = NULL;
  for (k = &n->routes; e = *k; k = &e->next)
    {
      if (!new_best || rte_better(e, *new_best))
	new_best = k;
    }

  /* Relink the new best route to the first position */
  new = *new_best;
  if (new != n->routes)
    {
      *new_best = new->next;
      new->next = n->routes;
      n->routes = new;
    }

  /* Announce the new best route */
  if (new != old_best)
    {
      rte_announce_i(tab, RA_OPTIMAL, n, new, old_best);
      rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");
    }

  /* old_best was kept alive until now so it could serve as the 'old'
     route in the RA_OPTIMAL announcement above */
  if (free_old_best)
    rte_free_quick(old_best);

  return count;
}
1600
/*
 * Incremental next hop update of a whole table, driven by nhu_state:
 * 0 = idle, 1 = scheduled, 2 = running, 3 = running with another
 * update requested. Processes a bounded amount of work per call and
 * reschedules itself when interrupted.
 */
static void
rt_next_hop_update(rtable *tab)
{
  struct fib_iterator *fit = &tab->nhu_fit;
  int max_feed = 32;

  if (tab->nhu_state == 0)
    return;

  /* Freshly scheduled -- start a new walk */
  if (tab->nhu_state == 1)
    {
      FIB_ITERATE_INIT(fit, &tab->fib);
      tab->nhu_state = 2;
    }

  FIB_ITERATE_START(&tab->fib, fit, fn)
    {
      if (max_feed <= 0)
	{
	  /* Budget exhausted -- save position and continue later */
	  FIB_ITERATE_PUT(fit, fn);
	  ev_schedule(tab->rt_event);
	  return;
	}
      max_feed -= rt_next_hop_update_net(tab, (net *) fn);
    }
  FIB_ITERATE_END(fn);

  /* state change 2->0, 3->1 */
  tab->nhu_state &= 1;

  /* Another update was requested while we were running -- start over */
  if (tab->nhu_state > 0)
    ev_schedule(tab->rt_event);
}
1634
1635
/*
 * Create a new table configuration bound to symbol @s and append it to
 * the config's table list, with default GC parameters.
 */
struct rtable_config *
rt_new_table(struct symbol *s)
{
  /* Hack that allows to 'redefine' the master table */
  if ((s->class == SYM_TABLE) && (s->def == new_config->master_rtc))
    return s->def;

  struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));

  cf_define_symbol(s, SYM_TABLE, c);
  c->name = s->name;
  add_tail(&new_config->tables, &c->n);
  c->gc_max_ops = 1000;
  c->gc_min_time = 5;
  return c;
}
1652
/**
 * rt_lock_table - lock a routing table
 * @r: routing table to be locked
 *
 * Lock a routing table, because it's in use by a protocol,
 * preventing it from being freed when it gets undefined in a new
 * configuration.
 */
void
rt_lock_table(rtable *r)
{
  r->use_count++;
}
1666
/**
 * rt_unlock_table - unlock a routing table
 * @r: routing table to be unlocked
 *
 * Unlock a routing table formerly locked by rt_lock_table(),
 * that is decrease its use count and delete it if it's scheduled
 * for deletion by configuration changes.
 */
void
rt_unlock_table(rtable *r)
{
  /* Free the table only when the last user is gone AND the table was
     scheduled for deletion (r->deleted points to the old config) */
  if (!--r->use_count && r->deleted)
    {
      struct config *conf = r->deleted;
      DBG("Deleting routing table %s\n", r->name);
      if (r->hostcache)
	rt_free_hostcache(r);
      rem_node(&r->n);
      fib_free(&r->fib);
      rfree(r->rt_event);
      mb_free(r);
      /* Release the obstacle that kept the old config alive */
      config_del_obstacle(conf);
    }
}
1691
/**
 * rt_commit - commit new routing table configuration
 * @new: new configuration
 * @old: original configuration or %NULL if it's boot time config
 *
 * Scan differences between @old and @new configuration and modify
 * the routing tables according to these changes. If @new defines a
 * previously unknown table, create it, if it omits a table existing
 * in @old, schedule it for deletion (it gets deleted when all protocols
 * disconnect from it by calling rt_unlock_table()), if it exists
 * in both configurations, leave it unchanged.
 */
void
rt_commit(struct config *new, struct config *old)
{
  struct rtable_config *o, *r;

  DBG("rt_commit:\n");
  if (old)
    {
      WALK_LIST(o, old->tables)
	{
	  rtable *ot = o->table;
	  if (!ot->deleted)
	    {
	      struct symbol *sym = cf_find_symbol(o->name);
	      if (sym && sym->class == SYM_TABLE && !new->shutdown)
		{
		  DBG("\t%s: same\n", o->name);
		  /* Table kept -- rebind the existing rtable to the new config */
		  r = sym->def;
		  r->table = ot;
		  ot->name = r->name;
		  ot->config = r;
		  if (o->sorted != r->sorted)
		    log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
		}
	      else
		{
		  DBG("\t%s: deleted\n", o->name);
		  /* Table dropped from config -- mark for deletion; the
		     lock/unlock pair frees it immediately if unused */
		  ot->deleted = old;
		  config_add_obstacle(old);
		  rt_lock_table(ot);
		  rt_unlock_table(ot);
		}
	    }
	}
    }

  /* Create tables that are new in this configuration */
  WALK_LIST(r, new->tables)
    if (!r->table)
      {
	rtable *t = mb_alloc(rt_table_pool, sizeof(struct rtable));
	DBG("\t%s: created\n", r->name);
	rt_setup(rt_table_pool, t, r->name, r);
	add_tail(&routing_tables, &t->n);
	r->table = t;
      }
  DBG("\tdone\n");
}
1751
/*
 * Feed one route to a protocol during initial feed/refeed, dispatching
 * to the announcement mode the protocol accepts.
 */
static inline void
do_feed_baby(struct proto *p, int type, struct announce_hook *h, net *n, rte *e)
{
  rte_update_lock();
  if (type == RA_ACCEPTED)
    rt_notify_accepted(h, n, e, NULL, NULL, p->refeeding ? 2 : 1);
  else
    rt_notify_basic(h, n, e, p->refeeding ? e : NULL, p->refeeding);
  rte_update_unlock();
}
1762
/**
 * rt_feed_baby - advertise routes to a new protocol
 * @p: protocol to be fed
 *
 * This function performs one pass of advertisement of routes to a newly
 * initialized protocol. It's called by the protocol code as long as it
 * has something to do. (We avoid transferring all the routes in single
 * pass in order not to monopolize CPU time.)
 *
 * Returns 0 when interrupted by the per-pass budget (call again later),
 * 1 when the feed is complete or the protocol stopped feeding.
 */
int
rt_feed_baby(struct proto *p)
{
  struct announce_hook *h;
  struct fib_iterator *fit;
  int max_feed = 256;

  if (!p->feed_ahook) /* Need to initialize first */
    {
      if (!p->ahooks)
	return 1;
      DBG("Announcing routes to new protocol %s\n", p->name);
      p->feed_ahook = p->ahooks;
      fit = p->feed_iterator = mb_alloc(p->pool, sizeof(struct fib_iterator));
      goto next_hook;
    }
  fit = p->feed_iterator;

again:
  h = p->feed_ahook;
  FIB_ITERATE_START(&h->table->fib, fit, fn)
    {
      net *n = (net *) fn;
      rte *e = n->routes;
      if (max_feed <= 0)
	{
	  /* Budget exhausted -- park the iterator and resume next call */
	  FIB_ITERATE_PUT(fit, fn);
	  return 0;
	}

      /* XXXX perhaps we should change feed for RA_ACCEPTED to not use 'new' */

      if ((p->accept_ra_types == RA_OPTIMAL) ||
	  (p->accept_ra_types == RA_ACCEPTED))
	if (rte_is_valid(e))
	  {
	    if (p->export_state != ES_FEEDING)
	      return 1;  /* In the meantime, the protocol fell down. */

	    do_feed_baby(p, p->accept_ra_types, h, n, e);
	    max_feed--;
	  }

      if (p->accept_ra_types == RA_ANY)
	/* RA_ANY wants every valid route, not just the best one */
	for(e = n->routes; e; e = e->next)
	  {
	    if (p->export_state != ES_FEEDING)
	      return 1;  /* In the meantime, the protocol fell down. */

	    if (!rte_is_valid(e))
	      continue;

	    do_feed_baby(p, RA_ANY, h, n, e);
	    max_feed--;
	  }
    }
  FIB_ITERATE_END(fn);
  /* This hook's table is done -- advance to the next announce hook */
  p->feed_ahook = h->next;
  if (!p->feed_ahook)
    {
      mb_free(p->feed_iterator);
      p->feed_iterator = NULL;
      return 1;
    }

next_hook:
  h = p->feed_ahook;
  FIB_ITERATE_INIT(fit, &h->table->fib);
  goto again;
}
1842
/**
 * rt_feed_baby_abort - abort protocol feeding
 * @p: protocol
 *
 * This function is called by the protocol code when the protocol
 * stops or ceases to exist before the last iteration of rt_feed_baby()
 * has finished.
 */
void
rt_feed_baby_abort(struct proto *p)
{
  if (p->feed_ahook)
    {
      /* Unlink the iterator and exit */
      fit_get(&p->feed_ahook->table->fib, p->feed_iterator);
      p->feed_ahook = NULL;
    }
}
1861
1862
/* Mix a pointer value into an unsigned hash by XOR-folding
   shifted copies of its integer representation. */
static inline unsigned
ptr_hash(void *ptr)
{
  uintptr_t v = (uintptr_t) ptr;
  uintptr_t mixed = v ^ (v << 8);
  mixed ^= v >> 16;
  return mixed;
}
1869
/* Hash key for a hostcache entry: combine the host address hash with a
   hash of the dependent table pointer, masked to 16 bits (the hash key
   space used by hash_shift in hc_insert()/hc_remove()). */
static inline unsigned
hc_hash(ip_addr a, rtable *dep)
{
  return (ipa_hash(a) ^ ptr_hash(dep)) & 0xffff;
}
1875
/* Link hostentry @he at the head of its hash bucket; the bucket index
   is taken from the top bits of the precomputed 16-bit hash key. */
static inline void
hc_insert(struct hostcache *hc, struct hostentry *he)
{
  uint k = he->hash_key >> hc->hash_shift;
  he->next = hc->hash_table[k];
  hc->hash_table[k] = he;
}
1883
/* Unlink hostentry @he from its hash bucket. Walks the bucket to find
   the pointer referring to @he; @he is expected to be present. */
static inline void
hc_remove(struct hostcache *hc, struct hostentry *he)
{
  struct hostentry **hep;
  uint k = he->hash_key >> hc->hash_shift;

  for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
  *hep = he->next;
}
1893
1894 #define HC_DEF_ORDER 10
1895 #define HC_HI_MARK *4
1896 #define HC_HI_STEP 2
1897 #define HC_HI_ORDER 16 /* Must be at most 16 */
1898 #define HC_LO_MARK /5
1899 #define HC_LO_STEP 2
1900 #define HC_LO_ORDER 10
1901
/*
 * Allocate a zeroed hash table with 2^order buckets and set the
 * grow/shrink thresholds (hash_max/hash_min) from the HC_*_MARK macros,
 * clamped at the HC_HI_ORDER/HC_LO_ORDER bounds.
 */
static void
hc_alloc_table(struct hostcache *hc, unsigned order)
{
  unsigned hsize = 1 << order;
  hc->hash_order = order;
  /* hash keys are 16-bit; shift maps them onto the bucket range */
  hc->hash_shift = 16 - order;
  hc->hash_max = (order >= HC_HI_ORDER) ? ~0 : (hsize HC_HI_MARK);
  hc->hash_min = (order <= HC_LO_ORDER) ? 0 : (hsize HC_LO_MARK);

  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
}
1913
1914 static void
1915 hc_resize(struct hostcache *hc, unsigned new_order)
1916 {
1917 unsigned old_size = 1 << hc->hash_order;
1918 struct hostentry **old_table = hc->hash_table;
1919 struct hostentry *he, *hen;
1920 int i;
1921
1922 hc_alloc_table(hc, new_order);
1923 for (i = 0; i < old_size; i++)
1924 for (he = old_table[i]; he != NULL; he=hen)
1925 {
1926 hen = he->next;
1927 hc_insert(hc, he);
1928 }
1929 mb_free(old_table);
1930 }
1931
/*
 * Allocate and initialize a new hostentry for address @a (link-local
 * companion @ll) depending on table @dep, with precomputed hash key @k.
 * The entry starts unresolved (src == NULL) with zero use count, is
 * linked to both the entry list and the hash table, and the table is
 * grown when it exceeds its high-water mark.
 */
static struct hostentry *
hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
{
  struct hostentry *he = sl_alloc(hc->slab);

  he->addr = a;
  he->link = ll;
  he->tab = dep;
  he->hash_key = k;
  he->uc = 0;
  he->src = NULL;

  add_tail(&hc->hostentries, &he->ln);
  hc_insert(hc, he);

  hc->hash_items++;
  if (hc->hash_items > hc->hash_max)
    hc_resize(hc, hc->hash_order + HC_HI_STEP);

  return he;
}
1953
/*
 * Remove hostentry @he from the hostcache: release its cached rta,
 * unlink it from the entry list and the hash table, return it to the
 * slab, and shrink the hash table when it falls below the low-water mark.
 */
static void
hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
{
  rta_free(he->src);

  rem_node(&he->ln);
  hc_remove(hc, he);
  sl_free(hc->slab, he);

  hc->hash_items--;
  if (hc->hash_items < hc->hash_min)
    hc_resize(hc, hc->hash_order - HC_LO_STEP);
}
1967
/*
 * Create and attach a hostcache to table @tab: entry list, hash table
 * of default size, hostentry slab, and a linpool-backed prefix trie
 * used to match networks against cached host routes.
 */
static void
rt_init_hostcache(rtable *tab)
{
  struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
  init_list(&hc->hostentries);

  hc->hash_items = 0;
  hc_alloc_table(hc, HC_DEF_ORDER);
  hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));

  hc->lp = lp_new(rt_table_pool, 1008);
  hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));

  tab->hostcache = hc;
}
1983
/*
 * Free the hostcache of table @tab. Cached rtas are released per entry;
 * the entries themselves are reclaimed wholesale with the slab. Entries
 * still referenced (nonzero use count) indicate a bug and are logged.
 */
static void
rt_free_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;

  node *n;
  WALK_LIST(n, hc->hostentries)
    {
      struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
      rta_free(he->src);

      if (he->uc)
	log(L_ERR "Hostcache is not empty in table %s", tab->name);
    }

  rfree(hc->slab);
  rfree(hc->lp);
  mb_free(hc->hash_table);
  mb_free(hc);
}
2004
2005 static void
2006 rt_notify_hostcache(rtable *tab, net *net)
2007 {
2008 struct hostcache *hc = tab->hostcache;
2009
2010 if (tab->hcu_scheduled)
2011 return;
2012
2013 if (trie_match_prefix(hc->trie, net->n.prefix, net->n.pxlen))
2014 rt_schedule_hcu(tab);
2015 }
2016
2017 static int
2018 if_local_addr(ip_addr a, struct iface *i)
2019 {
2020 struct ifa *b;
2021
2022 WALK_LIST(b, i->addrs)
2023 if (ipa_equal(a, b->ip))
2024 return 1;
2025
2026 return 0;
2027 }
2028
/*
 * Determine the IGP metric of route @rt for recursive next hop
 * resolution. An explicit EA_GEN_IGP_METRIC attribute takes precedence;
 * otherwise protocol-specific metrics are used where compiled in.
 * Directly attached routes count as metric 0; anything else is
 * IGP_METRIC_UNKNOWN.
 */
static u32
rt_get_igp_metric(rte *rt)
{
  eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);

  if (ea)
    return ea->u.data;

  rta *a = rt->attrs;

#ifdef CONFIG_OSPF
  if ((a->source == RTS_OSPF) ||
      (a->source == RTS_OSPF_IA) ||
      (a->source == RTS_OSPF_EXT1))
    return rt->u.ospf.metric1;
#endif

#ifdef CONFIG_RIP
  if (a->source == RTS_RIP)
    return rt->u.rip.metric;
#endif

  /* Device routes */
  if ((a->dest != RTD_ROUTER) && (a->dest != RTD_MULTIPATH))
    return 0;

  return IGP_METRIC_UNKNOWN;
}
2057
/*
 * Re-resolve hostentry @he against table @tab by looking up the best
 * route covering its address. Fills in gw/dest/src/igp_metric, records
 * the covering prefix length in the hostcache trie (so future table
 * changes can be matched against it), and returns whether the resolved
 * source rta changed -- i.e. whether dependent routes need a next hop
 * update.
 */
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  rta *old_src = he->src;
  int pxlen = 0;

  /* Reset the hostentry */
  he->src = NULL;
  he->gw = IPA_NONE;
  he->dest = RTD_UNREACHABLE;
  he->igp_metric = 0;

  net *n = net_route(tab, he->addr, MAX_PREFIX_LENGTH);
  if (n)
    {
      rte *e = n->routes;
      rta *a = e->attrs;
      pxlen = n->n.pxlen;

      if (a->hostentry)
	{
	  /* Recursive route should not depend on another recursive route */
	  log(L_WARN "Next hop address %I resolvable through recursive route for %I/%d",
	      he->addr, n->n.prefix, pxlen);
	  goto done;
	}

      if (a->dest == RTD_DEVICE)
	{
	  if (if_local_addr(he->addr, a->iface))
	    {
	      /* The host address is a local address, this is not valid */
	      log(L_WARN "Next hop address %I is a local address of iface %s",
		  he->addr, a->iface->name);
	      goto done;
	    }

	  /* The host is directly reachable, use link as a gateway */
	  he->gw = he->link;
	  he->dest = RTD_ROUTER;
	}
      else
	{
	  /* The host is reachable through some route entry */
	  he->gw = a->gw;
	  he->dest = a->dest;
	}

      he->src = rta_clone(a);
      he->igp_metric = rt_get_igp_metric(e);
    }

done:
  /* Add a prefix range to the trie */
  trie_add_prefix(tab->hostcache->trie, he->addr, MAX_PREFIX_LENGTH, pxlen, MAX_PREFIX_LENGTH);

  rta_free(old_src);
  return old_src != he->src;
}
2117
/*
 * Refresh the whole hostcache of table @tab: rebuild the prefix trie
 * from scratch, drop unused entries, re-resolve the rest, and schedule
 * next hop updates in dependent tables whose entries changed.
 */
static void
rt_update_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;
  struct hostentry *he;
  node *n, *x;

  /* Reset the trie */
  lp_flush(hc->lp);
  hc->trie = f_new_trie(hc->lp, sizeof(struct f_trie_node));

  /* DELSAFE walk: hc_delete_hostentry() removes nodes as we go */
  WALK_LIST_DELSAFE(n, x, hc->hostentries)
    {
      he = SKIP_BACK(struct hostentry, ln, n);
      if (!he->uc)
	{
	  /* No route references this host any more -- drop the entry */
	  hc_delete_hostentry(hc, he);
	  continue;
	}

      if (rt_update_hostentry(tab, he))
	rt_schedule_nhu(he->tab);
    }

  tab->hcu_scheduled = 0;
}
2144
/*
 * Look up (or create and resolve) the hostentry for address @a in table
 * @tab, on behalf of dependent table @dep. The table's hostcache is
 * created lazily on first use. @ll is the link-local companion address
 * used when the host turns out to be directly attached.
 */
static struct hostentry *
rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
{
  struct hostentry *he;

  if (!tab->hostcache)
    rt_init_hostcache(tab);

  uint k = hc_hash(a, dep);
  struct hostcache *hc = tab->hostcache;
  for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
    if (ipa_equal(he->addr, a) && (he->tab == dep))
      return he;

  /* Not cached yet -- create a new entry and resolve it immediately */
  he = hc_new_hostentry(hc, a, ll, dep, k);
  rt_update_hostentry(tab, he);
  return he;
}
2163
/*
 * Bind rta @a (of a route in table @dep) to a recursive next hop *@gw
 * resolved through table @tab, copying the resolved next hop fields
 * into @a. *@ll is the link-local companion address.
 */
void
rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr *gw, ip_addr *ll)
{
  rta_apply_hostentry(a, rt_get_hostentry(tab, *gw, *ll, dep));
}
2169
2170
2171 /*
2172 * CLI commands
2173 */
2174
/*
 * Format the next hop of route @e into buffer @via for CLI output,
 * according to the destination type of its attributes.
 */
static void
rt_format_via(rte *e, byte *via)
{
  rta *a = e->attrs;

  switch (a->dest)
    {
    case RTD_ROUTER:	bsprintf(via, "via %I on %s", a->gw, a->iface->name); break;
    case RTD_DEVICE:	bsprintf(via, "dev %s", a->iface->name); break;
    case RTD_BLACKHOLE:	bsprintf(via, "blackhole"); break;
    case RTD_UNREACHABLE:	bsprintf(via, "unreachable"); break;
    case RTD_PROHIBIT:	bsprintf(via, "prohibited"); break;
    case RTD_MULTIPATH:	bsprintf(via, "multipath"); break;
    default:		bsprintf(via, "???");
    }
}
2191
/*
 * Print one route @e (with network prefix string @ia, possibly empty
 * for secondary routes) to CLI @c, including next hop, source protocol,
 * timestamps and -- in verbose mode -- full attribute listing.
 * @tmpa carries the route's temporary attributes from the caller.
 */
static void
rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, ea_list *tmpa)
{
  byte via[STD_ADDRESS_P_LENGTH+32], from[STD_ADDRESS_P_LENGTH+8];
  byte tm[TM_DATETIME_BUFFER_SIZE], info[256];
  rta *a = e->attrs;
  int primary = (e->net->routes == e);
  int sync_error = (e->net->n.flags & KRF_SYNC_ERROR);
  void (*get_route_info)(struct rte *, byte *buf, struct ea_list *attrs);
  struct mpnh *nh;

  rt_format_via(e, via);
  tm_format_datetime(tm, &config->tf_route, e->lastmod);
  /* Show "from X" only when the route was learned from a different
     address than its gateway */
  if (ipa_nonzero(a->from) && !ipa_equal(a->from, a->gw))
    bsprintf(from, " from %I", a->from);
  else
    from[0] = 0;

  get_route_info = a->src->proto->proto->get_route_info;
  if (get_route_info || d->verbose)
    {
      /* Need to normalize the extended attributes */
      ea_list *t = tmpa;
      t = ea_append(t, a->eattrs);
      tmpa = alloca(ea_scan(t));
      ea_merge(t, tmpa);
      ea_sort(tmpa);
    }
  if (get_route_info)
    get_route_info(e, info, tmpa);
  else
    bsprintf(info, " (%d)", e->pref);
  /* '*' marks the primary route, '!' a primary route with kernel sync error */
  cli_printf(c, -1007, "%-18s %s [%s %s%s]%s%s", ia, via, a->src->proto->name,
	     tm, from, primary ? (sync_error ? " !" : " *") : "", info);
  for (nh = a->nexthops; nh; nh = nh->next)
    cli_printf(c, -1007, "\tvia %I on %s weight %d", nh->gw, nh->iface->name, nh->weight + 1);
  if (d->verbose)
    rta_show(c, a, tmpa);
}
2231
/*
 * Show the routes of one network @n according to the query in @d:
 * apply export-mode simulation (import_control + export filter of the
 * target protocol), protocol restriction and the user's show filter,
 * then print matching routes and update the counters in @d.
 */
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
  rte *e, *ee;
  byte ia[STD_ADDRESS_P_LENGTH+8];
  struct ea_list *tmpa;
  struct announce_hook *a = NULL;
  int first = 1;
  int pass = 0;

  bsprintf(ia, "%I/%d", n->n.prefix, n->n.pxlen);

  if (d->export_mode)
    {
      if (! d->export_protocol->rt_notify)
	return;

      a = proto_find_announce_hook(d->export_protocol, d->table);
      if (!a)
	return;
    }

  for (e = n->routes; e; e = e->next)
    {
      if (rte_is_filtered(e) != d->filtered)
	continue;

      d->rt_counter++;
      d->net_counter += first;
      first = 0;

      /* In RA_OPTIMAL/RA_ACCEPTED export modes, only the first
	 accepted route is shown; the rest is just counted */
      if (pass)
	continue;

      ee = e;
      rte_update_lock();		/* We use the update buffer for filtering */
      tmpa = make_tmp_attrs(e, rte_update_pool);

      if (d->export_mode)
	{
	  struct proto *ep = d->export_protocol;
	  int ic = ep->import_control ? ep->import_control(ep, &e, &tmpa, rte_update_pool) : 0;

	  if (ep->accept_ra_types == RA_OPTIMAL)
	    pass = 1;

	  if (ic < 0)
	    goto skip;

	  if (d->export_mode > RSEM_PREEXPORT)
	    {
	      /*
	       * FIXME - This shows what should be exported according to current
	       * filters, but not what was really exported. 'configure soft'
	       * command may change the export filter and do not update routes.
	       */
	      int do_export = (ic > 0) ||
		(f_run(a->out_filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) <= F_ACCEPT);

	      if (do_export != (d->export_mode == RSEM_EXPORT))
		goto skip;

	      if ((d->export_mode == RSEM_EXPORT) && (ep->accept_ra_types == RA_ACCEPTED))
		pass = 1;
	    }
	}

      if (d->show_protocol && (d->show_protocol != e->attrs->src->proto))
	goto skip;

      if (f_run(d->filter, &e, &tmpa, rte_update_pool, FF_FORCE_TMPATTR) > F_ACCEPT)
	goto skip;

      d->show_counter++;
      if (d->stats < 2)
	rt_show_rte(c, ia, e, d, tmpa);
      /* Print the prefix only for the first shown route of this network */
      ia[0] = 0;

    skip:
      /* Filters may have COW-ed the route; free the temporary copy */
      if (e != ee)
	{
	  rte_free(e);
	  e = ee;
	}
      rte_update_unlock();

      if (d->primary_only)
	break;
    }
}
2322
/*
 * CLI continuation hook for 'show route': process a bounded batch of
 * networks per call, parking the FIB iterator between calls, and stop
 * early on reconfiguration or when the export protocol goes down.
 */
static void
rt_show_cont(struct cli *c)
{
  struct rt_show_data *d = c->rover;
#ifdef DEBUGGING
  unsigned max = 4;
#else
  unsigned max = 64;
#endif
  struct fib *fib = &d->table->fib;
  struct fib_iterator *it = &d->fit;

  FIB_ITERATE_START(fib, it, f)
    {
      net *n = (net *) f;
      if (d->running_on_config && d->running_on_config != config)
	{
	  cli_printf(c, 8004, "Stopped due to reconfiguration");
	  goto done;
	}
      if (d->export_protocol && (d->export_protocol->export_state == ES_DOWN))
	{
	  cli_printf(c, 8005, "Protocol is down");
	  goto done;
	}
      if (!max--)
	{
	  /* Batch done -- save position, continue on the next call */
	  FIB_ITERATE_PUT(it, f);
	  return;
	}
      rt_show_net(c, n, d);
    }
  FIB_ITERATE_END(f);
  if (d->stats)
    cli_printf(c, 14, "%d of %d routes for %d networks", d->show_counter, d->rt_counter, d->net_counter);
  else
    cli_printf(c, 0, "");
done:
  /* Detach the continuation/cleanup hooks -- the walk is finished */
  c->cont = c->cleanup = NULL;
}
2363
/* CLI cleanup hook for 'show route': detach the parked FIB iterator
   so the table can be modified safely after the session ends. */
static void
rt_show_cleanup(struct cli *c)
{
  struct rt_show_data *d = c->rover;

  /* Unlink the iterator */
  fit_get(&d->table->fib, &d->fit);
}
2372
/*
 * Entry point of the 'show route' CLI command. Chooses the table,
 * then either starts an incremental walk over the whole table
 * (d->pxlen == 256 marks "no specific prefix given") or shows a single
 * network found by exact or longest-prefix match.
 */
void
rt_show(struct rt_show_data *d)
{
  net *n;

  /* Default is either a master table or a table related to a respective protocol */
  if (!d->table && d->export_protocol) d->table = d->export_protocol->table;
  if (!d->table && d->show_protocol) d->table = d->show_protocol->table;
  if (!d->table) d->table = config->master_rtc->table;

  /* Filtered routes are neither exported nor have sensible ordering */
  if (d->filtered && (d->export_mode || d->primary_only))
    cli_msg(0, "");

  if (d->pxlen == 256)
    {
      /* Whole-table dump -- driven incrementally via rt_show_cont() */
      FIB_ITERATE_INIT(&d->fit, &d->table->fib);
      this_cli->cont = rt_show_cont;
      this_cli->cleanup = rt_show_cleanup;
      this_cli->rover = d;
    }
  else
    {
      /* 'show route for X' uses longest match, plain lookup is exact */
      if (d->show_for)
	n = net_route(d->table, d->prefix, d->pxlen);
      else
	n = net_find(d->table, d->prefix, d->pxlen);

      if (n)
	rt_show_net(this_cli, n, d);

      if (d->rt_counter)
	cli_msg(0, "");
      else
	cli_msg(8001, "Network not in table");
    }
}
2410
/*
 * Documentation for functions declared inline in route.h
 */
#if 0

/* NOTE: this block is deliberately compiled out. It exists only so the
   documentation extractor picks up the DOC comments for the inline
   functions actually defined in nest/route.h; the DUMMY bodies are never
   built. Keep these prototypes in sync with route.h. */

/**
 * net_find - find a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_find() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry or %NULL if no such network
 * exists.
 */
static inline net *net_find(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * net_get - obtain a network entry
 * @tab: a routing table
 * @addr: address of the network
 * @len: length of the network prefix
 *
 * net_get() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry. If no such entry exists, it's
 * created.
 */
static inline net *net_get(rtable *tab, ip_addr addr, unsigned len)
{ DUMMY; }

/**
 * rte_cow - copy a route for writing
 * @r: a route entry to be copied
 *
 * rte_cow() takes a &rte and prepares it for modification. The exact action
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 * just returned unchanged, else a new temporary entry with the same contents
 * is created.
 *
 * The primary use of this function is inside the filter machinery -- when
 * a filter wants to modify &rte contents (to change the preference or to
 * attach another set of attributes), it must ensure that the &rte is not
 * shared with anyone else (and especially that it isn't stored in any routing
 * table).
 *
 * Result: a pointer to the new writable &rte.
 */
static inline rte * rte_cow(rte *r)
{ DUMMY; }

#endif