/*
 *	BIRD -- Routing Tables
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Routing tables
 *
 * Routing tables are probably the most important structures BIRD uses. They
 * hold all the information about known networks, the associated routes and
 * their attributes.
 *
 * There are multiple routing tables (a primary one together with any
 * number of secondary ones if requested by the configuration). Each table
 * is basically a FIB containing entries describing the individual
 * destination networks. For each network (represented by structure &net),
 * there is a one-way linked list of route entries (&rte), the first entry
 * on the list being the best one (i.e., the one we currently use
 * for routing), the order of the other ones is undetermined.
 *
 * The &rte contains information specific to the route (preference, protocol
 * metrics, time of last modification etc.) and a pointer to a &rta structure
 * (see the route attribute module for a precise explanation) holding the
 * remaining route attributes which are expected to be shared by multiple
 * routes in order to conserve memory.
 */
#include "nest/bird.h"
#include "nest/route.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "lib/resource.h"
#include "lib/event.h"
#include "lib/string.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/data.h"
#include "lib/alloca.h"

#ifdef CONFIG_BGP
#include "proto/bgp/bgp.h"
#endif
pool *rt_table_pool;

static slab *rte_slab;
static linpool *rte_update_pool;

list routing_tables;

static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
static void rt_update_hostcache(rtable *tab);
static void rt_next_hop_update(rtable *tab);
static inline void rt_prune_table(rtable *tab);
/* Like fib_route(), but skips empty net entries */
static inline void *
net_route_ip4(rtable *t, net_addr_ip4 *n)
{
  net *r;

  while (r = net_find_valid(t, (net_addr *) n), (!r) && (n->pxlen > 0))
  {
    n->pxlen--;
    ip4_clrbit(&n->prefix, n->pxlen);
  }

  return r;
}
static inline void *
net_route_ip6(rtable *t, net_addr_ip6 *n)
{
  net *r;

  while (r = net_find_valid(t, (net_addr *) n), (!r) && (n->pxlen > 0))
  {
    n->pxlen--;
    ip6_clrbit(&n->prefix, n->pxlen);
  }

  return r;
}
static inline void *
net_route_ip6_sadr(rtable *t, net_addr_ip6_sadr *n)
{
  struct fib_node *fn;

  while (1)
  {
    net *best = NULL;
    int best_pxlen = 0;

    /* We need to do dst first matching. Since sadr addresses are hashed on dst
       prefix only, find the hash table chain and go through it to find the
       match with the smallest matching src prefix. */
    for (fn = fib_get_chain(&t->fib, (net_addr *) n); fn; fn = fn->next)
    {
      net_addr_ip6_sadr *a = (void *) fn->addr;

      if (net_equal_dst_ip6_sadr(n, a) &&
          net_in_net_src_ip6_sadr(n, a) &&
          (a->src_pxlen >= best_pxlen))
      {
        best = fib_node_to_user(&t->fib, fn);
        best_pxlen = a->src_pxlen;
      }
    }

    if (best)
      return best;

    if (!n->dst_pxlen)
      break;

    n->dst_pxlen--;
    ip6_clrbit(&n->dst_prefix, n->dst_pxlen);
  }

  return NULL;
}
void *
net_route(rtable *tab, const net_addr *n)
{
  ASSERT(tab->addr_type == n->type);

  net_addr *n0 = alloca(n->length);
  net_copy(n0, n);

  switch (n->type)
  {
  case NET_IP4:
  case NET_VPN4:
  case NET_ROA4:
    return net_route_ip4(tab, (net_addr_ip4 *) n0);

  case NET_IP6:
  case NET_VPN6:
  case NET_ROA6:
    return net_route_ip6(tab, (net_addr_ip6 *) n0);

  case NET_IP6_SADR:
    return net_route_ip6_sadr(tab, (net_addr_ip6_sadr *) n0);

  default:
    return NULL;
  }
}
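/*
 * Editor's note, a worked example of the fallback lookup above: looking up
 * 10.1.1.0/24 in a table that contains only 10.1.0.0/16 first misses at /24,
 * then net_route_ip4() repeatedly shortens the prefix (clearing the lowest
 * significant bit via ip4_clrbit()), trying 10.1.0.0/23, 10.1.0.0/22, ...
 * until it reaches 10.1.0.0/16, which matches and is returned.
 */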
static int
net_roa_check_ip4(rtable *tab, const net_addr_ip4 *px, u32 asn)
{
  struct net_addr_roa4 n = NET_ADDR_ROA4(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa4 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa4(roa, &n) && rte_is_valid(r->routes))
      {
        anything = 1;
        if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
          return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    n.pxlen--;
    ip4_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
static int
net_roa_check_ip6(rtable *tab, const net_addr_ip6 *px, u32 asn)
{
  struct net_addr_roa6 n = NET_ADDR_ROA6(px->prefix, px->pxlen, 0, 0);
  struct fib_node *fn;
  int anything = 0;

  while (1)
  {
    for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
    {
      net_addr_roa6 *roa = (void *) fn->addr;
      net *r = fib_node_to_user(&tab->fib, fn);

      if (net_equal_prefix_roa6(roa, &n) && rte_is_valid(r->routes))
      {
        anything = 1;
        if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
          return ROA_VALID;
      }
    }

    if (n.pxlen == 0)
      break;

    n.pxlen--;
    ip6_clrbit(&n.prefix, n.pxlen);
  }

  return anything ? ROA_INVALID : ROA_UNKNOWN;
}
/**
 * roa_check - check validity of route origination in a ROA table
 * @tab: ROA table
 * @n: network prefix to check
 * @asn: AS number of network prefix
 *
 * Implements RFC 6483 route validation for the given network prefix. The
 * procedure is to find all candidate ROAs - ROAs whose prefixes cover the
 * given network prefix. If there is no candidate ROA, return ROA_UNKNOWN.
 * If there is a candidate ROA with matching ASN and maxlen field greater
 * than or equal to the given prefix length, return ROA_VALID. Otherwise,
 * return ROA_INVALID. If the caller cannot determine the origin AS, 0 may
 * be used (in that case ROA_VALID cannot happen). Table @tab must have type
 * NET_ROA4 or NET_ROA6, network @n must have type NET_IP4 or NET_IP6,
 * respectively.
 */
int
net_roa_check(rtable *tab, const net_addr *n, u32 asn)
{
  if ((tab->addr_type == NET_ROA4) && (n->type == NET_IP4))
    return net_roa_check_ip4(tab, (const net_addr_ip4 *) n, asn);
  else if ((tab->addr_type == NET_ROA6) && (n->type == NET_IP6))
    return net_roa_check_ip6(tab, (const net_addr_ip6 *) n, asn);
  else
    return ROA_UNKNOWN;	/* Should not happen */
}
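/*
 * Illustrative use (editor's sketch, not part of the original source;
 * the identifiers roa_tab and origin_asn are hypothetical):
 *
 *   net_addr_ip4 px = NET_ADDR_IP4(ip4_build(192, 0, 2, 0), 24);
 *
 *   switch (net_roa_check(roa_tab, (net_addr *) &px, origin_asn))
 *   {
 *   case ROA_VALID:   break;  // covering ROA with matching ASN and maxlen
 *   case ROA_INVALID: break;  // covered, but no ROA matches ASN/maxlen
 *   case ROA_UNKNOWN: break;  // no covering ROA at all
 *   }
 */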
/**
 * rte_find - find a route
 * @net: network node
 * @src: route source
 *
 * The rte_find() function returns a route for destination @net
 * which is from route source @src.
 */
rte *
rte_find(net *net, struct rte_src *src)
{
  rte *e = net->routes;

  while (e && e->attrs->src != src)
    e = e->next;
  return e;
}
/**
 * rte_get_temp - get a temporary &rte
 * @a: attributes to assign to the new route (a &rta; in case it's
 * un-cached, rte_update() will create a cached copy automatically)
 *
 * Create a temporary &rte and bind it with the attributes @a.
 * Also set route preference to the default preference set for
 * the protocol.
 */
rte *
rte_get_temp(rta *a)
{
  rte *e = sl_alloc(rte_slab);

  e->attrs = a;
  e->id = 0;
  e->flags = 0;
  e->pref = 0;
  return e;
}

rte *
rte_do_cow(rte *r)
{
  rte *e = sl_alloc(rte_slab);

  memcpy(e, r, sizeof(rte));
  e->attrs = rta_clone(r->attrs);
  e->flags = 0;
  return e;
}
/**
 * rte_cow_rta - get a private writable copy of &rte with writable &rta
 * @r: a route entry to be copied
 * @lp: a linpool from which to allocate &rta
 *
 * rte_cow_rta() takes a &rte and prepares it and associated &rta for
 * modification. There are three possibilities: First, both &rte and &rta are
 * private copies, in that case they are returned unchanged. Second, &rte is a
 * private copy, but &rta is cached, in that case &rta is duplicated using
 * rta_do_cow(). Third, &rte is shared and &rta is cached, in that case
 * both structures are duplicated by rte_do_cow() and rta_do_cow().
 *
 * Note that in the second case, cached &rta loses one reference, while the
 * private copy created by rta_do_cow() is a shallow copy sharing indirect data
 * (eattrs, nexthops, ...) with it. To work properly, the original shared &rta
 * should have another reference during the life of the created private copy.
 *
 * Result: a pointer to the new writable &rte with writable &rta.
 */
rte *
rte_cow_rta(rte *r, linpool *lp)
{
  if (!rta_is_cached(r->attrs))
    return r;

  r = rte_cow(r);
  rta *a = rta_do_cow(r->attrs, lp);
  rta_free(r->attrs);
  r->attrs = a;
  return r;
}
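/*
 * Typical use (editor's sketch, not from the original source): a caller
 * holding a possibly shared route obtains a writable copy before modifying
 * attributes, keeping an extra reference to the old cached &rta alive for
 * the lifetime of the shallow copy:
 *
 *   rta *old = rta_is_cached(r->attrs) ? rta_clone(r->attrs) : NULL;
 *   r = rte_cow_rta(r, lp);
 *   r->attrs->igp_metric = 10;   // now safe, attrs are private
 *   ...
 *   rta_free(old);               // drop the extra reference when done
 */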
/**
 * rte_init_tmp_attrs - initialize temporary ea_list for route
 * @r: route entry to be modified
 * @lp: linpool from which to allocate attributes
 * @max: maximum number of added temporary attributes
 *
 * This function is supposed to be called from make_tmp_attrs() and
 * store_tmp_attrs() hooks before the rte_make_tmp_attr() / rte_store_tmp_attr()
 * functions. It allocates an &ea_list with length for @max items of temporary
 * attributes and puts it on top of the eattrs stack.
 */
void
rte_init_tmp_attrs(rte *r, linpool *lp, uint max)
{
  struct ea_list *e = lp_alloc(lp, sizeof(struct ea_list) + max * sizeof(eattr));

  e->next = r->attrs->eattrs;
  e->flags = EALF_SORTED | EALF_TEMP;
  e->count = 0;

  r->attrs->eattrs = e;
}
/**
 * rte_make_tmp_attr - make temporary eattr from private route fields
 * @r: route entry to be modified
 * @id: attribute ID
 * @type: attribute type
 * @val: attribute value (u32 or adata ptr)
 *
 * This function is supposed to be called from make_tmp_attrs() hook for
 * each temporary attribute, after the temporary &ea_list was initialized by
 * rte_init_tmp_attrs(). It checks whether the temporary attribute is supposed
 * to be defined (based on route pflags) and if so then it fills an &eattr field
 * in the preallocated temporary &ea_list on top of route @r eattrs stack.
 *
 * Note that it may require a free &eattr in the temporary &ea_list, so it must
 * not be called more times than the @max argument of rte_init_tmp_attrs().
 */
void
rte_make_tmp_attr(rte *r, uint id, uint type, uintptr_t val)
{
  if (r->pflags & EA_ID_FLAG(id))
  {
    ea_list *e = r->attrs->eattrs;
    eattr *a = &e->attrs[e->count++];
    a->id = id;
    a->type = type;
    a->flags = 0;

    if (type & EAF_EMBEDDED)
      a->u.data = (u32) val;
    else
      a->u.ptr = (struct adata *) val;
  }
}
/**
 * rte_store_tmp_attr - store temporary eattr to private route fields
 * @r: route entry to be modified
 * @id: attribute ID
 *
 * This function is supposed to be called from store_tmp_attrs() hook for
 * each temporary attribute, after the temporary &ea_list was initialized by
 * rte_init_tmp_attrs(). It checks whether the temporary attribute is defined
 * in route @r eattrs stack, updates route pflags accordingly, undefines it by
 * filling an &eattr field in the preallocated temporary &ea_list on top of the
 * eattrs stack, and returns the value. The caller is supposed to store it in
 * the appropriate private field.
 *
 * Note that it may require a free &eattr in the temporary &ea_list, so it must
 * not be called more times than the @max argument of rte_init_tmp_attrs().
 */
uintptr_t
rte_store_tmp_attr(rte *r, uint id)
{
  ea_list *e = r->attrs->eattrs;
  eattr *a = ea_find(e->next, id);

  if (a)
  {
    e->attrs[e->count++] = (struct eattr) { .id = id, .type = EAF_TYPE_UNDEF };
    r->pflags |= EA_ID_FLAG(id);
    return (a->type & EAF_EMBEDDED) ? a->u.data : (uintptr_t) a->u.ptr;
  }
  else
  {
    r->pflags &= ~EA_ID_FLAG(id);
    return 0;
  }
}
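/*
 * How a protocol wires these helpers together (editor's sketch, not from
 * this file; modeled loosely on the RIP hooks, with the hypothetical
 * attribute ID EA_HYP_METRIC and a hypothetical u.hyp.metric field):
 *
 *   static void
 *   hyp_make_tmp_attrs(rte *rt, linpool *lp)
 *   {
 *     rte_init_tmp_attrs(rt, lp, 1);
 *     rte_make_tmp_attr(rt, EA_HYP_METRIC, EAF_TYPE_INT, rt->u.hyp.metric);
 *   }
 *
 *   static void
 *   hyp_store_tmp_attrs(rte *rt, linpool *lp)
 *   {
 *     rte_init_tmp_attrs(rt, lp, 1);
 *     rt->u.hyp.metric = rte_store_tmp_attr(rt, EA_HYP_METRIC);
 *   }
 */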
/**
 * rte_make_tmp_attrs - prepare route by adding all relevant temporary route attributes
 * @r: route entry to be modified (may be replaced if COW)
 * @lp: linpool from which to allocate attributes
 * @old_attrs: temporary ref to old &rta (may be NULL)
 *
 * This function expands privately stored protocol-dependent route attributes
 * to a uniform &eattr / &ea_list representation. It is essentially a wrapper
 * around the protocol make_tmp_attrs() hook, which does some additional work
 * like ensuring that route @r is writable.
 *
 * The route @r may be read-only (with %REF_COW flag), in that case a rw copy is
 * obtained by rte_cow() and @r is replaced. If @r is originally rw, it may be
 * directly modified (and it is never copied).
 *
 * If the @old_attrs ptr is supplied, the function obtains another reference of
 * the old cached &rta, which is necessary in some cases (see rte_cow_rta() for
 * details). It is freed by rte_store_tmp_attrs(), or manually by rta_free().
 *
 * Generally, if the caller ensures that @r is read-only (e.g. in route export)
 * then it may ignore @old_attrs (and set it to NULL), but must handle
 * replacement of @r. If the caller ensures that @r is writable (e.g. in route
 * import) then it may ignore replacement of @r, but it must handle @old_attrs.
 */
static void
rte_make_tmp_attrs(rte **r, linpool *lp, rta **old_attrs)
{
  void (*make_tmp_attrs)(rte *r, linpool *lp);
  make_tmp_attrs = (*r)->attrs->src->proto->make_tmp_attrs;

  if (!make_tmp_attrs)
    return;

  /* We may need to keep ref to old attributes, will be freed in rte_store_tmp_attrs() */
  if (old_attrs)
    *old_attrs = rta_is_cached((*r)->attrs) ? rta_clone((*r)->attrs) : NULL;

  *r = rte_cow_rta(*r, lp);
  make_tmp_attrs(*r, lp);
}
/**
 * rte_store_tmp_attrs - store temporary route attributes back to private route fields
 * @r: route entry to be modified
 * @lp: linpool from which to allocate attributes
 * @old_attrs: temporary ref to old &rta
 *
 * This function stores temporary route attributes that were expanded by
 * rte_make_tmp_attrs() back to private route fields and also undefines them.
 * It is essentially a wrapper around the protocol store_tmp_attrs() hook,
 * which does some additional work like a shortcut if there is no change and
 * cleanup of the @old_attrs reference obtained by rte_make_tmp_attrs().
 */
static void
rte_store_tmp_attrs(rte *r, linpool *lp, rta *old_attrs)
{
  void (*store_tmp_attrs)(rte *rt, linpool *lp);
  store_tmp_attrs = r->attrs->src->proto->store_tmp_attrs;

  if (!store_tmp_attrs)
    return;

  ASSERT(!rta_is_cached(r->attrs));

  /* If there is no new ea_list, we just skip the temporary ea_list */
  ea_list *ea = r->attrs->eattrs;
  if (ea && (ea->flags & EALF_TEMP))
    r->attrs->eattrs = ea->next;
  else
    store_tmp_attrs(r, lp);

  /* Free ref we got in rte_make_tmp_attrs(), have to do rta_lookup() first */
  r->attrs = rta_lookup(r->attrs);
  rta_free(old_attrs);
}
static int				/* Actually better or at least as good as */
rte_better(rte *new, rte *old)
{
  int (*better)(rte *, rte *);

  if (!rte_is_valid(old))
    return 1;
  if (!rte_is_valid(new))
    return 0;

  if (new->pref > old->pref)
    return 1;
  if (new->pref < old->pref)
    return 0;
  if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
  {
    /*
     * If the user has configured protocol preferences, so that two different
     * protocols have the same preference, try to break the tie by comparing
     * addresses. Not too useful, but keeps the ordering of routes unambiguous.
     */
    return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
  }
  if (better = new->attrs->src->proto->rte_better)
    return better(new, old);
  return 0;
}
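/*
 * Editor's worked example of the comparison order above: a valid route with
 * pref 120 beats one with pref 100 regardless of protocol. Only when the
 * preferences are equal and both routes come from the same protocol type is
 * the protocol's own rte_better() hook (e.g. the BGP decision process)
 * consulted; different protocol types with equal preference are ordered by
 * the arbitrary but stable protocol comparison.
 */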
static int
rte_mergable(rte *pri, rte *sec)
{
  int (*mergable)(rte *, rte *);

  if (!rte_is_valid(pri) || !rte_is_valid(sec))
    return 0;

  if (pri->pref != sec->pref)
    return 0;

  if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
    return 0;

  if (mergable = pri->attrs->src->proto->rte_mergable)
    return mergable(pri, sec);

  return 1;
}
static void
rte_trace(struct proto *p, rte *e, int dir, char *msg)
{
  log(L_TRACE "%s %c %s %N %s", p->name, dir, msg, e->net->n.addr, rta_dest_name(e->attrs->dest));
}

static inline void
rte_trace_in(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '>', msg);
}

static inline void
rte_trace_out(uint flag, struct proto *p, rte *e, char *msg)
{
  if (p->debug & flag)
    rte_trace(p, e, '<', msg);
}
static rte *
export_filter_(struct channel *c, rte *rt0, rte **rt_free, linpool *pool, int silent)
{
  struct proto *p = c->proto;
  const struct filter *filter = c->out_filter;
  struct proto_stats *stats = &c->stats;
  rte *rt;
  int v;

  rt = rt0;
  *rt_free = NULL;

  v = p->preexport ? p->preexport(p, &rt, pool) : 0;
  if (v < 0)
  {
    if (silent)
      goto reject;

    stats->exp_updates_rejected++;
    if (v == RIC_REJECT)
      rte_trace_out(D_FILTERS, p, rt, "rejected by protocol");
    goto reject;
  }
  if (v > 0)
  {
    if (!silent)
      rte_trace_out(D_FILTERS, p, rt, "forced accept by protocol");
    goto accept;
  }

  rte_make_tmp_attrs(&rt, pool, NULL);

  v = filter && ((filter == FILTER_REJECT) ||
                 (f_run(filter, &rt, pool,
                        (silent ? FF_SILENT : 0)) > F_ACCEPT));
  if (v)
  {
    if (silent)
      goto reject;

    stats->exp_updates_filtered++;
    rte_trace_out(D_FILTERS, p, rt, "filtered out");
    goto reject;
  }

 accept:
  if (rt != rt0)
    *rt_free = rt;
  return rt;

 reject:
  /* Discard temporary rte */
  if (rt != rt0)
    rte_free(rt);
  return NULL;
}

static inline rte *
export_filter(struct channel *c, rte *rt0, rte **rt_free, int silent)
{
  return export_filter_(c, rt0, rt_free, rte_update_pool, silent);
}
static void
do_rt_notify(struct channel *c, net *net, rte *new, rte *old, int refeed)
{
  struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;

  if (refeed && new)
    c->refeed_count++;

  /* Apply export limit */
  struct channel_limit *l = &c->out_limit;
  if (l->action && !old && new)
  {
    if (stats->exp_routes >= l->limit)
      channel_notify_limit(c, l, PLD_OUT, stats->exp_routes);

    if (l->state == PLS_BLOCKED)
    {
      stats->exp_updates_rejected++;
      rte_trace_out(D_FILTERS, p, new, "rejected [limit]");
      return;
    }
  }

  /* Apply export table */
  if (c->out_table && !rte_update_out(c, net->n.addr, new, old, refeed))
    return;

  if (new)
    stats->exp_updates_accepted++;
  else
    stats->exp_withdraws_accepted++;

  if (old)
  {
    bmap_clear(&c->export_map, old->id);
    stats->exp_routes--;
  }

  if (new)
  {
    bmap_set(&c->export_map, new->id);
    stats->exp_routes++;
  }

  if (p->debug & D_ROUTES)
  {
    if (new && old)
      rte_trace_out(D_ROUTES, p, new, "replaced");
    else if (new)
      rte_trace_out(D_ROUTES, p, new, "added");
    else if (old)
      rte_trace_out(D_ROUTES, p, old, "removed");
  }

  p->rt_notify(p, c, net, new, old);
}
static void
rt_notify_basic(struct channel *c, net *net, rte *new, rte *old, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_free = NULL;

  if (new)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  if (new)
    new = export_filter(c, new, &new_free, 0);

  if (old && !bmap_test(&c->export_map, old->id))
    old = NULL;

  if (!new && !old)
    return;

  do_rt_notify(c, net, new, old, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
static void
rt_notify_accepted(struct channel *c, net *net, rte *new_changed, rte *old_changed, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_best = NULL;
  rte *old_best = NULL;
  rte *new_free = NULL;
  int new_first = 0;

  /*
   * We assume that there are no changes in net route order except (added)
   * new_changed and (removed) old_changed. Therefore, the function is not
   * compatible with deterministic_med (where nontrivial reordering can happen
   * as a result of a route change) and with recomputation of recursive routes
   * due to next hop update (where many routes can be changed in one step).
   *
   * Note that we need this assumption just for optimizations, we could just
   * run full new_best recomputation otherwise.
   *
   * There are three cases:
   * feed or old_best is old_changed -> we need to recompute new_best
   * old_best is before new_changed -> new_best is old_best, ignore
   * old_best is after new_changed -> try new_changed, otherwise old_best
   */

  if (net->routes)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* Find old_best - either old_changed, or route for net->routes */
  if (old_changed && bmap_test(&c->export_map, old_changed->id))
    old_best = old_changed;
  else
  {
    for (rte *r = net->routes; rte_is_valid(r); r = r->next)
    {
      if (bmap_test(&c->export_map, r->id))
      {
        old_best = r;
        break;
      }

      /* Note if new_changed found before old_best */
      if (r == new_changed)
        new_first = 1;
    }
  }

  /* Find new_best */
  if ((new_changed == old_changed) || (old_best == old_changed))
  {
    /* Feed or old_best changed -> find first accepted by filters */
    for (rte *r = net->routes; rte_is_valid(r); r = r->next)
      if (new_best = export_filter(c, r, &new_free, 0))
        break;
  }
  else
  {
    /* Other cases -> either new_changed, or old_best (and nothing changed) */
    if (new_first && (new_changed = export_filter(c, new_changed, &new_free, 0)))
      new_best = new_changed;
    else
      return;
  }

  if (!new_best && !old_best)
    return;

  do_rt_notify(c, net, new_best, old_best, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
static struct nexthop *
nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
{
  return nexthop_merge(nhs, &(a->nh), 1, 0, max, pool);
}
rte *
rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent)
{
  // struct proto *p = c->proto;
  struct nexthop *nhs = NULL;
  rte *best0, *best, *rt0, *rt, *tmp;

  best0 = net->routes;
  *rt_free = NULL;

  if (!rte_is_valid(best0))
    return NULL;

  best = export_filter_(c, best0, rt_free, pool, silent);

  if (!best || !rte_is_reachable(best))
    return best;

  for (rt0 = best0->next; rt0; rt0 = rt0->next)
  {
    if (!rte_mergable(best0, rt0))
      continue;

    rt = export_filter_(c, rt0, &tmp, pool, 1);

    if (!rt)
      continue;

    if (rte_is_reachable(rt))
      nhs = nexthop_merge_rta(nhs, rt->attrs, pool, c->merge_limit);

    if (tmp)
      rte_free(tmp);
  }

  if (nhs)
  {
    nhs = nexthop_merge_rta(nhs, best->attrs, pool, c->merge_limit);

    if (nhs->next)
    {
      best = rte_cow_rta(best, pool);
      nexthop_link(best->attrs, nhs);
    }
  }

  if (best != best0)
    *rt_free = best;

  return best;
}
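/*
 * Editor's example of the merging behaviour: with three routes for a net -
 * best route A (nexthop 10.0.0.1), mergable route B (nexthop 10.0.0.2) and
 * a non-mergable route C - the exported route is A, with its &rta rewritten
 * via rte_cow_rta() to carry the merged nexthop list {10.0.0.1, 10.0.0.2},
 * truncated to at most c->merge_limit nexthops. Unreachable or filtered
 * routes contribute nothing to the merged list.
 */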
static void
rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed,
                 rte *new_best, rte *old_best, int refeed)
{
  // struct proto *p = c->proto;
  rte *new_free = NULL;

  /* We assume that all rte arguments are either NULL or rte_is_valid() */

  /* This check should be done by the caller */
  if (!new_best && !old_best)
    return;

  /* Check whether the change is relevant to the merged route */
  if ((new_best == old_best) &&
      (new_changed != old_changed) &&
      !rte_mergable(new_best, new_changed) &&
      !rte_mergable(old_best, old_changed))
    return;

  if (new_best)
    c->stats.exp_updates_received++;
  else
    c->stats.exp_withdraws_received++;

  /* Prepare new merged route */
  if (new_best)
    new_best = rt_export_merged(c, net, &new_free, rte_update_pool, 0);

  /* Check old merged route */
  if (old_best && !bmap_test(&c->export_map, old_best->id))
    old_best = NULL;

  if (!new_best && !old_best)
    return;

  do_rt_notify(c, net, new_best, old_best, refeed);

  /* Discard temporary rte */
  if (new_free)
    rte_free(new_free);
}
/**
 * rte_announce - announce a routing table change
 * @tab: table the route has been added to
 * @type: type of route announcement (RA_UNDEF or RA_ANY)
 * @net: network in question
 * @new: the new or changed route
 * @old: the previous route replaced by the new one
 * @new_best: the new best route for the same network
 * @old_best: the previous best route for the same network
 *
 * This function gets a routing table update and announces it to all protocols
 * that are connected to the same table by their channels.
 *
 * There are two ways of how routing table changes are announced. First, there
 * is a change of just one route in @net (which may cause a change of the best
 * route of the network). In this case @new and @old describe the changed route
 * and @new_best and @old_best describe the best routes. Other routes are not
 * affected, but in a sorted table the order of other routes might change.
 *
 * Second, there is a bulk change of multiple routes in @net, with shared best
 * route selection. In such a case separate route changes are described using
 * @type of %RA_ANY, with @new and @old specifying the changed route, while
 * @new_best and @old_best are NULL. After that, another notification is done
 * where @new_best and @old_best are filled (may be the same), but @new and @old
 * are NULL.
 *
 * The function announces the change to all associated channels. For each
 * channel, an appropriate preprocessing is done according to channel &ra_mode.
 * For example, %RA_OPTIMAL channels receive just changes of best routes.
 *
 * In general, we first call the preexport() hook of a protocol, which performs
 * basic checks on the route (each protocol has a right to veto or force accept
 * of the route before any filter is asked). Then we consult an export filter
 * of the channel and verify the old route in an export map of the channel.
 * Finally, the rt_notify() hook of the protocol gets called.
 *
 * Note that there are also calls of rt_notify() hooks due to feed, but that is
 * done outside of the scope of rte_announce().
 */
static void
rte_announce(rtable *tab, uint type, net *net, rte *new, rte *old,
             rte *new_best, rte *old_best)
{
  if (!rte_is_valid(new))
    new = NULL;

  if (!rte_is_valid(old))
    old = NULL;

  if (!rte_is_valid(new_best))
    new_best = NULL;

  if (!rte_is_valid(old_best))
    old_best = NULL;

  if (!new && !old && !new_best && !old_best)
    return;

  if (new_best != old_best)
  {
    if (new_best)
      new_best->sender->stats.pref_routes++;
    if (old_best)
      old_best->sender->stats.pref_routes--;

    if (tab->hostcache)
      rt_notify_hostcache(tab, net);
  }

  struct channel *c; node *n;
  WALK_LIST2(c, n, tab->channels, table_node)
  {
    if (c->export_state == ES_DOWN)
      continue;

    if (type && (type != c->ra_mode))
      continue;

    switch (c->ra_mode)
    {
    case RA_OPTIMAL:
      if (new_best != old_best)
        rt_notify_basic(c, net, new_best, old_best, 0);
      break;

    case RA_ANY:
      if (new != old)
        rt_notify_basic(c, net, new, old, 0);
      break;

    case RA_ACCEPTED:
      rt_notify_accepted(c, net, new, old, 0);
      break;

    case RA_MERGED:
      rt_notify_merged(c, net, new, old, new_best, old_best, 0);
      break;
    }
  }
}
static inline int
rte_validate(rte *e)
{
  int c;
  net *n = e->net;

  if (!net_validate(n->n.addr))
  {
    log(L_WARN "Ignoring bogus prefix %N received via %s",
        n->n.addr, e->sender->proto->name);
    return 0;
  }

  /* FIXME: better handling different nettypes */
  c = !net_is_flow(n->n.addr) ?
    net_classify(n->n.addr): (IADDR_HOST | SCOPE_UNIVERSE);
  if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
  {
    log(L_WARN "Ignoring bogus route %N received via %s",
        n->n.addr, e->sender->proto->name);
    return 0;
  }

  if (net_type_match(n->n.addr, NB_DEST) == !e->attrs->dest)
  {
    log(L_WARN "Ignoring route %N with invalid dest %d received via %s",
        n->n.addr, e->attrs->dest, e->sender->proto->name);
    return 0;
  }

  if ((e->attrs->dest == RTD_UNICAST) && !nexthop_is_sorted(&(e->attrs->nh)))
  {
    log(L_WARN "Ignoring unsorted multipath route %N received via %s",
        n->n.addr, e->sender->proto->name);
    return 0;
  }

  return 1;
}
/**
 * rte_free - delete a &rte
 * @e: &rte to be deleted
 *
 * rte_free() deletes the given &rte from the routing table it's linked to.
 */
void
rte_free(rte *e)
{
  if (rta_is_cached(e->attrs))
    rta_free(e->attrs);
  sl_free(rte_slab, e);
}

static inline void
rte_free_quick(rte *e)
{
  rta_free(e->attrs);
  sl_free(rte_slab, e);
}
static int
rte_same(rte *x, rte *y)
{
  /* rte.flags are not checked, as they are mostly internal to rtable */
  return
    x->attrs == y->attrs &&
    x->pflags == y->pflags &&
    x->pref == y->pref &&
    (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y)) &&
    rte_is_filtered(x) == rte_is_filtered(y);
}

static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
static void
rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
{
  struct proto *p = c->proto;
  struct rtable *table = c->table;
  struct proto_stats *stats = &c->stats;
  static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
  rte *before_old = NULL;
  rte *old_best = net->routes;
  rte *old = NULL;
  rte **k;

  k = &net->routes;			/* Find and remove original route from the same protocol */
  while (old = *k)
  {
    if (old->attrs->src == src)
    {
      /* If there is the same route in the routing table but from
       * a different sender, then there are two paths from the
       * source protocol to this routing table through transparent
       * pipes, which is not allowed.
       *
       * We log that and ignore the route. If it is withdraw, we
       * ignore it completely (there might be 'spurious withdraws',
       * see FIXME in do_rte_announce())
       */
      if (old->sender->proto != p)
      {
        if (new)
        {
          log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %N to table %s",
                 net->n.addr, table->name);
          rte_free_quick(new);
        }
        return;
      }

      if (new && rte_same(old, new))
      {
        /* No changes, ignore the new route and refresh the old one */

        old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);

        if (!rte_is_filtered(new))
        {
          stats->imp_updates_ignored++;
          rte_trace_in(D_ROUTES, p, new, "ignored");
        }

        rte_free_quick(new);
        return;
      }
      *k = old->next;
      table->rt_count--;
      break;
    }
    k = &old->next;
    before_old = old;
  }

  if (!old)
    before_old = NULL;

  if (!old && !new)
  {
    stats->imp_withdraws_ignored++;
    return;
  }

  int new_ok = rte_is_ok(new);
  int old_ok = rte_is_ok(old);

  struct channel_limit *l = &c->rx_limit;
  if (l->action && !old && new && !c->in_table)
  {
    u32 all_routes = stats->imp_routes + stats->filt_routes;

    if (all_routes >= l->limit)
      channel_notify_limit(c, l, PLD_RX, all_routes);

    if (l->state == PLS_BLOCKED)
    {
      /* In receive limit the situation is simple, old is NULL so
         we just free new and exit like nothing happened */

      stats->imp_updates_ignored++;
      rte_trace_in(D_FILTERS, p, new, "ignored [limit]");
      rte_free_quick(new);
      return;
    }
  }

  l = &c->in_limit;
  if (l->action && !old_ok && new_ok)
  {
    if (stats->imp_routes >= l->limit)
      channel_notify_limit(c, l, PLD_IN, stats->imp_routes);

    if (l->state == PLS_BLOCKED)
    {
      /* In import limit the situation is more complicated. We
         shouldn't just drop the route, we should handle it like
         it was filtered. We also have to continue the route
         processing if old or new is non-NULL, but we should exit
         if both are NULL as this case is probably assumed to be
         already handled. */

      stats->imp_updates_ignored++;
      rte_trace_in(D_FILTERS, p, new, "ignored [limit]");

      if (c->in_keep_filtered)
        new->flags |= REF_FILTERED;
      else
      { rte_free_quick(new); new = NULL; }

      /* Note that old && !new could be possible when
         c->in_keep_filtered changed in the recent past. */

      if (!old && !new)
        return;

      new_ok = 0;
      goto skip_stats1;
    }
  }

  if (new_ok)
    stats->imp_updates_accepted++;
  else if (old_ok)
    stats->imp_withdraws_accepted++;
  else
    stats->imp_withdraws_ignored++;

 skip_stats1:

  if (new)
    rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
  if (old)
    rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;

  if (table->config->sorted)
  {
    /* If routes are sorted, just insert new route to appropriate position */
    if (new)
    {
      if (before_old && !rte_better(new, before_old))
        k = &before_old->next;
      else
        k = &net->routes;

      for (; *k; k=&(*k)->next)
        if (rte_better(new, *k))
          break;

      new->next = *k;
      *k = new;

      table->rt_count++;
    }
  }
  else
  {
    /* If routes are not sorted, find the best route and move it on
       the first position. There are several optimized cases. */

    if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
      goto do_recalculate;

    if (new && rte_better(new, old_best))
    {
      /* The first case - the new route is clearly optimal,
         we link it at the first position */

      new->next = net->routes;
      net->routes = new;

      table->rt_count++;
    }
    else if (old == old_best)
    {
      /* The second case - the old best route disappeared, we add the
         new route (if we have any) to the list (we don't care about
         position) and then we elect the new optimal route and relink
         that route at the first position and announce it. New optimal
         route might be NULL if there is no more routes */

    do_recalculate:
      /* Add the new route to the list */
      if (new)
      {
        new->next = net->routes;
        net->routes = new;

        table->rt_count++;
      }

      /* Find a new optimal route (if there is any) */
      if (net->routes)
      {
        rte **bp = &net->routes;
        for (k=&(*bp)->next; *k; k=&(*k)->next)
          if (rte_better(*k, *bp))
            bp = k;

        /* And relink it */
        rte *best = *bp;
        *bp = best->next;
        best->next = net->routes;
        net->routes = best;
      }
    }
    else if (new)
    {
      /* The third case - the new route is not better than the old
         best route (therefore old_best != NULL) and the old best
         route was not removed (therefore old_best == net->routes).
         We just link the new route after the old best route. */

      ASSERT(net->routes != NULL);
      new->next = net->routes->next;
      net->routes->next = new;

      table->rt_count++;
    }
    /* The fourth (empty) case - suboptimal route was removed, nothing to do */
  }

  if (new)
  {
    new->lastmod = current_time();

    if (!old)
    {
      new->id = hmap_first_zero(&table->id_map);
      hmap_set(&table->id_map, new->id);
    }
    else
      new->id = old->id;
  }

  /* Log the route change */
  if (p->debug & D_ROUTES)
  {
    if (new_ok)
      rte_trace(p, new, '>', new == net->routes ? "added [best]" : "added");
    else if (old_ok)
    {
      if (old != old_best)
        rte_trace(p, old, '>', "removed");
      else if (rte_is_ok(net->routes))
        rte_trace(p, old, '>', "removed [replaced]");
      else
        rte_trace(p, old, '>', "removed [sole]");
    }
  }

  /* Propagate the route change */
  rte_announce(table, RA_UNDEF, net, new, old, net->routes, old_best);

  if (!net->routes &&
      (table->gc_counter++ >= table->config->gc_max_ops) &&
      (table->gc_time + table->config->gc_min_time <= current_time()))
    rt_schedule_prune(table);

  if (old_ok && p->rte_remove)
    p->rte_remove(net, old);
  if (new_ok && p->rte_insert)
    p->rte_insert(net, new);

  if (old)
  {
    if (!new)
      hmap_clear(&table->id_map, old->id);

    rte_free_quick(old);
  }
}
static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */

static inline void
rte_update_lock(void)
{
  rte_update_nest_cnt++;
}

static inline void
rte_update_unlock(void)
{
  if (!--rte_update_nest_cnt)
    lp_flush(rte_update_pool);
}
static inline void
rte_hide_dummy_routes(net *net, rte **dummy)
{
  if (net->routes && net->routes->attrs->source == RTS_DUMMY)
  {
    *dummy = net->routes;
    net->routes = (*dummy)->next;
  }
}

static inline void
rte_unhide_dummy_routes(net *net, rte **dummy)
{
  if (*dummy)
  {
    (*dummy)->next = net->routes;
    net->routes = *dummy;
  }
}
/**
 * rte_update - enter a new update to a routing table
 * @table: table to be updated
 * @c: channel doing the update
 * @net: network node
 * @p: protocol submitting the update
 * @src: protocol originating the update
 * @new: a &rte representing the new route or %NULL for route removal.
 *
 * This function is called by the routing protocols whenever they discover
 * a new route or wish to update/remove an existing route. The right announcement
 * sequence is to build route attributes first (either un-cached with @aflags set
 * to zero or a cached one using rta_lookup(); in this case please note that
 * you need to increase the use count of the attributes yourself by calling
 * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
 * the appropriate data and finally submit the new &rte by calling rte_update().
 *
 * @src specifies the protocol that originally created the route and the meaning
 * of protocol-dependent data of @new. If @new is not %NULL, @src has to be the
 * same value as @new->attrs->src. @p specifies the protocol that called
 * rte_update(). In most cases it is the same protocol as @src. rte_update()
 * stores @c in @new->sender.
 *
 * When rte_update() gets any route, it automatically validates it (checks
 * whether the network and next hop address are valid IP addresses and also
 * whether a normal routing protocol doesn't try to smuggle a host or link
 * scope route to the table), converts all protocol dependent attributes stored
 * in the &rte to temporary extended attributes, consults import filters of the
 * protocol to see if the route should be accepted and/or its attributes modified,
 * and stores the temporary attributes back to the &rte.
 *
 * Now, having a "public" version of the route, we
 * automatically find any old route defined by the protocol @src
 * for network @n, replace it by the new one (or remove it if @new is %NULL),
 * recalculate the optimal route for this destination and finally broadcast
 * the change (if any) to all routing protocols by calling rte_announce().
 *
 * All memory used for attribute lists and other temporary allocations is taken
 * from a special linear pool @rte_update_pool and freed when rte_update()
 * finishes.
 */
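/*
 * The announcement sequence described above, as a minimal sketch (editor's
 * example, not from this file; ch, dst, gw and ifa are hypothetical and
 * error handling is omitted) - roughly what the static protocol does:
 *
 *   rta a0 = {
 *     .src = p->main_source,
 *     .source = RTS_STATIC,
 *     .scope = SCOPE_UNIVERSE,
 *     .dest = RTD_UNICAST,
 *     .nh.gw = gw,
 *     .nh.iface = ifa,
 *   };
 *
 *   rte *e = rte_get_temp(rta_lookup(&a0));   // rta_lookup() returns a ref
 *   rte_update2(ch, dst, e, p->main_source);
 */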
void
rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
  struct proto *p = c->proto;
  struct proto_stats *stats = &c->stats;
  const struct filter *filter = c->in_filter;
  rte *dummy = NULL;
  net *nn;

  ASSERT(c->channel_state == CS_UP);

  rte_update_lock();
  if (new)
  {
    /* Create a temporary table node */
    nn = alloca(sizeof(net) + n->length);
    memset(nn, 0, sizeof(net) + n->length);
    net_copy(nn->n.addr, n);

    new->net = nn;
    new->sender = c;

    if (!new->pref)
      new->pref = c->preference;

    stats->imp_updates_received++;
    if (!rte_validate(new))
    {
      rte_trace_in(D_FILTERS, p, new, "invalid");
      stats->imp_updates_invalid++;
      goto drop;
    }

    if (filter == FILTER_REJECT)
    {
      stats->imp_updates_filtered++;
      rte_trace_in(D_FILTERS, p, new, "filtered out");

      if (! c->in_keep_filtered)
        goto drop;

      /* new is a private copy, i could modify it */
      new->flags |= REF_FILTERED;
    }
    else if (filter)
    {
      rta *old_attrs = NULL;
      rte_make_tmp_attrs(&new, rte_update_pool, &old_attrs);

      int fr = f_run(filter, &new, rte_update_pool, 0);
      if (fr > F_ACCEPT)
      {
        stats->imp_updates_filtered++;
        rte_trace_in(D_FILTERS, p, new, "filtered out");

        if (! c->in_keep_filtered)
        {
          rta_free(old_attrs);
          goto drop;
        }

        new->flags |= REF_FILTERED;
      }

      rte_store_tmp_attrs(new, rte_update_pool, old_attrs);
    }
    if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
      new->attrs = rta_lookup(new->attrs);
    new->flags |= REF_COW;

    /* Use the actual struct network, not the dummy one */
    nn = net_get(c->table, n);
    new->net = nn;
  }
  else
  {
    stats->imp_withdraws_received++;

    if (!(nn = net_find(c->table, n)) || !src)
    {
      stats->imp_withdraws_ignored++;
      rte_update_unlock();
      return;
    }
  }

 recalc:
  /* And recalculate the best route */
  rte_hide_dummy_routes(nn, &dummy);
  rte_recalculate(c, nn, new, src);
  rte_unhide_dummy_routes(nn, &dummy);

  rte_update_unlock();
  return;

 drop:
  rte_free(new);
  new = NULL;
  if (nn = net_find(c->table, n))
    goto recalc;

  rte_update_unlock();
}
/* Independent call to rte_announce(), used from next hop
   recalculation, outside of rte_update(). new must be non-NULL */
static inline void
rte_announce_i(rtable *tab, uint type, net *net, rte *new, rte *old,
               rte *new_best, rte *old_best)
{
  rte_update_lock();
  rte_announce(tab, type, net, new, old, new_best, old_best);
  rte_update_unlock();
}
static inline void
rte_discard(rte *old)	/* Non-filtered route deletion, used during garbage collection */
{
  rte_update_lock();
  rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
  rte_update_unlock();
}
/* Modify existing route by protocol hook, used for long-lived graceful restart */
static inline void
rte_modify(rte *old)
{
  rte_update_lock();

  rte *new = old->sender->proto->rte_modify(old, rte_update_pool);
  if (new != old)
  {
    if (new)
    {
      if (!rta_is_cached(new->attrs))
        new->attrs = rta_lookup(new->attrs);
      new->flags = (old->flags & ~REF_MODIFY) | REF_COW;
    }

    rte_recalculate(old->sender, old->net, new, old->attrs->src);
  }

  rte_update_unlock();
}
/* Check rtable for best route to given net whether it would be exported to p */
int
rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter)
{
  net *n = net_find(t, a);
  rte *rt = n ? n->routes : NULL;

  if (!rte_is_valid(rt))
    return 0;

  rte_update_lock();

  /* Rest is stripped down export_filter() */
  int v = p->preexport ? p->preexport(p, &rt, rte_update_pool) : 0;
  if (v == RIC_PROCESS)
  {
    rte_make_tmp_attrs(&rt, rte_update_pool, NULL);
    v = (f_run(filter, &rt, rte_update_pool, FF_SILENT) <= F_ACCEPT);
  }

  /* Discard temporary rte */
  if (rt != n->routes)
    rte_free(rt);

  rte_update_unlock();

  return v > 0;
}
/**
 * rt_refresh_begin - start a refresh cycle
 * @t: related routing table
 * @c: related channel
 *
 * This function starts a refresh cycle for given routing table and announce
 * hook. The refresh cycle is a sequence where the protocol sends all its valid
 * routes to the routing table (by rte_update()). After that, all protocol
 * routes (more precisely routes with @c as @sender) not sent during the
 * refresh cycle but still in the table from the past are pruned. This is
 * implemented by marking all related routes as stale by REF_STALE flag in
 * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
 * flag in rt_refresh_end() and then removing such routes in the prune loop.
 */
void
rt_refresh_begin(rtable *t, struct channel *c)
{
  rte *e;

  FIB_WALK(&t->fib, net, n)
  {
    for (e = n->routes; e; e = e->next)
      if (e->sender == c)
        e->flags |= REF_STALE;
  }
  FIB_WALK_END;
}
/**
 * rt_refresh_end - end a refresh cycle
 * @t: related routing table
 * @c: related channel
 *
 * This function ends a refresh cycle for given routing table and announce
 * hook. See rt_refresh_begin() for description of refresh cycles.
 */
void
rt_refresh_end(rtable *t, struct channel *c)
{
  int prune = 0;
  rte *e;

  FIB_WALK(&t->fib, net, n)
  {
    for (e = n->routes; e; e = e->next)
      if ((e->sender == c) && (e->flags & REF_STALE))
      {
        e->flags |= REF_DISCARD;
        prune = 1;
      }
  }
  FIB_WALK_END;

  if (prune)
    rt_schedule_prune(t);
}
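/*
 * Refresh cycle from the protocol side (editor's sketch), e.g. around a
 * received BGP route refresh:
 *
 *   rt_refresh_begin(ch->table, ch);   // mark all routes from ch REF_STALE
 *   ...                                // re-send all valid routes; rte_update()
 *                                      // clears REF_STALE on refreshed ones
 *   rt_refresh_end(ch->table, ch);     // surviving stale routes get REF_DISCARD
 *                                      // and are removed by the prune loop
 */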
void
rt_modify_stale(rtable *t, struct channel *c)
{
  int prune = 0;
  rte *e;

  FIB_WALK(&t->fib, net, n)
  {
    for (e = n->routes; e; e = e->next)
      if ((e->sender == c) && (e->flags & REF_STALE) && !(e->flags & REF_FILTERED))
      {
        e->flags |= REF_MODIFY;
        prune = 1;
      }
  }
  FIB_WALK_END;

  if (prune)
    rt_schedule_prune(t);
}
/**
 * rte_dump - dump a route
 * @e: &rte to be dumped
 *
 * This function dumps contents of a &rte to debug output.
 */
void
rte_dump(rte *e)
{
  net *n = e->net;

  debug("%-1N ", n->n.addr);
  debug("PF=%02x pref=%d ", e->pflags, e->pref);
  rta_dump(e->attrs);
  if (e->attrs->src->proto->proto->dump_attrs)
    e->attrs->src->proto->proto->dump_attrs(e);
  debug("\n");
}
/**
 * rt_dump - dump a routing table
 * @t: routing table to be dumped
 *
 * This function dumps contents of a given routing table to debug output.
 */
void
rt_dump(rtable *t)
{
  debug("Dump of routing table <%s>\n", t->name);
#ifdef DEBUGGING
  fib_check(&t->fib);
#endif
  FIB_WALK(&t->fib, net, n)
  {
    rte *e;
    for(e=n->routes; e; e=e->next)
      rte_dump(e);
  }
  FIB_WALK_END;
  debug("\n");
}
/**
 * rt_dump_all - dump all routing tables
 *
 * This function dumps contents of all routing tables to debug output.
 */
void
rt_dump_all(void)
{
  rtable *t;

  WALK_LIST(t, routing_tables)
    rt_dump(t);
}
static inline void
rt_schedule_hcu(rtable *tab)
{
  if (tab->hcu_scheduled)
    return;

  tab->hcu_scheduled = 1;
  ev_schedule(tab->rt_event);
}
static inline void
rt_schedule_nhu(rtable *tab)
{
  if (tab->nhu_state == NHU_CLEAN)
    ev_schedule(tab->rt_event);

  /* state change:
   *   NHU_CLEAN   -> NHU_SCHEDULED
   *   NHU_RUNNING -> NHU_DIRTY
   */
  tab->nhu_state |= NHU_SCHEDULED;
}
void
rt_schedule_prune(rtable *tab)
{
  if (tab->prune_state == 0)
    ev_schedule(tab->rt_event);

  /* state change 0->1, 2->3 */
  tab->prune_state |= 1;
}
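/*
 * Editor's note on the encoding: prune_state is a two-bit state machine
 * (bit 0 = scheduled, bit 1 = running), so the values are 0 = clean,
 * 1 = scheduled, 2 = running, 3 = running with another prune requested.
 * "0->1, 2->3" above sets the scheduled bit; the "2->0, 3->1" transition
 * in rt_prune_table() clears the running bit when a pass completes.
 */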
static void
rt_event(void *ptr)
{
  rtable *tab = ptr;

  rt_lock_table(tab);

  if (tab->hcu_scheduled)
    rt_update_hostcache(tab);

  if (tab->nhu_state)
    rt_next_hop_update(tab);

  if (tab->prune_state)
    rt_prune_table(tab);

  rt_unlock_table(tab);
}
void
rt_setup(pool *p, rtable *t, struct rtable_config *cf)
{
  bzero(t, sizeof(*t));
  t->name = cf->name;
  t->config = cf;
  t->addr_type = cf->addr_type;
  fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);
  init_list(&t->channels);

  hmap_init(&t->id_map, p, 1024);
  hmap_set(&t->id_map, 0);

  t->rt_event = ev_new_init(p, rt_event, t);
  t->gc_time = current_time();
}
/**
 * rt_init - initialize routing tables
 *
 * This function is called during BIRD startup. It initializes the
 * routing table module.
 */
void
rt_init(void)
{
  rta_init();
  rt_table_pool = rp_new(&root_pool, "Routing tables");
  rte_update_pool = lp_new_default(rt_table_pool);
  rte_slab = sl_new(rt_table_pool, sizeof(rte));
  init_list(&routing_tables);
}
/**
 * rt_prune_table - prune a routing table
 *
 * The prune loop scans routing tables and removes routes belonging to flushing
 * protocols, discarded routes and also stale network entries. It is called from
 * rt_event(). The event is rescheduled if the current iteration does not finish
 * the table. The pruning is directed by the prune state (@prune_state),
 * specifying whether the prune cycle is scheduled or running, and there
 * is also a persistent pruning iterator (@prune_fit).
 *
 * The prune loop is used also for channel flushing. For this purpose, the
 * channels to flush are marked before the iteration and notified after the
 * iteration.
 */
static void
rt_prune_table(rtable *tab)
{
  struct fib_iterator *fit = &tab->prune_fit;
  int limit = 512;

  struct channel *c;
  node *n, *x;

  DBG("Pruning route table %s\n", tab->name);
#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  if (tab->prune_state == 0)
    return;

  if (tab->prune_state == 1)
  {
    /* Mark channels to flush */
    WALK_LIST2(c, n, tab->channels, table_node)
      if (c->channel_state == CS_FLUSHING)
        c->flush_active = 1;

    FIB_ITERATE_INIT(fit, &tab->fib);
    tab->prune_state = 2;
  }

again:
  FIB_ITERATE_START(&tab->fib, fit, net, n)
  {
    rte *e;

  rescan:
    for (e=n->routes; e; e=e->next)
    {
      if (e->sender->flush_active || (e->flags & REF_DISCARD))
      {
        if (limit <= 0)
        {
          FIB_ITERATE_PUT(fit);
          ev_schedule(tab->rt_event);
          return;
        }

        rte_discard(e);
        limit--;

        goto rescan;
      }

      if (e->flags & REF_MODIFY)
      {
        if (limit <= 0)
        {
          FIB_ITERATE_PUT(fit);
          ev_schedule(tab->rt_event);
          return;
        }

        rte_modify(e);
        limit--;

        goto rescan;
      }
    }

    if (!n->routes)		/* Orphaned FIB entry */
    {
      FIB_ITERATE_PUT(fit);
      fib_delete(&tab->fib, n);
      goto again;
    }
  }
  FIB_ITERATE_END;

#ifdef DEBUGGING
  fib_check(&tab->fib);
#endif

  tab->gc_counter = 0;
  tab->gc_time = current_time();

  /* state change 2->0, 3->1 */
  tab->prune_state &= 1;

  if (tab->prune_state > 0)
    ev_schedule(tab->rt_event);

  /* FIXME: This should be handled in a better way */
  rt_prune_sources();

  /* Close flushed channels */
  WALK_LIST2_DELSAFE(c, n, x, tab->channels, table_node)
    if (c->flush_active)
    {
      c->flush_active = 0;
      channel_set_state(c, CS_DOWN);
    }
}
*c
)
1941 init_list(&c
->tables
);
1943 rt_new_table(cf_get_symbol("master4"), NET_IP4
);
1944 rt_new_table(cf_get_symbol("master6"), NET_IP6
);
/*
 * Some functions for handling internal next hop updates
 * triggered by rt_schedule_nhu().
 */

static inline int
rta_next_hop_outdated(rta *a)
{
  struct hostentry *he = a->hostentry;

  if (!he)
    return 0;

  if (!he->src)
    return a->dest != RTD_UNREACHABLE;

  return (a->dest != he->dest) || (a->igp_metric != he->igp_metric) ||
    (!he->nexthop_linkable) || !nexthop_same(&(a->nh), &(he->src->nh));
}
void
rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls)
{
  a->hostentry = he;
  a->dest = he->dest;
  a->igp_metric = he->igp_metric;

  if (a->dest != RTD_UNICAST)
  {
    /* No nexthop */
    a->nh = (struct nexthop) {};
    if (mls)
    { /* Store the label stack for later changes */
      a->nh.labels_orig = a->nh.labels = mls->len;
      memcpy(a->nh.label, mls->stack, mls->len * sizeof(u32));
    }
    return;
  }

  if (((!mls) || (!mls->len)) && he->nexthop_linkable)
  { /* Just link the nexthop chain, no label append happens. */
    memcpy(&(a->nh), &(he->src->nh), nexthop_size(&(he->src->nh)));
    return;
  }

  struct nexthop *nhp = NULL, *nhr = NULL;
  int skip_nexthop = 0;

  for (struct nexthop *nh = &(he->src->nh); nh; nh = nh->next)
  {
    if (skip_nexthop)
      skip_nexthop--;
    else
    {
      nhr = nhp;
      nhp = (nhp ? (nhp->next = lp_alloc(rte_update_pool, NEXTHOP_MAX_SIZE)) : &(a->nh));
    }

    memset(nhp, 0, NEXTHOP_MAX_SIZE);
    nhp->iface = nh->iface;
    nhp->weight = nh->weight;

    if (mls)
    {
      nhp->labels = nh->labels + mls->len;
      nhp->labels_orig = mls->len;
      if (nhp->labels <= MPLS_MAX_LABEL_STACK)
      {
        memcpy(nhp->label, nh->label, nh->labels * sizeof(u32)); /* First the hostentry labels */
        memcpy(&(nhp->label[nh->labels]), mls->stack, mls->len * sizeof(u32)); /* Then the bottom labels */
      }
      else
      {
        log(L_WARN "Sum of label stack sizes %d + %d = %d exceeds allowed maximum (%d)",
            nh->labels, mls->len, nhp->labels, MPLS_MAX_LABEL_STACK);
        skip_nexthop++;
        continue;
      }
    }
    else if (nh->labels)
    {
      nhp->labels = nh->labels;
      nhp->labels_orig = 0;
      memcpy(nhp->label, nh->label, nh->labels * sizeof(u32));
    }

    if (ipa_nonzero(nh->gw))
    {
      nhp->gw = nh->gw;			/* Router nexthop */
      nhp->flags |= (nh->flags & RNF_ONLINK);
    }
    else if (!(nh->iface->flags & IF_MULTIACCESS) || (nh->iface->flags & IF_LOOPBACK))
      nhp->gw = IPA_NONE;		/* PtP link - no need for nexthop */
    else if (ipa_nonzero(he->link))
      nhp->gw = he->link;		/* Device nexthop with link-local address known */
    else
      nhp->gw = he->addr;		/* Device nexthop with link-local address unknown */
  }

  if (skip_nexthop)
  {
    if (nhr)
      nhr->next = NULL;
    else
    {
      a->dest = RTD_UNREACHABLE;
      log(L_WARN "No valid nexthop remaining, setting route unreachable");
    }
  }
}
static rte *
rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
{
  rta *a = alloca(RTA_MAX_SIZE);
  memcpy(a, old->attrs, rta_size(old->attrs));

  mpls_label_stack mls = { .len = a->nh.labels_orig };
  memcpy(mls.stack, &a->nh.label[a->nh.labels - mls.len], mls.len * sizeof(u32));

  rta_apply_hostentry(a, old->attrs->hostentry, &mls);
  a->cached = 0;

  rte *e = sl_alloc(rte_slab);
  memcpy(e, old, sizeof(rte));
  e->attrs = rta_lookup(a);

  return e;
}
static inline int
rt_next_hop_update_net(rtable *tab, net *n)
{
  rte **k, *e, *new, *old_best, **new_best;
  int count = 0;
  int free_old_best = 0;

  old_best = n->routes;
  if (!old_best)
    return 0;

  for (k = &n->routes; e = *k; k = &e->next)
    if (rta_next_hop_outdated(e->attrs))
    {
      new = rt_next_hop_update_rte(tab, e);
      *k = new;

      rte_trace_in(D_ROUTES, new->sender->proto, new, "updated");
      rte_announce_i(tab, RA_ANY, n, new, e, NULL, NULL);

      /* Call a pre-comparison hook */
      /* Not really an efficient way to compute this */
      if (e->attrs->src->proto->rte_recalculate)
        e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);

      if (e != old_best)
        rte_free_quick(e);
      else /* Freeing of the old best rte is postponed */
        free_old_best = 1;

      e = new;
      count++;
    }

  if (!count)
    return 0;

  /* Find the new best route */
  new_best = NULL;
  for (k = &n->routes; e = *k; k = &e->next)
  {
    if (!new_best || rte_better(e, *new_best))
      new_best = k;
  }

  /* Relink the new best route to the first position */
  new = *new_best;
  if (new != n->routes)
  {
    *new_best = new->next;
    new->next = n->routes;
    n->routes = new;
  }

  /* Announce the new best route */
  if (new != old_best)
    rte_trace_in(D_ROUTES, new->sender->proto, new, "updated [best]");

  /* Propagate changes */
  rte_announce_i(tab, RA_UNDEF, n, NULL, NULL, n->routes, old_best);

  if (free_old_best)
    rte_free_quick(old_best);

  return count;
}
static void
rt_next_hop_update(rtable *tab)
{
  struct fib_iterator *fit = &tab->nhu_fit;
  int max_feed = 32;

  if (tab->nhu_state == NHU_CLEAN)
    return;

  if (tab->nhu_state == NHU_SCHEDULED)
  {
    FIB_ITERATE_INIT(fit, &tab->fib);
    tab->nhu_state = NHU_RUNNING;
  }

  FIB_ITERATE_START(&tab->fib, fit, net, n)
  {
    if (max_feed <= 0)
    {
      FIB_ITERATE_PUT(fit);
      ev_schedule(tab->rt_event);
      return;
    }
    max_feed -= rt_next_hop_update_net(tab, n);
  }
  FIB_ITERATE_END;

  /* State change:
   *   NHU_DIRTY   -> NHU_SCHEDULED
   *   NHU_RUNNING -> NHU_CLEAN
   */
  tab->nhu_state &= 1;

  if (tab->nhu_state != NHU_CLEAN)
    ev_schedule(tab->rt_event);
}
struct rtable_config *
rt_new_table(struct symbol *s, uint addr_type)
{
  /* Hack that allows to 'redefine' the master table */
  if ((s->class == SYM_TABLE) &&
      (s->table == new_config->def_tables[addr_type]) &&
      ((addr_type == NET_IP4) || (addr_type == NET_IP6)))
    return s->table;

  struct rtable_config *c = cfg_allocz(sizeof(struct rtable_config));

  cf_define_symbol(s, SYM_TABLE, table, c);
  c->name = s->name;
  c->addr_type = addr_type;
  c->gc_max_ops = 1000;
  c->gc_min_time = 5;

  add_tail(&new_config->tables, &c->n);

  /* First table of each type is kept as default */
  if (! new_config->def_tables[addr_type])
    new_config->def_tables[addr_type] = c;

  return c;
}
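/*
 * From the configuration side (editor's sketch of BIRD 2 config syntax; the
 * table names are hypothetical), definitions like these are what end up in
 * rt_new_table(), in addition to the implicit master4/master6 tables
 * created in rt_preconfig():
 *
 *   ipv4 table aux4;
 *   ipv6 table aux6;
 *
 * A table may also be declared sorted, which selects the sorted insertion
 * path in rte_recalculate() above.
 */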
/**
 * rt_lock_table - lock a routing table
 * @r: routing table to be locked
 *
 * Lock a routing table, because it's in use by a protocol,
 * preventing it from being freed when it gets undefined in a new
 * configuration.
 */
void
rt_lock_table(rtable *r)
{
  r->use_count++;
}
/**
 * rt_unlock_table - unlock a routing table
 * @r: routing table to be unlocked
 *
 * Unlock a routing table formerly locked by rt_lock_table(),
 * that is decrease its use count and delete it if it's scheduled
 * for deletion by configuration changes.
 */
void
rt_unlock_table(rtable *r)
{
  if (!--r->use_count && r->deleted)
  {
    struct config *conf = r->deleted;
    DBG("Deleting routing table %s\n", r->name);
    r->config->table = NULL;
    if (r->hostcache)
      rt_free_hostcache(r);
    rem_node(&r->n);
    fib_free(&r->fib);
    hmap_free(&r->id_map);
    rfree(r->rt_event);
    mb_free(r);
    config_del_obstacle(conf);
  }
}
static struct rtable_config *
rt_find_table_config(struct config *cf, char *name)
{
  struct symbol *sym = cf_find_symbol(cf, name);
  return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL;
}
/**
 * rt_commit - commit new routing table configuration
 * @new: new configuration
 * @old: original configuration or %NULL if it's boot time config
 *
 * Scan differences between @old and @new configuration and modify
 * the routing tables according to these changes. If @new defines a
 * previously unknown table, create it, if it omits a table existing
 * in @old, schedule it for deletion (it gets deleted when all protocols
 * disconnect from it by calling rt_unlock_table()), if it exists
 * in both configurations, leave it unchanged.
 */
void
rt_commit(struct config *new, struct config *old)
{
  struct rtable_config *o, *r;

  DBG("rt_commit:\n");
  if (old)
  {
    WALK_LIST(o, old->tables)
    {
      rtable *ot = o->table;
      if (!ot->deleted)
      {
        r = rt_find_table_config(new, o->name);
        if (r && (r->addr_type == o->addr_type) && !new->shutdown)
        {
          DBG("\t%s: same\n", o->name);
          r->table = ot;
          ot->name = r->name;
          ot->config = r;
          if (o->sorted != r->sorted)
            log(L_WARN "Reconfiguration of rtable sorted flag not implemented");
        }
        else
        {
          DBG("\t%s: deleted\n", o->name);
          ot->deleted = old;
          config_add_obstacle(old);
          rt_lock_table(ot);
          rt_unlock_table(ot);
        }
      }
    }
  }

  WALK_LIST(r, new->tables)
    if (!r->table)
    {
      rtable *t = mb_allocz(rt_table_pool, sizeof(struct rtable));
      DBG("\t%s: created\n", r->name);
      rt_setup(rt_table_pool, t, r);
      add_tail(&routing_tables, &t->n);
      r->table = t;
    }
  DBG("\tdone\n");
}
static inline void
do_feed_channel(struct channel *c, net *n, rte *e)
{
  rte_update_lock();
  if (c->ra_mode == RA_ACCEPTED)
    rt_notify_accepted(c, n, NULL, NULL, c->refeeding);
  else if (c->ra_mode == RA_MERGED)
    rt_notify_merged(c, n, NULL, NULL, e, e, c->refeeding);
  else /* RA_BASIC */
    rt_notify_basic(c, n, e, e, c->refeeding);
  rte_update_unlock();
}
/**
 * rt_feed_channel - advertise all routes to a channel
 * @c: channel to be fed
 *
 * This function performs one pass of advertisement of routes to a channel that
 * is in the ES_FEEDING state. It is called by the protocol code as long as it
 * has something to do. (We avoid transferring all the routes in a single pass
 * in order not to monopolize CPU time.)
 */
int
rt_feed_channel(struct channel *c)
{
  struct fib_iterator *fit = &c->feed_fit;
  int max_feed = 256;

  ASSERT(c->export_state == ES_FEEDING);

  if (!c->feed_active)
  {
    FIB_ITERATE_INIT(fit, &c->table->fib);
    c->feed_active = 1;
  }

  FIB_ITERATE_START(&c->table->fib, fit, net, n)
  {
    rte *e = n->routes;
    if (max_feed <= 0)
    {
      FIB_ITERATE_PUT(fit);
      return 0;
    }

    if ((c->ra_mode == RA_OPTIMAL) ||
        (c->ra_mode == RA_ACCEPTED) ||
        (c->ra_mode == RA_MERGED))
      if (rte_is_valid(e))
      {
        /* In the meantime, the protocol may have gone down */
        if (c->export_state != ES_FEEDING)
          goto done;

        do_feed_channel(c, n, e);
        max_feed--;
      }

    if (c->ra_mode == RA_ANY)
      for(e = n->routes; e; e = e->next)
      {
        /* In the meantime, the protocol may have gone down */
        if (c->export_state != ES_FEEDING)
          goto done;

        if (!rte_is_valid(e))
          continue;

        do_feed_channel(c, n, e);
        max_feed--;
      }
  }
  FIB_ITERATE_END;

done:
  c->feed_active = 0;
  return 1;
}
/**
 * rt_feed_channel_abort - abort protocol feeding
 * @c: channel
 *
 * This function is called by the protocol code when the protocol stops or
 * ceases to exist during the feeding.
 */
void
rt_feed_channel_abort(struct channel *c)
{
  if (c->feed_active)
  {
    /* Unlink the iterator */
    fit_get(&c->table->fib, &c->feed_fit);
    c->feed_active = 0;
  }
}
int
rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
  struct rtable *tab = c->in_table;
  rte *old, **pos;
  net *net;

  if (new)
  {
    net = net_get(tab, n);

    if (!new->pref)
      new->pref = c->preference;

    if (!rta_is_cached(new->attrs))
      new->attrs = rta_lookup(new->attrs);
  }
  else
  {
    net = net_find(tab, n);

    if (!net)
      goto drop_withdraw;
  }

  /* Find the old rte */
  for (pos = &net->routes; old = *pos; pos = &old->next)
    if (old->attrs->src == src)
    {
      if (new && rte_same(old, new))
      {
        /* Refresh the old rte, continue with update to main rtable */
        if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
        {
          old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
          return 1;
        }

        goto drop_update;
      }

      /* Move iterator if needed */
      if (old == c->reload_next_rte)
        c->reload_next_rte = old->next;

      /* Remove the old rte */
      *pos = old->next;
      rte_free_quick(old);
      tab->rt_count--;

      break;
    }

  if (!new)
  {
    if (!old)
      goto drop_withdraw;

    return 1;
  }

  struct channel_limit *l = &c->rx_limit;
  if (l->action && !old)
  {
    if (tab->rt_count >= l->limit)
      channel_notify_limit(c, l, PLD_RX, tab->rt_count);

    if (l->state == PLS_BLOCKED)
    {
      rte_trace_in(D_FILTERS, c->proto, new, "ignored [limit]");
      goto drop_update;
    }
  }

  /* Insert the new rte */
  rte *e = rte_do_cow(new);
  e->flags |= REF_COW;
  e->net = net;
  e->sender = c;
  e->lastmod = current_time();
  e->next = *pos;
  *pos = e;
  tab->rt_count++;
  return 1;

drop_update:
  c->stats.imp_updates_received++;
  c->stats.imp_updates_ignored++;
  rte_free(new);
  return 0;

drop_withdraw:
  c->stats.imp_withdraws_received++;
  c->stats.imp_withdraws_ignored++;
  return 0;
}
int
rt_reload_channel(struct channel *c)
{
  struct rtable *tab = c->in_table;
  struct fib_iterator *fit = &c->reload_fit;
  int max_feed = 64;

  ASSERT(c->channel_state == CS_UP);

  if (!c->reload_active)
  {
    FIB_ITERATE_INIT(fit, &tab->fib);
    c->reload_active = 1;
  }

  do {
    for (rte *e = c->reload_next_rte; e; e = e->next)
    {
      if (max_feed-- <= 0)
      {
        c->reload_next_rte = e;
        debug("%s channel reload burst split (max_feed=%d)", c->proto->name, max_feed);
        return 0;
      }

      rte_update2(c, e->net->n.addr, rte_do_cow(e), e->attrs->src);
    }

    c->reload_next_rte = NULL;

    FIB_ITERATE_START(&tab->fib, fit, net, n)
    {
      if (c->reload_next_rte = n->routes)
      {
        FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
        break;
      }
    }
    FIB_ITERATE_END;
  }
  while (c->reload_next_rte);

  c->reload_active = 0;
  return 1;
}
void
rt_reload_channel_abort(struct channel *c)
{
  if (c->reload_active)
  {
    /* Unlink the iterator */
    fit_get(&c->in_table->fib, &c->reload_fit);
    c->reload_next_rte = NULL;
    c->reload_active = 0;
  }
}
void
rt_prune_sync(rtable *t, int all)
{
  FIB_WALK(&t->fib, net, n)
  {
    rte *e, **ee = &n->routes;
    while (e = *ee)
    {
      if (all || (e->flags & (REF_STALE | REF_DISCARD)))
      {
        *ee = e->next;
        rte_free_quick(e);
        t->rt_count--;
      }
      else
        ee = &e->next;
    }
  }
  FIB_WALK_END;
}
static int
rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed)
{
  struct rtable *tab = c->out_table;
  struct rte_src *src;
  rte *old, **pos;
  net *net;

  if (new)
  {
    net = net_get(tab, n);
    src = new->attrs->src;

    rte_store_tmp_attrs(new, rte_update_pool, NULL);

    if (!rta_is_cached(new->attrs))
      new->attrs = rta_lookup(new->attrs);
  }
  else
  {
    net = net_find(tab, n);
    src = old0->attrs->src;

    if (!net)
      goto drop_withdraw;
  }

  /* Find the old rte */
  for (pos = &net->routes; old = *pos; pos = &old->next)
    if ((c->ra_mode != RA_ANY) || (old->attrs->src == src))
    {
      if (new && rte_same(old, new))
      {
        /* REF_STALE / REF_DISCARD not used in export table */
        /*
        if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
        {
          old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
          return 1;
        }
        */

        goto drop_update;
      }

      /* Remove the old rte */
      *pos = old->next;
      rte_free_quick(old);
      tab->rt_count--;

      break;
    }

  if (!new)
  {
    if (!old)
      goto drop_withdraw;

    return 1;
  }

  /* Insert the new rte */
  rte *e = rte_do_cow(new);
  e->flags |= REF_COW;
  e->net = net;
  e->sender = c;
  e->lastmod = current_time();
  e->next = *pos;
  *pos = e;
  tab->rt_count++;
  return 1;

drop_update:
  return refeed;

drop_withdraw:
  return 0;
}
static inline u32
hc_hash(ip_addr a, rtable *dep)
{
  return ipa_hash(a) ^ ptr_hash(dep);
}
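
/*
 * Note (illustrative only): the full 32-bit key is kept in he->hash_key and
 * a bucket is chosen from its top bits (hash_key >> hash_shift, with
 * hash_shift = 32 - hash_order), so resizing the table only requires
 * re-bucketing the entries, never recomputing any hashes.
 */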
static inline void
hc_insert(struct hostcache *hc, struct hostentry *he)
{
  uint k = he->hash_key >> hc->hash_shift;
  he->next = hc->hash_table[k];
  hc->hash_table[k] = he;
}
static inline void
hc_remove(struct hostcache *hc, struct hostentry *he)
{
  struct hostentry **hep;
  uint k = he->hash_key >> hc->hash_shift;

  for (hep = &hc->hash_table[k]; *hep != he; hep = &(*hep)->next);
  *hep = he->next;
}
#define HC_DEF_ORDER 10
#define HC_HI_MARK *4
#define HC_HI_STEP 2
#define HC_HI_ORDER 16			/* Must be at most 16 */
#define HC_LO_MARK /5
#define HC_LO_STEP 2
#define HC_LO_ORDER 10
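
/*
 * Note (illustrative only): HC_HI_MARK and HC_LO_MARK are operator-operand
 * fragments pasted directly after the table size, e.g. for order 12
 * (hsize 4096): hash_max = 4096 *4 = 16384, hash_min = 4096 /5 = 819.
 * The asymmetric marks together with the 2-order (4x) resize steps give
 * hysteresis, so an item count oscillating around a threshold cannot cause
 * repeated grow/shrink cycles; at or below HC_LO_ORDER the minimum is
 * forced to 0, so the table never shrinks below the default order.
 */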
static void
hc_alloc_table(struct hostcache *hc, unsigned order)
{
  uint hsize = 1 << order;
  hc->hash_order = order;
  hc->hash_shift = 32 - order;
  hc->hash_max = (order >= HC_HI_ORDER) ? ~0U : (hsize HC_HI_MARK);
  hc->hash_min = (order <= HC_LO_ORDER) ? 0U : (hsize HC_LO_MARK);

  hc->hash_table = mb_allocz(rt_table_pool, hsize * sizeof(struct hostentry *));
}
static void
hc_resize(struct hostcache *hc, unsigned new_order)
{
  struct hostentry **old_table = hc->hash_table;
  struct hostentry *he, *hen;
  uint old_size = 1 << hc->hash_order;
  uint i;

  hc_alloc_table(hc, new_order);
  for (i = 0; i < old_size; i++)
    for (he = old_table[i]; he != NULL; he = hen)
    {
      hen = he->next;
      hc_insert(hc, he);
    }
  mb_free(old_table);
}
static struct hostentry *
hc_new_hostentry(struct hostcache *hc, ip_addr a, ip_addr ll, rtable *dep, unsigned k)
{
  struct hostentry *he = sl_alloc(hc->slab);

  *he = (struct hostentry) {
    .addr = a,
    .link = ll,
    .tab = dep,
    .hash_key = k,
  };

  add_tail(&hc->hostentries, &he->ln);
  hc_insert(hc, he);

  hc->hash_items++;
  if (hc->hash_items > hc->hash_max)
    hc_resize(hc, hc->hash_order + HC_HI_STEP);

  return he;
}
static void
hc_delete_hostentry(struct hostcache *hc, struct hostentry *he)
{
  rta_free(he->src);

  rem_node(&he->ln);
  hc_remove(hc, he);
  sl_free(hc->slab, he);

  hc->hash_items--;
  if (hc->hash_items < hc->hash_min)
    hc_resize(hc, hc->hash_order - HC_LO_STEP);
}
static void
rt_init_hostcache(rtable *tab)
{
  struct hostcache *hc = mb_allocz(rt_table_pool, sizeof(struct hostcache));
  init_list(&hc->hostentries);

  hc->hash_items = 0;
  hc_alloc_table(hc, HC_DEF_ORDER);
  hc->slab = sl_new(rt_table_pool, sizeof(struct hostentry));

  hc->lp = lp_new(rt_table_pool, LP_GOOD_SIZE(1024));
  hc->trie = f_new_trie(hc->lp, 0);

  tab->hostcache = hc;
}
static void
rt_free_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;

  node *n;
  WALK_LIST(n, hc->hostentries)
  {
    struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
    rta_free(he->src);

    if (he->uc)
      log(L_ERR "Hostcache is not empty in table %s", tab->name);
  }

  rfree(hc->slab);
  rfree(hc->lp);
  mb_free(hc->hash_table);
  mb_free(hc);
}
static void
rt_notify_hostcache(rtable *tab, net *net)
{
  if (tab->hcu_scheduled)
    return;

  if (trie_match_net(tab->hostcache->trie, net->n.addr))
    rt_schedule_hcu(tab);
}
static inline int
if_local_addr(ip_addr a, struct iface *i)
{
  struct ifa *b;

  WALK_LIST(b, i->addrs)
    if (ipa_equal(a, b->ip))
      return 1;

  return 0;
}
u32
rt_get_igp_metric(rte *rt)
{
  eattr *ea = ea_find(rt->attrs->eattrs, EA_GEN_IGP_METRIC);

  if (ea)
    return ea->u.data;

  rta *a = rt->attrs;

#ifdef CONFIG_OSPF
  if ((a->source == RTS_OSPF) ||
      (a->source == RTS_OSPF_IA) ||
      (a->source == RTS_OSPF_EXT1))
    return rt->u.ospf.metric1;
#endif

#ifdef CONFIG_RIP
  if (a->source == RTS_RIP)
    return rt->u.rip.metric;
#endif

#ifdef CONFIG_BGP
  if (a->source == RTS_BGP)
  {
    u64 metric = bgp_total_aigp_metric(rt);
    return (u32) MIN(metric, (u64) IGP_METRIC_UNKNOWN);
  }
#endif

  if (a->source == RTS_DEVICE)
    return 0;

  return IGP_METRIC_UNKNOWN;
}
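
/*
 * Example (illustrative sketch only): a protocol can override the
 * per-source defaults above by attaching an explicit EA_GEN_IGP_METRIC
 * attribute, which rt_get_igp_metric() checks first. The helper below is
 * hypothetical; the ea_list layout follows the usual one-attribute pattern.
 */
#if 0
static void
example_set_igp_metric(rta *a, struct linpool *pool, u32 metric)
{
  ea_list *l = lp_alloc(pool, sizeof(ea_list) + sizeof(eattr));

  l->next = a->eattrs;		/* chain in front of existing attributes */
  l->flags = EALF_SORTED;
  l->count = 1;
  l->attrs[0].id = EA_GEN_IGP_METRIC;
  l->attrs[0].flags = 0;
  l->attrs[0].type = EAF_TYPE_INT;
  l->attrs[0].u.data = metric;

  a->eattrs = l;
}
#endif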
static int
rt_update_hostentry(rtable *tab, struct hostentry *he)
{
  rta *old_src = he->src;
  int direct = 0;
  int pxlen = 0;

  /* Reset the hostentry */
  he->src = NULL;
  he->dest = RTD_UNREACHABLE;
  he->nexthop_linkable = 0;
  he->igp_metric = 0;

  net_addr he_addr;
  net_fill_ip_host(&he_addr, he->addr);
  net *n = net_route(tab, &he_addr);
  if (n)
  {
    rte *e = n->routes;
    rta *a = e->attrs;
    pxlen = n->n.addr->pxlen;

    if (a->hostentry)
    {
      /* Recursive route should not depend on another recursive route */
      log(L_WARN "Next hop address %I resolvable through recursive route for %N",
	  he->addr, n->n.addr);
      goto done;
    }

    if (a->dest == RTD_UNICAST)
    {
      for (struct nexthop *nh = &(a->nh); nh; nh = nh->next)
	if (ipa_zero(nh->gw))
	{
	  if (if_local_addr(he->addr, nh->iface))
	  {
	    /* The host address is a local address, this is not valid */
	    log(L_WARN "Next hop address %I is a local address of iface %s",
		he->addr, nh->iface->name);
	    goto done;
	  }

	  direct++;
	}
    }

    he->src = rta_clone(a);
    he->dest = a->dest;
    he->nexthop_linkable = !direct;
    he->igp_metric = rt_get_igp_metric(e);
  }

done:
  /* Add a prefix range to the trie */
  trie_add_prefix(tab->hostcache->trie, &he_addr, pxlen, he_addr.pxlen);

  rta_free(old_src);
  return old_src != he->src;
}
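
/*
 * Note (illustrative only): the recorded range [pxlen, he_addr.pxlen] spans
 * from the length of the route that resolved the address up to the full
 * host length (/32 or /128). rt_notify_hostcache() later matches changed
 * nets against this trie, so the hostcache is refreshed only when a route
 * that could cover some cached next hop actually changes.
 */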
static void
rt_update_hostcache(rtable *tab)
{
  struct hostcache *hc = tab->hostcache;
  struct hostentry *he;
  node *n, *x;

  /* Reset the trie */
  lp_flush(hc->lp);
  hc->trie = f_new_trie(hc->lp, 0);

  WALK_LIST_DELSAFE(n, x, hc->hostentries)
  {
    he = SKIP_BACK(struct hostentry, ln, n);
    if (!he->uc)
    {
      hc_delete_hostentry(hc, he);
      continue;
    }

    if (rt_update_hostentry(tab, he))
      rt_schedule_nhu(he->tab);
  }

  tab->hcu_scheduled = 0;
}
struct hostentry *
rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
{
  struct hostentry *he;

  if (!tab->hostcache)
    rt_init_hostcache(tab);

  u32 k = hc_hash(a, dep);
  struct hostcache *hc = tab->hostcache;
  for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
    if (ipa_equal(he->addr, a) && (he->tab == dep))
      return he;

  he = hc_new_hostentry(hc, a, ipa_zero(ll) ? a : ll, dep, k);
  rt_update_hostentry(tab, he);
  return he;
}
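
/*
 * Example (illustrative sketch only): a protocol with recursive next hops
 * (typically BGP) resolves them through the host cache of its IGP table.
 * Variable names are assumptions; rt_get_hostentry() is the real entry
 * point.
 */
#if 0
/* Resolve gateway gw via IGP table igp, making routes in table dep
   depend on the result: */
struct hostentry *he = rt_get_hostentry(igp, gw, IPA_NONE, dep);

/* he->src now holds the attributes of the resolving route (NULL if
   unreachable) and he->igp_metric its metric; when the resolving route
   later changes, dep gets a next hop update scheduled. */
#endif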
/*
 *  Documentation for functions declared inline in route.h
 */
#if 0

/**
 * net_find - find a network entry
 * @tab: a routing table
 * @addr: address of the network
 *
 * net_find() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry or %NULL if no such network
 * exists.
 */
static inline net *net_find(rtable *tab, net_addr *addr)
{ DUMMY; }

/**
 * net_get - obtain a network entry
 * @tab: a routing table
 * @addr: address of the network
 *
 * net_get() looks up the given network in routing table @tab and
 * returns a pointer to its &net entry. If no such entry exists, it's
 * created.
 */
static inline net *net_get(rtable *tab, net_addr *addr)
{ DUMMY; }

/**
 * rte_cow - copy a route for writing
 * @r: a route entry to be copied
 *
 * rte_cow() takes a &rte and prepares it for modification. The exact action
 * taken depends on the flags of the &rte -- if it's a temporary entry, it's
 * just returned unchanged, else a new temporary entry with the same contents
 * is created.
 *
 * The primary use of this function is inside the filter machinery -- when
 * a filter wants to modify &rte contents (to change the preference or to
 * attach another set of attributes), it must ensure that the &rte is not
 * shared with anyone else (and especially that it isn't stored in any routing
 * table).
 *
 * Result: a pointer to the new writable &rte.
 */
static inline rte * rte_cow(rte *r)
{ DUMMY; }

#endif
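
/*
 * Example (illustrative sketch only): the typical copy-on-write pattern in
 * code adjacent to the filters; f_rte is assumed to be the rte ** being
 * worked on.
 */
#if 0
static void
example_set_pref(rte **f_rte, int pref)
{
  *f_rte = rte_cow(*f_rte);	/* either already temporary, or copied now */
  (*f_rte)->pref = pref;
}
#endif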