/*
 *	BIRD -- Route Attribute Cache
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Route attribute cache
 *
 * Each route entry carries a set of route attributes. Several of them
 * vary from route to route, but most attributes are usually common
 * for a large number of routes. To conserve memory, we've decided to
 * store only the varying ones directly in the &rte and hold the rest
 * in a special structure called &rta which is shared among all the
 * &rte's with these attributes.
 *
 * Each &rta contains all the static attributes of the route (i.e.,
 * those which are always present) as structure members and a list of
 * dynamic attributes represented by a linked list of &ea_list
 * structures, each of them consisting of an array of &eattr's containing
 * the individual attributes. An attribute can be specified more than once
 * in the &ea_list chain and in such case the first occurrence overrides
 * the others. This semantics is used especially when someone (for example
 * a filter) wishes to alter values of several dynamic attributes, but
 * it wants to preserve the original attribute lists maintained by
 * the route source.
 *
 * Each &eattr contains an attribute identifier (split to protocol ID and
 * per-protocol attribute ID), protocol dependent flags, a type code (consisting
 * of several bit fields describing attribute characteristics) and either an
 * embedded 32-bit value or a pointer to a &adata structure holding attribute
 * data.
 *
 * There exist two variants of &rta's -- cached and un-cached ones. Un-cached
 * &rta's can have arbitrarily complex structure of &ea_list's and they
 * can be modified by any module in the route processing chain. Cached
 * &rta's have their attribute lists normalized (that means at most one
 * &ea_list is present and its values are sorted in order to speed up
 * searching), they are stored in a hash table to make fast lookup possible
 * and they are provided with a use count to allow sharing.
 *
 * Routing tables always contain only cached &rta's.
 */
47 #include "nest/bird.h"
48 #include "nest/route.h"
49 #include "nest/protocol.h"
50 #include "nest/iface.h"
52 #include "nest/attrs.h"
53 #include "lib/alloca.h"
56 #include "lib/resource.h"
57 #include "lib/string.h"
61 const char * rta_dest_names
[RTD_MAX
] = {
63 [RTD_UNICAST
] = "unicast",
64 [RTD_BLACKHOLE
] = "blackhole",
65 [RTD_UNREACHABLE
] = "unreachable",
66 [RTD_PROHIBIT
] = "prohibited",
71 static slab
*rta_slab_
[4];
72 static slab
*nexthop_slab_
[4];
73 static slab
*rte_src_slab
;
75 static struct idm src_ids
;
76 #define SRC_ID_INIT_SIZE 4
80 #define RSH_KEY(n) n->proto, n->private_id
81 #define RSH_NEXT(n) n->next
82 #define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
83 #define RSH_FN(p,n) p->hash_key ^ u32_hash(n)
85 #define RSH_REHASH rte_src_rehash
86 #define RSH_PARAMS /2, *2, 1, 1, 8, 20
87 #define RSH_INIT_ORDER 6
89 static HASH(struct rte_src
) src_hash
;
91 struct protocol
*attr_class_to_protocol
[EAP_MAX
];
97 rte_src_slab
= sl_new(rta_pool
, sizeof(struct rte_src
));
99 idm_init(&src_ids
, rta_pool
, SRC_ID_INIT_SIZE
);
101 HASH_INIT(src_hash
, rta_pool
, RSH_INIT_ORDER
);
105 HASH_DEFINE_REHASH_FN(RSH
, struct rte_src
)
108 rt_find_source(struct proto
*p
, u32 id
)
110 return HASH_FIND(src_hash
, RSH
, p
, id
);
114 rt_get_source(struct proto
*p
, u32 id
)
116 struct rte_src
*src
= rt_find_source(p
, id
);
121 src
= sl_alloc(rte_src_slab
);
123 src
->private_id
= id
;
124 src
->global_id
= idm_alloc(&src_ids
);
127 HASH_INSERT2(src_hash
, RSH
, rta_pool
, src
);
133 rt_prune_sources(void)
135 HASH_WALK_FILTER(src_hash
, next
, src
, sp
)
139 HASH_DO_REMOVE(src_hash
, RSH
, sp
);
140 idm_free(&src_ids
, src
->global_id
);
141 sl_free(rte_src_slab
, src
);
144 HASH_WALK_FILTER_END
;
146 HASH_MAY_RESIZE_DOWN(src_hash
, RSH
, rta_pool
);
155 nexthop_hash(struct nexthop
*x
)
158 for (; x
; x
= x
->next
)
160 h
^= ipa_hash(x
->gw
) ^ (h
<< 5) ^ (h
>> 9);
162 for (int i
= 0; i
< x
->labels
; i
++)
163 h
^= x
->label
[i
] ^ (h
<< 6) ^ (h
>> 7);
170 nexthop__same(struct nexthop
*x
, struct nexthop
*y
)
172 for (; x
&& y
; x
= x
->next
, y
= y
->next
)
174 if (!ipa_equal(x
->gw
, y
->gw
) || (x
->iface
!= y
->iface
) || (x
->weight
!= y
->weight
) || (x
->labels
!= y
->labels
))
177 for (int i
= 0; i
< x
->labels
; i
++)
178 if (x
->label
[i
] != y
->label
[i
])
186 nexthop_compare_node(struct nexthop
*x
, struct nexthop
*y
)
196 r
= ((int) y
->weight
) - ((int) x
->weight
);
200 r
= ipa_compare(x
->gw
, y
->gw
);
204 r
= ((int) y
->labels
) - ((int) x
->labels
);
208 for (int i
= 0; i
< y
->labels
; i
++)
210 r
= ((int) y
->label
[i
]) - ((int) x
->label
[i
]);
215 return ((int) x
->iface
->index
) - ((int) y
->iface
->index
);
218 static inline struct nexthop
*
219 nexthop_copy_node(const struct nexthop
*src
, linpool
*lp
)
221 struct nexthop
*n
= lp_alloc(lp
, nexthop_size(src
));
223 memcpy(n
, src
, nexthop_size(src
));
230 * nexthop_merge - merge nexthop lists
233 * @rx: reusability of list @x
234 * @ry: reusability of list @y
235 * @max: max number of nexthops
236 * @lp: linpool for allocating nexthops
238 * The nexthop_merge() function takes two nexthop lists @x and @y and merges them,
239 * eliminating possible duplicates. The input lists must be sorted and the
240 * result is sorted too. The number of nexthops in result is limited by @max.
241 * New nodes are allocated from linpool @lp.
243 * The arguments @rx and @ry specify whether corresponding input lists may be
244 * consumed by the function (i.e. their nodes reused in the resulting list), in
245 * that case the caller should not access these lists after that. To eliminate
246 * issues with deallocation of these lists, the caller should use some form of
247 * bulk deallocation (e.g. stack or linpool) to free these nodes when the
248 * resulting list is no longer needed. When reusability is not set, the
249 * corresponding lists are not modified nor linked from the resulting list.
252 nexthop_merge(struct nexthop
*x
, struct nexthop
*y
, int rx
, int ry
, int max
, linpool
*lp
)
254 struct nexthop
*root
= NULL
;
255 struct nexthop
**n
= &root
;
257 while ((x
|| y
) && max
--)
259 int cmp
= nexthop_compare_node(x
, y
);
262 *n
= rx
? x
: nexthop_copy_node(x
, lp
);
267 *n
= ry
? y
: nexthop_copy_node(y
, lp
);
272 *n
= rx
? x
: (ry
? y
: nexthop_copy_node(x
, lp
));
284 nexthop_insert(struct nexthop
**n
, struct nexthop
*x
)
286 for (; *n
; n
= &((*n
)->next
))
288 int cmp
= nexthop_compare_node(*n
, x
);
303 nexthop_is_sorted(struct nexthop
*x
)
305 for (; x
&& x
->next
; x
= x
->next
)
306 if (nexthop_compare_node(x
, x
->next
) >= 0)
313 nexthop_slab(struct nexthop
*nh
)
315 return nexthop_slab_
[MIN(nh
->labels
, 3)];
318 static struct nexthop
*
319 nexthop_copy(struct nexthop
*o
)
321 struct nexthop
*first
= NULL
;
322 struct nexthop
**last
= &first
;
324 for (; o
; o
= o
->next
)
326 struct nexthop
*n
= sl_alloc(nexthop_slab(o
));
330 n
->weight
= o
->weight
;
331 n
->labels
= o
->labels
;
332 for (int i
=0; i
<o
->labels
; i
++)
333 n
->label
[i
] = o
->label
[i
];
343 nexthop_free(struct nexthop
*o
)
350 sl_free(nexthop_slab(o
), o
);
357 * Extended Attributes
360 static inline eattr
*
361 ea__find(ea_list
*e
, unsigned id
)
368 if (e
->flags
& EALF_BISECT
)
385 for(m
=0; m
<e
->count
; m
++)
386 if (e
->attrs
[m
].id
== id
)
394 * ea_find - find an extended attribute
395 * @e: attribute list to search in
396 * @id: attribute ID to search for
398 * Given an extended attribute list, ea_find() searches for a first
399 * occurrence of an attribute with specified ID, returning either a pointer
400 * to its &eattr structure or %NULL if no such attribute exists.
403 ea_find(ea_list
*e
, unsigned id
)
405 eattr
*a
= ea__find(e
, id
& EA_CODE_MASK
);
407 if (a
&& (a
->type
& EAF_TYPE_MASK
) == EAF_TYPE_UNDEF
&&
408 !(id
& EA_ALLOW_UNDEF
))
414 * ea_walk - walk through extended attributes
415 * @s: walk state structure
416 * @id: start of attribute ID interval
417 * @max: length of attribute ID interval
419 * Given an extended attribute list, ea_walk() walks through the list looking
420 * for first occurrences of attributes with ID in specified interval from @id to
421 * (@id + @max - 1), returning pointers to found &eattr structures, storing its
422 * walk state in @s for subsequent calls.
424 * The function ea_walk() is supposed to be called in a loop, with initially
425 * zeroed walk state structure @s with filled the initial extended attribute
426 * list, returning one found attribute in each call or %NULL when no other
427 * attribute exists. The extended attribute list or the arguments should not be
428 * modified between calls. The maximum value of @max is 128.
431 ea_walk(struct ea_walk_state
*s
, uint id
, uint max
)
433 ea_list
*e
= s
->eattrs
;
442 for (; e
; e
= e
->next
)
444 if (e
->flags
& EALF_BISECT
)
453 if (e
->attrs
[m
].id
< id
)
464 a_max
= e
->attrs
+ e
->count
;
465 for (; a
< a_max
; a
++)
466 if ((a
->id
>= id
) && (a
->id
< max
))
470 if (BIT32_TEST(s
->visited
, n
))
473 BIT32_SET(s
->visited
, n
);
475 if ((a
->type
& EAF_TYPE_MASK
) == EAF_TYPE_UNDEF
)
482 else if (e
->flags
& EALF_BISECT
)
490 * ea_get_int - fetch an integer attribute
493 * @def: default value
495 * This function is a shortcut for retrieving a value of an integer attribute
496 * by calling ea_find() to find the attribute, extracting its value or returning
497 * a provided default if no such attribute is present.
500 ea_get_int(ea_list
*e
, unsigned id
, int def
)
502 eattr
*a
= ea_find(e
, id
);
509 ea_do_sort(ea_list
*e
)
511 unsigned n
= e
->count
;
513 eattr
*b
= alloca(n
* sizeof(eattr
));
516 /* We need to use a stable sorting algorithm, hence mergesort */
522 eattr
*p
, *q
, *lo
, *hi
;
526 while (s
< n
&& p
[-1].id
<= a
[s
].id
)
532 while (s
< n
&& p
[-1].id
<= a
[s
].id
)
537 while (lo
< q
&& hi
< p
)
538 if (lo
->id
<= hi
->id
)
553 ea_do_prune(ea_list
*e
)
555 eattr
*s
, *d
, *l
, *s0
;
558 /* Discard duplicates and undefs. Do you remember sorting was stable? */
560 l
= e
->attrs
+ e
->count
;
564 while (s
< l
&& s
->id
== s
[-1].id
)
566 /* s0 is the most recent version, s[-1] the oldest one */
567 if ((s0
->type
& EAF_TYPE_MASK
) != EAF_TYPE_UNDEF
)
570 d
->type
= (d
->type
& ~(EAF_ORIGINATED
|EAF_FRESH
)) | (s
[-1].type
& EAF_ORIGINATED
);
579 * ea_sort - sort an attribute list
580 * @e: list to be sorted
582 * This function takes a &ea_list chain and sorts the attributes
583 * within each of its entries.
585 * If an attribute occurs multiple times in a single &ea_list,
586 * ea_sort() leaves only the first (the only significant) occurrence.
593 if (!(e
->flags
& EALF_SORTED
))
597 e
->flags
|= EALF_SORTED
;
600 e
->flags
|= EALF_BISECT
;
606 * ea_scan - estimate attribute list size
609 * This function calculates an upper bound of the size of
610 * a given &ea_list after merging with ea_merge().
622 return sizeof(ea_list
) + sizeof(eattr
)*cnt
;
626 * ea_merge - merge segments of an attribute list
628 * @t: buffer to store the result to
630 * This function takes a possibly multi-segment attribute list
631 * and merges all of its segments to one.
633 * The primary use of this function is for &ea_list normalization:
634 * first call ea_scan() to determine how much memory will the result
635 * take, then allocate a buffer (usually using alloca()), merge the
636 * segments with ea_merge() and finally sort and prune the result
637 * by calling ea_sort().
640 ea_merge(ea_list
*e
, ea_list
*t
)
649 memcpy(d
, e
->attrs
, sizeof(eattr
)*e
->count
);
650 t
->count
+= e
->count
;
657 * ea_same - compare two &ea_list's
661 * ea_same() compares two normalized attribute lists @x and @y and returns
662 * 1 if they contain the same attributes, 0 otherwise.
665 ea_same(ea_list
*x
, ea_list
*y
)
671 ASSERT(!x
->next
&& !y
->next
);
672 if (x
->count
!= y
->count
)
674 for(c
=0; c
<x
->count
; c
++)
676 eattr
*a
= &x
->attrs
[c
];
677 eattr
*b
= &y
->attrs
[c
];
679 if (a
->id
!= b
->id
||
680 a
->flags
!= b
->flags
||
681 a
->type
!= b
->type
||
682 ((a
->type
& EAF_EMBEDDED
) ? a
->u
.data
!= b
->u
.data
: !adata_same(a
->u
.ptr
, b
->u
.ptr
)))
688 static inline ea_list
*
689 ea_list_copy(ea_list
*o
)
697 len
= sizeof(ea_list
) + sizeof(eattr
) * o
->count
;
698 n
= mb_alloc(rta_pool
, len
);
700 n
->flags
|= EALF_CACHED
;
701 for(i
=0; i
<o
->count
; i
++)
703 eattr
*a
= &n
->attrs
[i
];
704 if (!(a
->type
& EAF_EMBEDDED
))
706 unsigned size
= sizeof(struct adata
) + a
->u
.ptr
->length
;
707 struct adata
*d
= mb_alloc(rta_pool
, size
);
708 memcpy(d
, a
->u
.ptr
, size
);
723 for(i
=0; i
<o
->count
; i
++)
725 eattr
*a
= &o
->attrs
[i
];
726 if (!(a
->type
& EAF_EMBEDDED
))
734 get_generic_attr(eattr
*a
, byte
**buf
, int buflen UNUSED
)
736 if (a
->id
== EA_GEN_IGP_METRIC
)
738 *buf
+= bsprintf(*buf
, "igp_metric");
746 ea_format_bitfield(struct eattr
*a
, byte
*buf
, int bufsize
, const char **names
, int min
, int max
)
748 byte
*bound
= buf
+ bufsize
- 32;
749 u32 data
= a
->u
.data
;
752 for (i
= min
; i
< max
; i
++)
753 if ((data
& (1u << i
)) && names
[i
])
761 buf
+= bsprintf(buf
, " %s", names
[i
]);
766 bsprintf(buf
, " %08x", data
);
772 opaque_format(struct adata
*ad
, byte
*buf
, uint size
)
774 byte
*bound
= buf
+ size
- 10;
777 for(i
= 0; i
< ad
->length
; i
++)
787 buf
+= bsprintf(buf
, "%02x", ad
->data
[i
]);
795 ea_show_int_set(struct cli
*c
, struct adata
*ad
, int way
, byte
*pos
, byte
*buf
, byte
*end
)
797 int i
= int_set_format(ad
, way
, 0, pos
, end
- pos
);
798 cli_printf(c
, -1012, "\t%s", buf
);
801 i
= int_set_format(ad
, way
, i
, buf
, end
- buf
- 1);
802 cli_printf(c
, -1012, "\t\t%s", buf
);
807 ea_show_ec_set(struct cli
*c
, struct adata
*ad
, byte
*pos
, byte
*buf
, byte
*end
)
809 int i
= ec_set_format(ad
, 0, pos
, end
- pos
);
810 cli_printf(c
, -1012, "\t%s", buf
);
813 i
= ec_set_format(ad
, i
, buf
, end
- buf
- 1);
814 cli_printf(c
, -1012, "\t\t%s", buf
);
819 ea_show_lc_set(struct cli
*c
, struct adata
*ad
, byte
*pos
, byte
*buf
, byte
*end
)
821 int i
= lc_set_format(ad
, 0, pos
, end
- pos
);
822 cli_printf(c
, -1012, "\t%s", buf
);
825 i
= lc_set_format(ad
, i
, buf
, end
- buf
- 1);
826 cli_printf(c
, -1012, "\t\t%s", buf
);
831 * ea_show - print an &eattr to CLI
832 * @c: destination CLI
833 * @e: attribute to be printed
835 * This function takes an extended attribute represented by its &eattr
836 * structure and prints it to the CLI according to the type information.
838 * If the protocol defining the attribute provides its own
839 * get_attr() hook, it's consulted first.
842 ea_show(struct cli
*c
, eattr
*e
)
845 int status
= GA_UNKNOWN
;
846 struct adata
*ad
= (e
->type
& EAF_EMBEDDED
) ? NULL
: e
->u
.ptr
;
847 byte buf
[CLI_MSG_SIZE
];
848 byte
*pos
= buf
, *end
= buf
+ sizeof(buf
);
850 if (p
= attr_class_to_protocol
[EA_PROTO(e
->id
)])
852 pos
+= bsprintf(pos
, "%s.", p
->name
);
854 status
= p
->get_attr(e
, pos
, end
- pos
);
857 else if (EA_PROTO(e
->id
))
858 pos
+= bsprintf(pos
, "%02x.", EA_PROTO(e
->id
));
860 status
= get_generic_attr(e
, &pos
, end
- pos
);
862 if (status
< GA_NAME
)
863 pos
+= bsprintf(pos
, "%02x", EA_ID(e
->id
));
864 if (status
< GA_FULL
)
868 switch (e
->type
& EAF_TYPE_MASK
)
871 bsprintf(pos
, "%u", e
->u
.data
);
873 case EAF_TYPE_OPAQUE
:
874 opaque_format(ad
, pos
, end
- pos
);
876 case EAF_TYPE_IP_ADDRESS
:
877 bsprintf(pos
, "%I", *(ip_addr
*) ad
->data
);
879 case EAF_TYPE_ROUTER_ID
:
880 bsprintf(pos
, "%R", e
->u
.data
);
882 case EAF_TYPE_AS_PATH
:
883 as_path_format(ad
, pos
, end
- pos
);
885 case EAF_TYPE_BITFIELD
:
886 bsprintf(pos
, "%08x", e
->u
.data
);
888 case EAF_TYPE_INT_SET
:
889 ea_show_int_set(c
, ad
, 1, pos
, buf
, end
);
891 case EAF_TYPE_EC_SET
:
892 ea_show_ec_set(c
, ad
, pos
, buf
, end
);
894 case EAF_TYPE_LC_SET
:
895 ea_show_lc_set(c
, ad
, pos
, buf
, end
);
899 bsprintf(pos
, "<type %02x>", e
->type
);
902 cli_printf(c
, -1012, "\t%s", buf
);
906 * ea_dump - dump an extended attribute
907 * @e: attribute to be dumped
909 * ea_dump() dumps contents of the extended attribute given to
925 (e
->flags
& EALF_SORTED
) ? 'S' : 's',
926 (e
->flags
& EALF_BISECT
) ? 'B' : 'b',
927 (e
->flags
& EALF_CACHED
) ? 'C' : 'c');
928 for(i
=0; i
<e
->count
; i
++)
930 eattr
*a
= &e
->attrs
[i
];
931 debug(" %02x:%02x.%02x", EA_PROTO(a
->id
), EA_ID(a
->id
), a
->flags
);
932 if (a
->type
& EAF_TEMP
)
934 debug("=%c", "?iO?I?P???S?????" [a
->type
& EAF_TYPE_MASK
]);
935 if (a
->type
& EAF_ORIGINATED
)
937 if (a
->type
& EAF_EMBEDDED
)
938 debug(":%08x", a
->u
.data
);
941 int j
, len
= a
->u
.ptr
->length
;
944 debug("%02x", a
->u
.ptr
->data
[j
]);
953 * ea_hash - calculate an &ea_list hash key
956 * ea_hash() takes an extended attribute list and calculated a hopefully
957 * uniformly distributed hash value from its contents.
962 const u64 mul
= 0x68576150f3d6847;
963 u64 h
= 0xafcef24eda8b29;
966 if (e
) /* Assuming chain of length 1 */
968 for(i
=0; i
<e
->count
; i
++)
970 struct eattr
*a
= &e
->attrs
[i
];
971 h
^= a
->id
; h
*= mul
;
972 if (a
->type
& EAF_EMBEDDED
)
976 struct adata
*d
= a
->u
.ptr
;
977 h
^= mem_hash(d
->data
, d
->length
);
982 return (h
>> 32) ^ (h
& 0xffffffff);
986 * ea_append - concatenate &ea_list's
987 * @to: destination list (can be %NULL)
988 * @what: list to be appended (can be %NULL)
990 * This function appends the &ea_list @what at the end of
991 * &ea_list @to and returns a pointer to the resulting list.
994 ea_append(ea_list
*to
, ea_list
*what
)
1011 static uint rta_cache_count
;
1012 static uint rta_cache_size
= 32;
1013 static uint rta_cache_limit
;
1014 static uint rta_cache_mask
;
1015 static rta
**rta_hash_table
;
1018 rta_alloc_hash(void)
1020 rta_hash_table
= mb_allocz(rta_pool
, sizeof(rta
*) * rta_cache_size
);
1021 if (rta_cache_size
< 32768)
1022 rta_cache_limit
= rta_cache_size
* 2;
1024 rta_cache_limit
= ~0;
1025 rta_cache_mask
= rta_cache_size
- 1;
1033 #define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f));
1043 return mem_hash_value(&h
) ^ nexthop_hash(&(a
->nh
)) ^ ea_hash(a
->eattrs
);
1047 rta_same(rta
*x
, rta
*y
)
1049 return (x
->src
== y
->src
&&
1050 x
->source
== y
->source
&&
1051 x
->scope
== y
->scope
&&
1052 x
->dest
== y
->dest
&&
1053 x
->igp_metric
== y
->igp_metric
&&
1054 ipa_equal(x
->from
, y
->from
) &&
1055 x
->hostentry
== y
->hostentry
&&
1056 nexthop_same(&(x
->nh
), &(y
->nh
)) &&
1057 ea_same(x
->eattrs
, y
->eattrs
));
1060 static inline slab
*
1063 return rta_slab_
[a
->nh
.labels
> 2 ? 3 : a
->nh
.labels
];
1069 rta
*r
= sl_alloc(rta_slab(o
));
1071 memcpy(r
, o
, rta_size(o
));
1073 r
->nh
.next
= nexthop_copy(o
->nh
.next
);
1074 r
->eattrs
= ea_list_copy(o
->eattrs
);
1081 uint h
= r
->hash_key
& rta_cache_mask
;
1082 r
->next
= rta_hash_table
[h
];
1084 r
->next
->pprev
= &r
->next
;
1085 r
->pprev
= &rta_hash_table
[h
];
1086 rta_hash_table
[h
] = r
;
1092 uint ohs
= rta_cache_size
;
1095 rta
**oht
= rta_hash_table
;
1097 rta_cache_size
= 2*rta_cache_size
;
1098 DBG("Rehashing rta cache from %d to %d entries.\n", ohs
, rta_cache_size
);
1100 for(h
=0; h
<ohs
; h
++)
1101 for(r
=oht
[h
]; r
; r
=n
)
1110 * rta_lookup - look up a &rta in attribute cache
1111 * @o: a un-cached &rta
1113 * rta_lookup() gets an un-cached &rta structure and returns its cached
1114 * counterpart. It starts with examining the attribute cache to see whether
1115 * there exists a matching entry. If such an entry exists, it's returned and
1116 * its use count is incremented, else a new entry is created with use count
1119 * The extended attribute lists attached to the &rta are automatically
1120 * converted to the normalized form.
1128 ASSERT(!(o
->aflags
& RTAF_CACHED
));
1131 if (o
->eattrs
->next
) /* Multiple ea_list's, need to merge them */
1133 ea_list
*ml
= alloca(ea_scan(o
->eattrs
));
1134 ea_merge(o
->eattrs
, ml
);
1141 for(r
=rta_hash_table
[h
& rta_cache_mask
]; r
; r
=r
->next
)
1142 if (r
->hash_key
== h
&& rta_same(r
, o
))
1143 return rta_clone(r
);
1147 r
->aflags
= RTAF_CACHED
;
1148 rt_lock_source(r
->src
);
1149 rt_lock_hostentry(r
->hostentry
);
1152 if (++rta_cache_count
> rta_cache_limit
)
1161 ASSERT(rta_cache_count
&& (a
->aflags
& RTAF_CACHED
));
1163 *a
->pprev
= a
->next
;
1165 a
->next
->pprev
= a
->pprev
;
1166 rt_unlock_hostentry(a
->hostentry
);
1167 rt_unlock_source(a
->src
);
1169 nexthop_free(a
->nh
.next
);
1171 a
->aflags
= 0; /* Poison the entry */
1172 sl_free(rta_slab(a
), a
);
1176 rta_do_cow(rta
*o
, linpool
*lp
)
1178 rta
*r
= lp_alloc(lp
, rta_size(o
));
1179 memcpy(r
, o
, rta_size(o
));
1180 for (struct nexthop
**nhn
= &(r
->nh
.next
), *nho
= o
->nh
.next
; nho
; nho
= nho
->next
)
1182 *nhn
= lp_alloc(lp
, nexthop_size(nho
));
1183 memcpy(*nhn
, nho
, nexthop_size(nho
));
1184 nhn
= &((*nhn
)->next
);
1192 * rta_dump - dump route attributes
1193 * @a: attribute structure to dump
1195 * This function takes a &rta and dumps its contents to the debug output.
1200 static char *rts
[] = { "RTS_DUMMY", "RTS_STATIC", "RTS_INHERIT", "RTS_DEVICE",
1201 "RTS_STAT_DEV", "RTS_REDIR", "RTS_RIP",
1202 "RTS_OSPF", "RTS_OSPF_IA", "RTS_OSPF_EXT1",
1203 "RTS_OSPF_EXT2", "RTS_BGP", "RTS_PIPE", "RTS_BABEL" };
1204 static char *rtd
[] = { "", " DEV", " HOLE", " UNREACH", " PROHIBIT" };
1206 debug("p=%s uc=%d %s %s%s h=%04x",
1207 a
->src
->proto
->name
, a
->uc
, rts
[a
->source
], ip_scope_text(a
->scope
),
1208 rtd
[a
->dest
], a
->hash_key
);
1209 if (!(a
->aflags
& RTAF_CACHED
))
1211 debug(" <-%I", a
->from
);
1212 if (a
->dest
== RTD_UNICAST
)
1213 for (struct nexthop
*nh
= &(a
->nh
); nh
; nh
= nh
->next
)
1215 if (ipa_nonzero(nh
->gw
)) debug(" ->%I", nh
->gw
);
1216 if (nh
->labels
) debug(" L %d", nh
->label
[0]);
1217 for (int i
=1; i
<nh
->labels
; i
++)
1218 debug("/%d", nh
->label
[i
]);
1219 debug(" [%s]", nh
->iface
? nh
->iface
->name
: "???");
1229 * rta_dump_all - dump attribute cache
1231 * This function dumps the whole contents of route attribute cache
1232 * to the debug output.
1240 debug("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count
, rta_cache_limit
);
1241 for(h
=0; h
<rta_cache_size
; h
++)
1242 for(a
=rta_hash_table
[h
]; a
; a
=a
->next
)
1252 rta_show(struct cli
*c
, rta
*a
, ea_list
*eal
)
1254 static char *src_names
[] = { "dummy", "static", "inherit", "device", "static-device", "redirect",
1255 "RIP", "OSPF", "OSPF-IA", "OSPF-E1", "OSPF-E2", "BGP", "pipe" };
1258 cli_printf(c
, -1008, "\tType: %s %s", src_names
[a
->source
], ip_scope_text(a
->scope
));
1261 for(; eal
; eal
=eal
->next
)
1262 for(i
=0; i
<eal
->count
; i
++)
1263 ea_show(c
, &eal
->attrs
[i
]);
1267 * rta_init - initialize route attribute cache
1269 * This function is called during initialization of the routing
1270 * table module to set up the internals of the attribute cache.
1275 rta_pool
= rp_new(&root_pool
, "Attributes");
1277 rta_slab_
[0] = sl_new(rta_pool
, sizeof(rta
));
1278 rta_slab_
[1] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
));
1279 rta_slab_
[2] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
)*2);
1280 rta_slab_
[3] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
)*MPLS_MAX_LABEL_STACK
);
1282 nexthop_slab_
[0] = sl_new(rta_pool
, sizeof(struct nexthop
));
1283 nexthop_slab_
[1] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
));
1284 nexthop_slab_
[2] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
)*2);
1285 nexthop_slab_
[3] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
)*MPLS_MAX_LABEL_STACK
);
1292 * Documentation for functions declared inline in route.h
1297 * rta_clone - clone route attributes
1298 * @r: a &rta to be cloned
1300 * rta_clone() takes a cached &rta and returns its identical cached
1301 * copy. Currently it works by just returning the original &rta with
1302 * its use count incremented.
1304 static inline rta
*rta_clone(rta
*r
)
1308 * rta_free - free route attributes
1309 * @r: a &rta to be freed
1311 * If you stop using a &rta (for example when deleting a route which uses
1312 * it), you need to call rta_free() to notify the attribute cache the
1313 * attribute is no longer in use and can be freed if you were the last
1314 * user (which rta_free() tests by inspecting the use count).
1316 static inline void rta_free(rta
*r
)