/*
 *	BIRD -- Route Attribute Cache
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/**
 * DOC: Route attribute cache
 *
 * Each route entry carries a set of route attributes. Several of them
 * vary from route to route, but most attributes are usually common
 * for a large number of routes. To conserve memory, we've decided to
 * store only the varying ones directly in the &rte and hold the rest
 * in a special structure called &rta which is shared among all the
 * &rte's with these attributes.
 *
 * Each &rta contains all the static attributes of the route (i.e.,
 * those which are always present) as structure members and a list of
 * dynamic attributes represented by a linked list of &ea_list
 * structures, each of them consisting of an array of &eattr's containing
 * the individual attributes. An attribute can be specified more than once
 * in the &ea_list chain and in such case the first occurrence overrides
 * the others. This semantics is used especially when someone (for example
 * a filter) wishes to alter values of several dynamic attributes, but
 * it wants to preserve the original attribute lists maintained by
 * another module.
 *
 * Each &eattr contains an attribute identifier (split to protocol ID and
 * per-protocol attribute ID), protocol dependent flags, a type code (consisting
 * of several bit fields describing attribute characteristics) and either an
 * embedded 32-bit value or a pointer to a &adata structure holding attribute
 * contents.
 *
 * There exist two variants of &rta's -- cached and un-cached ones. Un-cached
 * &rta's can have arbitrarily complex structure of &ea_list's and they
 * can be modified by any module in the route processing chain. Cached
 * &rta's have their attribute lists normalized (that means at most one
 * &ea_list is present and its values are sorted in order to speed up
 * searching), they are stored in a hash table to make fast lookup possible
 * and they are provided with a use count to allow sharing.
 *
 * Routing tables always contain only cached &rta's.
 */
47 #include "nest/bird.h"
48 #include "nest/route.h"
49 #include "nest/protocol.h"
50 #include "nest/iface.h"
52 #include "nest/attrs.h"
53 #include "lib/alloca.h"
56 #include "lib/resource.h"
57 #include "lib/string.h"
63 static slab
*rta_slab_
[4];
64 static slab
*nexthop_slab_
[4];
65 static slab
*rte_src_slab
;
67 static struct idm src_ids
;
68 #define SRC_ID_INIT_SIZE 4
72 #define RSH_KEY(n) n->proto, n->private_id
73 #define RSH_NEXT(n) n->next
74 #define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
75 #define RSH_FN(p,n) p->hash_key ^ u32_hash(n)
77 #define RSH_REHASH rte_src_rehash
78 #define RSH_PARAMS /2, *2, 1, 1, 8, 20
79 #define RSH_INIT_ORDER 6
81 static HASH(struct rte_src
) src_hash
;
83 struct protocol
*attr_class_to_protocol
[EAP_MAX
];
89 rte_src_slab
= sl_new(rta_pool
, sizeof(struct rte_src
));
91 idm_init(&src_ids
, rta_pool
, SRC_ID_INIT_SIZE
);
93 HASH_INIT(src_hash
, rta_pool
, RSH_INIT_ORDER
);
97 HASH_DEFINE_REHASH_FN(RSH
, struct rte_src
)
100 rt_find_source(struct proto
*p
, u32 id
)
102 return HASH_FIND(src_hash
, RSH
, p
, id
);
106 rt_get_source(struct proto
*p
, u32 id
)
108 struct rte_src
*src
= rt_find_source(p
, id
);
113 src
= sl_alloc(rte_src_slab
);
115 src
->private_id
= id
;
116 src
->global_id
= idm_alloc(&src_ids
);
119 HASH_INSERT2(src_hash
, RSH
, rta_pool
, src
);
125 rt_prune_sources(void)
127 HASH_WALK_FILTER(src_hash
, next
, src
, sp
)
131 HASH_DO_REMOVE(src_hash
, RSH
, sp
);
132 idm_free(&src_ids
, src
->global_id
);
133 sl_free(rte_src_slab
, src
);
136 HASH_WALK_FILTER_END
;
138 HASH_MAY_RESIZE_DOWN(src_hash
, RSH
, rta_pool
);
147 nexthop_hash(struct nexthop
*x
)
150 for (; x
; x
= x
->next
)
152 h
^= ipa_hash(x
->gw
) ^ (h
<< 5) ^ (h
>> 9);
154 for (int i
= 0; i
< x
->labels
; i
++)
155 h
^= x
->label
[i
] ^ (h
<< 6) ^ (h
>> 7);
162 nexthop__same(struct nexthop
*x
, struct nexthop
*y
)
164 for (; x
&& y
; x
= x
->next
, y
= y
->next
)
166 if (!ipa_equal(x
->gw
, y
->gw
) || (x
->iface
!= y
->iface
) || (x
->weight
!= y
->weight
) || (x
->labels
!= y
->labels
))
169 for (int i
= 0; i
< x
->labels
; i
++)
170 if (x
->label
[i
] != y
->label
[i
])
178 nexthop_compare_node(struct nexthop
*x
, struct nexthop
*y
)
188 r
= ((int) y
->weight
) - ((int) x
->weight
);
192 r
= ipa_compare(x
->gw
, y
->gw
);
196 r
= ((int) y
->labels
) - ((int) x
->labels
);
200 for (int i
= 0; i
< y
->labels
; i
++)
202 r
= ((int) y
->label
[i
]) - ((int) x
->label
[i
]);
207 return ((int) x
->iface
->index
) - ((int) y
->iface
->index
);
210 static inline struct nexthop
*
211 nexthop_copy_node(const struct nexthop
*src
, linpool
*lp
)
213 struct nexthop
*n
= lp_alloc(lp
, nexthop_size(src
));
215 memcpy(n
, src
, nexthop_size(src
));
222 * nexthop_merge - merge nexthop lists
225 * @rx: reusability of list @x
226 * @ry: reusability of list @y
227 * @max: max number of nexthops
228 * @lp: linpool for allocating nexthops
230 * The nexthop_merge() function takes two nexthop lists @x and @y and merges them,
231 * eliminating possible duplicates. The input lists must be sorted and the
232 * result is sorted too. The number of nexthops in result is limited by @max.
233 * New nodes are allocated from linpool @lp.
235 * The arguments @rx and @ry specify whether corresponding input lists may be
236 * consumed by the function (i.e. their nodes reused in the resulting list), in
237 * that case the caller should not access these lists after that. To eliminate
238 * issues with deallocation of these lists, the caller should use some form of
239 * bulk deallocation (e.g. stack or linpool) to free these nodes when the
240 * resulting list is no longer needed. When reusability is not set, the
241 * corresponding lists are not modified nor linked from the resulting list.
244 nexthop_merge(struct nexthop
*x
, struct nexthop
*y
, int rx
, int ry
, int max
, linpool
*lp
)
246 struct nexthop
*root
= NULL
;
247 struct nexthop
**n
= &root
;
249 while ((x
|| y
) && max
--)
251 int cmp
= nexthop_compare_node(x
, y
);
254 *n
= rx
? x
: nexthop_copy_node(x
, lp
);
259 *n
= ry
? y
: nexthop_copy_node(y
, lp
);
264 *n
= rx
? x
: (ry
? y
: nexthop_copy_node(x
, lp
));
276 nexthop_insert(struct nexthop
**n
, struct nexthop
*x
)
278 for (; *n
; n
= &((*n
)->next
))
280 int cmp
= nexthop_compare_node(*n
, x
);
295 nexthop_is_sorted(struct nexthop
*x
)
297 for (; x
&& x
->next
; x
= x
->next
)
298 if (nexthop_compare_node(x
, x
->next
) >= 0)
305 nexthop_slab(struct nexthop
*nh
)
307 return nexthop_slab_
[MIN(nh
->labels
, 3)];
310 static struct nexthop
*
311 nexthop_copy(struct nexthop
*o
)
313 struct nexthop
*first
= NULL
;
314 struct nexthop
**last
= &first
;
316 for (; o
; o
= o
->next
)
318 struct nexthop
*n
= sl_alloc(nexthop_slab(o
));
322 n
->weight
= o
->weight
;
323 n
->labels
= o
->labels
;
324 for (int i
=0; i
<o
->labels
; i
++)
325 n
->label
[i
] = o
->label
[i
];
335 nexthop_free(struct nexthop
*o
)
342 sl_free(nexthop_slab(o
), o
);
349 * Extended Attributes
352 static inline eattr
*
353 ea__find(ea_list
*e
, unsigned id
)
360 if (e
->flags
& EALF_BISECT
)
377 for(m
=0; m
<e
->count
; m
++)
378 if (e
->attrs
[m
].id
== id
)
386 * ea_find - find an extended attribute
387 * @e: attribute list to search in
388 * @id: attribute ID to search for
390 * Given an extended attribute list, ea_find() searches for a first
391 * occurrence of an attribute with specified ID, returning either a pointer
392 * to its &eattr structure or %NULL if no such attribute exists.
395 ea_find(ea_list
*e
, unsigned id
)
397 eattr
*a
= ea__find(e
, id
& EA_CODE_MASK
);
399 if (a
&& (a
->type
& EAF_TYPE_MASK
) == EAF_TYPE_UNDEF
&&
400 !(id
& EA_ALLOW_UNDEF
))
406 * ea_walk - walk through extended attributes
407 * @s: walk state structure
408 * @id: start of attribute ID interval
409 * @max: length of attribute ID interval
411 * Given an extended attribute list, ea_walk() walks through the list looking
412 * for first occurrences of attributes with ID in specified interval from @id to
413 * (@id + @max - 1), returning pointers to found &eattr structures, storing its
414 * walk state in @s for subsequent calls.
416 * The function ea_walk() is supposed to be called in a loop, with initially
417 * zeroed walk state structure @s with filled the initial extended attribute
418 * list, returning one found attribute in each call or %NULL when no other
419 * attribute exists. The extended attribute list or the arguments should not be
420 * modified between calls. The maximum value of @max is 128.
423 ea_walk(struct ea_walk_state
*s
, uint id
, uint max
)
425 ea_list
*e
= s
->eattrs
;
434 for (; e
; e
= e
->next
)
436 if (e
->flags
& EALF_BISECT
)
445 if (e
->attrs
[m
].id
< id
)
456 a_max
= e
->attrs
+ e
->count
;
457 for (; a
< a_max
; a
++)
458 if ((a
->id
>= id
) && (a
->id
< max
))
462 if (BIT32_TEST(s
->visited
, n
))
465 BIT32_SET(s
->visited
, n
);
467 if ((a
->type
& EAF_TYPE_MASK
) == EAF_TYPE_UNDEF
)
474 else if (e
->flags
& EALF_BISECT
)
482 * ea_get_int - fetch an integer attribute
485 * @def: default value
487 * This function is a shortcut for retrieving a value of an integer attribute
488 * by calling ea_find() to find the attribute, extracting its value or returning
489 * a provided default if no such attribute is present.
492 ea_get_int(ea_list
*e
, unsigned id
, int def
)
494 eattr
*a
= ea_find(e
, id
);
501 ea_do_sort(ea_list
*e
)
503 unsigned n
= e
->count
;
505 eattr
*b
= alloca(n
* sizeof(eattr
));
508 /* We need to use a stable sorting algorithm, hence mergesort */
514 eattr
*p
, *q
, *lo
, *hi
;
518 while (s
< n
&& p
[-1].id
<= a
[s
].id
)
524 while (s
< n
&& p
[-1].id
<= a
[s
].id
)
529 while (lo
< q
&& hi
< p
)
530 if (lo
->id
<= hi
->id
)
545 ea_do_prune(ea_list
*e
)
547 eattr
*s
, *d
, *l
, *s0
;
550 /* Discard duplicates and undefs. Do you remember sorting was stable? */
552 l
= e
->attrs
+ e
->count
;
556 while (s
< l
&& s
->id
== s
[-1].id
)
558 /* s0 is the most recent version, s[-1] the oldest one */
559 if ((s0
->type
& EAF_TYPE_MASK
) != EAF_TYPE_UNDEF
)
562 d
->type
= (d
->type
& ~(EAF_ORIGINATED
|EAF_FRESH
)) | (s
[-1].type
& EAF_ORIGINATED
);
571 * ea_sort - sort an attribute list
572 * @e: list to be sorted
574 * This function takes a &ea_list chain and sorts the attributes
575 * within each of its entries.
577 * If an attribute occurs multiple times in a single &ea_list,
578 * ea_sort() leaves only the first (the only significant) occurrence.
585 if (!(e
->flags
& EALF_SORTED
))
589 e
->flags
|= EALF_SORTED
;
592 e
->flags
|= EALF_BISECT
;
598 * ea_scan - estimate attribute list size
601 * This function calculates an upper bound of the size of
602 * a given &ea_list after merging with ea_merge().
614 return sizeof(ea_list
) + sizeof(eattr
)*cnt
;
618 * ea_merge - merge segments of an attribute list
620 * @t: buffer to store the result to
622 * This function takes a possibly multi-segment attribute list
623 * and merges all of its segments to one.
625 * The primary use of this function is for &ea_list normalization:
626 * first call ea_scan() to determine how much memory will the result
627 * take, then allocate a buffer (usually using alloca()), merge the
628 * segments with ea_merge() and finally sort and prune the result
629 * by calling ea_sort().
632 ea_merge(ea_list
*e
, ea_list
*t
)
641 memcpy(d
, e
->attrs
, sizeof(eattr
)*e
->count
);
642 t
->count
+= e
->count
;
649 * ea_same - compare two &ea_list's
653 * ea_same() compares two normalized attribute lists @x and @y and returns
654 * 1 if they contain the same attributes, 0 otherwise.
657 ea_same(ea_list
*x
, ea_list
*y
)
663 ASSERT(!x
->next
&& !y
->next
);
664 if (x
->count
!= y
->count
)
666 for(c
=0; c
<x
->count
; c
++)
668 eattr
*a
= &x
->attrs
[c
];
669 eattr
*b
= &y
->attrs
[c
];
671 if (a
->id
!= b
->id
||
672 a
->flags
!= b
->flags
||
673 a
->type
!= b
->type
||
674 ((a
->type
& EAF_EMBEDDED
) ? a
->u
.data
!= b
->u
.data
: !adata_same(a
->u
.ptr
, b
->u
.ptr
)))
680 static inline ea_list
*
681 ea_list_copy(ea_list
*o
)
689 len
= sizeof(ea_list
) + sizeof(eattr
) * o
->count
;
690 n
= mb_alloc(rta_pool
, len
);
692 n
->flags
|= EALF_CACHED
;
693 for(i
=0; i
<o
->count
; i
++)
695 eattr
*a
= &n
->attrs
[i
];
696 if (!(a
->type
& EAF_EMBEDDED
))
698 unsigned size
= sizeof(struct adata
) + a
->u
.ptr
->length
;
699 struct adata
*d
= mb_alloc(rta_pool
, size
);
700 memcpy(d
, a
->u
.ptr
, size
);
715 for(i
=0; i
<o
->count
; i
++)
717 eattr
*a
= &o
->attrs
[i
];
718 if (!(a
->type
& EAF_EMBEDDED
))
726 get_generic_attr(eattr
*a
, byte
**buf
, int buflen UNUSED
)
728 if (a
->id
== EA_GEN_IGP_METRIC
)
730 *buf
+= bsprintf(*buf
, "igp_metric");
738 ea_format_bitfield(struct eattr
*a
, byte
*buf
, int bufsize
, const char **names
, int min
, int max
)
740 byte
*bound
= buf
+ bufsize
- 32;
741 u32 data
= a
->u
.data
;
744 for (i
= min
; i
< max
; i
++)
745 if ((data
& (1u << i
)) && names
[i
])
753 buf
+= bsprintf(buf
, " %s", names
[i
]);
758 bsprintf(buf
, " %08x", data
);
764 opaque_format(struct adata
*ad
, byte
*buf
, uint size
)
766 byte
*bound
= buf
+ size
- 10;
769 for(i
= 0; i
< ad
->length
; i
++)
779 buf
+= bsprintf(buf
, "%02x", ad
->data
[i
]);
787 ea_show_int_set(struct cli
*c
, struct adata
*ad
, int way
, byte
*pos
, byte
*buf
, byte
*end
)
789 int i
= int_set_format(ad
, way
, 0, pos
, end
- pos
);
790 cli_printf(c
, -1012, "\t%s", buf
);
793 i
= int_set_format(ad
, way
, i
, buf
, end
- buf
- 1);
794 cli_printf(c
, -1012, "\t\t%s", buf
);
799 ea_show_ec_set(struct cli
*c
, struct adata
*ad
, byte
*pos
, byte
*buf
, byte
*end
)
801 int i
= ec_set_format(ad
, 0, pos
, end
- pos
);
802 cli_printf(c
, -1012, "\t%s", buf
);
805 i
= ec_set_format(ad
, i
, buf
, end
- buf
- 1);
806 cli_printf(c
, -1012, "\t\t%s", buf
);
811 ea_show_lc_set(struct cli
*c
, struct adata
*ad
, byte
*pos
, byte
*buf
, byte
*end
)
813 int i
= lc_set_format(ad
, 0, pos
, end
- pos
);
814 cli_printf(c
, -1012, "\t%s", buf
);
817 i
= lc_set_format(ad
, i
, buf
, end
- buf
- 1);
818 cli_printf(c
, -1012, "\t\t%s", buf
);
823 * ea_show - print an &eattr to CLI
824 * @c: destination CLI
825 * @e: attribute to be printed
827 * This function takes an extended attribute represented by its &eattr
828 * structure and prints it to the CLI according to the type information.
830 * If the protocol defining the attribute provides its own
831 * get_attr() hook, it's consulted first.
834 ea_show(struct cli
*c
, eattr
*e
)
837 int status
= GA_UNKNOWN
;
838 struct adata
*ad
= (e
->type
& EAF_EMBEDDED
) ? NULL
: e
->u
.ptr
;
839 byte buf
[CLI_MSG_SIZE
];
840 byte
*pos
= buf
, *end
= buf
+ sizeof(buf
);
842 if (p
= attr_class_to_protocol
[EA_PROTO(e
->id
)])
844 pos
+= bsprintf(pos
, "%s.", p
->name
);
846 status
= p
->get_attr(e
, pos
, end
- pos
);
849 else if (EA_PROTO(e
->id
))
850 pos
+= bsprintf(pos
, "%02x.", EA_PROTO(e
->id
));
852 status
= get_generic_attr(e
, &pos
, end
- pos
);
854 if (status
< GA_NAME
)
855 pos
+= bsprintf(pos
, "%02x", EA_ID(e
->id
));
856 if (status
< GA_FULL
)
860 switch (e
->type
& EAF_TYPE_MASK
)
863 bsprintf(pos
, "%u", e
->u
.data
);
865 case EAF_TYPE_OPAQUE
:
866 opaque_format(ad
, pos
, end
- pos
);
868 case EAF_TYPE_IP_ADDRESS
:
869 bsprintf(pos
, "%I", *(ip_addr
*) ad
->data
);
871 case EAF_TYPE_ROUTER_ID
:
872 bsprintf(pos
, "%R", e
->u
.data
);
874 case EAF_TYPE_AS_PATH
:
875 as_path_format(ad
, pos
, end
- pos
);
877 case EAF_TYPE_BITFIELD
:
878 bsprintf(pos
, "%08x", e
->u
.data
);
880 case EAF_TYPE_INT_SET
:
881 ea_show_int_set(c
, ad
, 1, pos
, buf
, end
);
883 case EAF_TYPE_EC_SET
:
884 ea_show_ec_set(c
, ad
, pos
, buf
, end
);
886 case EAF_TYPE_LC_SET
:
887 ea_show_lc_set(c
, ad
, pos
, buf
, end
);
891 bsprintf(pos
, "<type %02x>", e
->type
);
894 cli_printf(c
, -1012, "\t%s", buf
);
898 * ea_dump - dump an extended attribute
899 * @e: attribute to be dumped
901 * ea_dump() dumps contents of the extended attribute given to
917 (e
->flags
& EALF_SORTED
) ? 'S' : 's',
918 (e
->flags
& EALF_BISECT
) ? 'B' : 'b',
919 (e
->flags
& EALF_CACHED
) ? 'C' : 'c');
920 for(i
=0; i
<e
->count
; i
++)
922 eattr
*a
= &e
->attrs
[i
];
923 debug(" %02x:%02x.%02x", EA_PROTO(a
->id
), EA_ID(a
->id
), a
->flags
);
924 if (a
->type
& EAF_TEMP
)
926 debug("=%c", "?iO?I?P???S?????" [a
->type
& EAF_TYPE_MASK
]);
927 if (a
->type
& EAF_ORIGINATED
)
929 if (a
->type
& EAF_EMBEDDED
)
930 debug(":%08x", a
->u
.data
);
933 int j
, len
= a
->u
.ptr
->length
;
936 debug("%02x", a
->u
.ptr
->data
[j
]);
945 * ea_hash - calculate an &ea_list hash key
948 * ea_hash() takes an extended attribute list and calculated a hopefully
949 * uniformly distributed hash value from its contents.
954 const u64 mul
= 0x68576150f3d6847;
955 u64 h
= 0xafcef24eda8b29;
958 if (e
) /* Assuming chain of length 1 */
960 for(i
=0; i
<e
->count
; i
++)
962 struct eattr
*a
= &e
->attrs
[i
];
963 h
^= a
->id
; h
*= mul
;
964 if (a
->type
& EAF_EMBEDDED
)
968 struct adata
*d
= a
->u
.ptr
;
969 h
^= mem_hash(d
->data
, d
->length
);
974 return (h
>> 32) ^ (h
& 0xffffffff);
978 * ea_append - concatenate &ea_list's
979 * @to: destination list (can be %NULL)
980 * @what: list to be appended (can be %NULL)
982 * This function appends the &ea_list @what at the end of
983 * &ea_list @to and returns a pointer to the resulting list.
986 ea_append(ea_list
*to
, ea_list
*what
)
1003 static uint rta_cache_count
;
1004 static uint rta_cache_size
= 32;
1005 static uint rta_cache_limit
;
1006 static uint rta_cache_mask
;
1007 static rta
**rta_hash_table
;
1010 rta_alloc_hash(void)
1012 rta_hash_table
= mb_allocz(rta_pool
, sizeof(rta
*) * rta_cache_size
);
1013 if (rta_cache_size
< 32768)
1014 rta_cache_limit
= rta_cache_size
* 2;
1016 rta_cache_limit
= ~0;
1017 rta_cache_mask
= rta_cache_size
- 1;
1025 #define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f));
1035 return mem_hash_value(&h
) ^ nexthop_hash(&(a
->nh
)) ^ ea_hash(a
->eattrs
);
1039 rta_same(rta
*x
, rta
*y
)
1041 return (x
->src
== y
->src
&&
1042 x
->source
== y
->source
&&
1043 x
->scope
== y
->scope
&&
1044 x
->dest
== y
->dest
&&
1045 x
->igp_metric
== y
->igp_metric
&&
1046 ipa_equal(x
->from
, y
->from
) &&
1047 x
->hostentry
== y
->hostentry
&&
1048 nexthop_same(&(x
->nh
), &(y
->nh
)) &&
1049 ea_same(x
->eattrs
, y
->eattrs
));
1052 static inline slab
*
1055 return rta_slab_
[a
->nh
.labels
> 2 ? 3 : a
->nh
.labels
];
1061 rta
*r
= sl_alloc(rta_slab(o
));
1063 memcpy(r
, o
, rta_size(o
));
1065 r
->nh
.next
= nexthop_copy(o
->nh
.next
);
1066 r
->eattrs
= ea_list_copy(o
->eattrs
);
1073 uint h
= r
->hash_key
& rta_cache_mask
;
1074 r
->next
= rta_hash_table
[h
];
1076 r
->next
->pprev
= &r
->next
;
1077 r
->pprev
= &rta_hash_table
[h
];
1078 rta_hash_table
[h
] = r
;
1084 uint ohs
= rta_cache_size
;
1087 rta
**oht
= rta_hash_table
;
1089 rta_cache_size
= 2*rta_cache_size
;
1090 DBG("Rehashing rta cache from %d to %d entries.\n", ohs
, rta_cache_size
);
1092 for(h
=0; h
<ohs
; h
++)
1093 for(r
=oht
[h
]; r
; r
=n
)
1102 * rta_lookup - look up a &rta in attribute cache
1103 * @o: a un-cached &rta
1105 * rta_lookup() gets an un-cached &rta structure and returns its cached
1106 * counterpart. It starts with examining the attribute cache to see whether
1107 * there exists a matching entry. If such an entry exists, it's returned and
1108 * its use count is incremented, else a new entry is created with use count
1111 * The extended attribute lists attached to the &rta are automatically
1112 * converted to the normalized form.
1120 ASSERT(!(o
->aflags
& RTAF_CACHED
));
1123 if (o
->eattrs
->next
) /* Multiple ea_list's, need to merge them */
1125 ea_list
*ml
= alloca(ea_scan(o
->eattrs
));
1126 ea_merge(o
->eattrs
, ml
);
1133 for(r
=rta_hash_table
[h
& rta_cache_mask
]; r
; r
=r
->next
)
1134 if (r
->hash_key
== h
&& rta_same(r
, o
))
1135 return rta_clone(r
);
1139 r
->aflags
= RTAF_CACHED
;
1140 rt_lock_source(r
->src
);
1141 rt_lock_hostentry(r
->hostentry
);
1144 if (++rta_cache_count
> rta_cache_limit
)
1153 ASSERT(rta_cache_count
&& (a
->aflags
& RTAF_CACHED
));
1155 *a
->pprev
= a
->next
;
1157 a
->next
->pprev
= a
->pprev
;
1158 rt_unlock_hostentry(a
->hostentry
);
1159 rt_unlock_source(a
->src
);
1161 nexthop_free(a
->nh
.next
);
1163 a
->aflags
= 0; /* Poison the entry */
1164 sl_free(rta_slab(a
), a
);
1168 rta_do_cow(rta
*o
, linpool
*lp
)
1170 rta
*r
= lp_alloc(lp
, rta_size(o
));
1171 memcpy(r
, o
, rta_size(o
));
1172 for (struct nexthop
**nhn
= &(r
->nh
.next
), *nho
= o
->nh
.next
; nho
; nho
= nho
->next
)
1174 *nhn
= lp_alloc(lp
, nexthop_size(nho
));
1175 memcpy(*nhn
, nho
, nexthop_size(nho
));
1176 nhn
= &((*nhn
)->next
);
1184 * rta_dump - dump route attributes
1185 * @a: attribute structure to dump
1187 * This function takes a &rta and dumps its contents to the debug output.
1192 static char *rts
[] = { "RTS_DUMMY", "RTS_STATIC", "RTS_INHERIT", "RTS_DEVICE",
1193 "RTS_STAT_DEV", "RTS_REDIR", "RTS_RIP",
1194 "RTS_OSPF", "RTS_OSPF_IA", "RTS_OSPF_EXT1",
1195 "RTS_OSPF_EXT2", "RTS_BGP", "RTS_PIPE", "RTS_BABEL" };
1196 static char *rtd
[] = { "", " DEV", " HOLE", " UNREACH", " PROHIBIT" };
1198 debug("p=%s uc=%d %s %s%s h=%04x",
1199 a
->src
->proto
->name
, a
->uc
, rts
[a
->source
], ip_scope_text(a
->scope
),
1200 rtd
[a
->dest
], a
->hash_key
);
1201 if (!(a
->aflags
& RTAF_CACHED
))
1203 debug(" <-%I", a
->from
);
1204 if (a
->dest
== RTD_UNICAST
)
1205 for (struct nexthop
*nh
= &(a
->nh
); nh
; nh
= nh
->next
)
1207 if (ipa_nonzero(nh
->gw
)) debug(" ->%I", nh
->gw
);
1208 if (nh
->labels
) debug(" L %d", nh
->label
[0]);
1209 for (int i
=1; i
<nh
->labels
; i
++)
1210 debug("/%d", nh
->label
[i
]);
1211 debug(" [%s]", nh
->iface
? nh
->iface
->name
: "???");
1221 * rta_dump_all - dump attribute cache
1223 * This function dumps the whole contents of route attribute cache
1224 * to the debug output.
1232 debug("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count
, rta_cache_limit
);
1233 for(h
=0; h
<rta_cache_size
; h
++)
1234 for(a
=rta_hash_table
[h
]; a
; a
=a
->next
)
1244 rta_show(struct cli
*c
, rta
*a
, ea_list
*eal
)
1246 static char *src_names
[] = { "dummy", "static", "inherit", "device", "static-device", "redirect",
1247 "RIP", "OSPF", "OSPF-IA", "OSPF-E1", "OSPF-E2", "BGP", "pipe" };
1250 cli_printf(c
, -1008, "\tType: %s %s", src_names
[a
->source
], ip_scope_text(a
->scope
));
1253 for(; eal
; eal
=eal
->next
)
1254 for(i
=0; i
<eal
->count
; i
++)
1255 ea_show(c
, &eal
->attrs
[i
]);
1259 * rta_init - initialize route attribute cache
1261 * This function is called during initialization of the routing
1262 * table module to set up the internals of the attribute cache.
1267 rta_pool
= rp_new(&root_pool
, "Attributes");
1269 rta_slab_
[0] = sl_new(rta_pool
, sizeof(rta
));
1270 rta_slab_
[1] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
));
1271 rta_slab_
[2] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
)*2);
1272 rta_slab_
[3] = sl_new(rta_pool
, sizeof(rta
) + sizeof(u32
)*MPLS_MAX_LABEL_STACK
);
1274 nexthop_slab_
[0] = sl_new(rta_pool
, sizeof(struct nexthop
));
1275 nexthop_slab_
[1] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
));
1276 nexthop_slab_
[2] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
)*2);
1277 nexthop_slab_
[3] = sl_new(rta_pool
, sizeof(struct nexthop
) + sizeof(u32
)*MPLS_MAX_LABEL_STACK
);
1284 * Documentation for functions declared inline in route.h
1289 * rta_clone - clone route attributes
1290 * @r: a &rta to be cloned
1292 * rta_clone() takes a cached &rta and returns its identical cached
1293 * copy. Currently it works by just returning the original &rta with
1294 * its use count incremented.
1296 static inline rta
*rta_clone(rta
*r
)
1300 * rta_free - free route attributes
1301 * @r: a &rta to be freed
1303 * If you stop using a &rta (for example when deleting a route which uses
1304 * it), you need to call rta_free() to notify the attribute cache the
1305 * attribute is no longer in use and can be freed if you were the last
1306 * user (which rta_free() tests by inspecting the use count).
1308 static inline void rta_free(rta
*r
)