/*
 *	BIRD Internet Routing Daemon -- Routing Table
 *
 *	(c) 1998--2000 Martin Mares <mj@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */
10 #define _BIRD_ROUTE_H_
12 #include "lib/lists.h"
13 #include "lib/bitmap.h"
14 #include "lib/resource.h"
26 struct f_trie_walk_state
;
30 * Generic data structure for storing network prefixes. Also used
31 * for the master routing table. Currently implemented as a hash
34 * Available operations:
35 * - insertion of new entry
37 * - searching for entry by network prefix
38 * - asynchronous retrieval of fib contents
42 struct fib_node
*next
; /* Next in hash chain */
43 struct fib_iterator
*readers
; /* List of readers of this node */
47 struct fib_iterator
{ /* See lib/slists.h for an explanation */
48 struct fib_iterator
*prev
, *next
; /* Must be synced with struct fib_node! */
49 byte efef
; /* 0xff to distinguish between iterator and node */
51 struct fib_node
*node
; /* Or NULL if freshly merged */
55 typedef void (*fib_init_fn
)(struct fib
*, void *);
58 pool
*fib_pool
; /* Pool holding all our data */
59 slab
*fib_slab
; /* Slab holding all fib nodes */
60 struct fib_node
**hash_table
; /* Node hash table */
61 uint hash_size
; /* Number of hash table entries (a power of two) */
62 uint hash_order
; /* Binary logarithm of hash_size */
63 uint hash_shift
; /* 32 - hash_order */
64 uint addr_type
; /* Type of address data stored in fib (NET_*) */
65 uint node_size
; /* FIB node size, 0 for nonuniform */
66 uint node_offset
; /* Offset of fib_node struct inside of user data */
67 uint entries
; /* Number of entries */
68 uint entries_min
, entries_max
; /* Entry count limits (else start rehashing) */
69 fib_init_fn init
; /* Constructor */
72 static inline void * fib_node_to_user(struct fib
*f
, struct fib_node
*e
)
73 { return e
? (void *) ((char *) e
- f
->node_offset
) : NULL
; }
75 static inline struct fib_node
* fib_user_to_node(struct fib
*f
, void *e
)
76 { return e
? (void *) ((char *) e
+ f
->node_offset
) : NULL
; }
78 void fib_init(struct fib
*f
, pool
*p
, uint addr_type
, uint node_size
, uint node_offset
, uint hash_order
, fib_init_fn init
);
79 void *fib_find(struct fib
*, const net_addr
*); /* Find or return NULL if doesn't exist */
80 void *fib_get_chain(struct fib
*f
, const net_addr
*a
); /* Find first node in linked list from hash table */
81 void *fib_get(struct fib
*, const net_addr
*); /* Find or create new if nonexistent */
82 void *fib_route(struct fib
*, const net_addr
*); /* Longest-match routing lookup */
83 void fib_delete(struct fib
*, void *); /* Remove fib entry */
84 void fib_free(struct fib
*); /* Destroy the fib */
85 void fib_check(struct fib
*); /* Consistency check for debugging */
87 void fit_init(struct fib_iterator
*, struct fib
*); /* Internal functions, don't call */
88 struct fib_node
*fit_get(struct fib
*, struct fib_iterator
*);
89 void fit_put(struct fib_iterator
*, struct fib_node
*);
90 void fit_put_next(struct fib
*f
, struct fib_iterator
*i
, struct fib_node
*n
, uint hpos
);
91 void fit_put_end(struct fib_iterator
*i
);
92 void fit_copy(struct fib
*f
, struct fib_iterator
*dst
, struct fib_iterator
*src
);
95 #define FIB_WALK(fib, type, z) do { \
96 struct fib_node *fn_, **ff_ = (fib)->hash_table; \
97 uint count_ = (fib)->hash_size; \
100 for (fn_ = *ff_++; z = fib_node_to_user(fib, fn_); fn_=fn_->next)
102 #define FIB_WALK_END } while (0)
104 #define FIB_ITERATE_INIT(it, fib) fit_init(it, fib)
106 #define FIB_ITERATE_START(fib, it, type, z) do { \
107 struct fib_node *fn_ = fit_get(fib, it); \
108 uint count_ = (fib)->hash_size; \
109 uint hpos_ = (it)->hash; \
114 if (++hpos_ >= count_) \
116 fn_ = (fib)->hash_table[hpos_]; \
119 z = fib_node_to_user(fib, fn_);
121 #define FIB_ITERATE_END fn_ = fn_->next; } } while(0)
123 #define FIB_ITERATE_PUT(it) fit_put(it, fn_)
125 #define FIB_ITERATE_PUT_NEXT(it, fib) fit_put_next(fib, it, fn_, hpos_)
127 #define FIB_ITERATE_PUT_END(it) fit_put_end(it)
129 #define FIB_ITERATE_UNLINK(it, fib) fit_get(fib, it)
131 #define FIB_ITERATE_COPY(dst, src, fib) fit_copy(fib, dst, src)
135 * Master Routing Tables. Generally speaking, each of them contains a FIB
136 * with each entry pointing to a list of route entries representing routes
137 * to given network (with the selected one at the head).
139 * Each of the RTE's contains variable data (the preference and protocol-dependent
140 * metrics) and a pointer to a route attribute block common for many routes).
142 * It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
145 struct rtable_config
{
148 struct rtable
*table
;
149 struct proto_config
*krt_attached
; /* Kernel syncer attached to this table */
150 uint addr_type
; /* Type of address data stored in table (NET_*) */
151 int gc_max_ops
; /* Maximum number of operations before GC is run */
152 int gc_min_time
; /* Minimum time between two consecutive GC runs */
153 byte sorted
; /* Routes of network are sorted according to rte_better() */
154 byte internal
; /* Internal table of a protocol */
155 byte trie_used
; /* Rtable has attached trie */
156 btime min_settle_time
; /* Minimum settle time for notifications */
157 btime max_settle_time
; /* Maximum settle time for notifications */
160 typedef struct rtable
{
162 node n
; /* Node in list of all tables */
163 pool
*rp
; /* Resource pool to allocate everything from, including itself */
165 struct f_trie
*trie
; /* Trie of prefixes defined in fib */
166 char *name
; /* Name of this table */
167 list channels
; /* List of attached channels (struct channel) */
168 uint addr_type
; /* Type of address data stored in table (NET_*) */
169 int pipe_busy
; /* Pipe loop detection */
170 int use_count
; /* Number of protocols using this table */
171 u32 rt_count
; /* Number of routes in the table */
173 byte internal
; /* Internal table of a protocol */
176 struct hostcache
*hostcache
;
177 struct rtable_config
*config
; /* Configuration of this table */
178 struct config
*deleted
; /* Table doesn't exist in current configuration,
179 * delete as soon as use_count becomes 0 and remove
180 * obstacle from this routing table.
182 struct event
*rt_event
; /* Routing table event */
183 btime last_rt_change
; /* Last time when route changed */
184 btime base_settle_time
; /* Start time of rtable settling interval */
185 btime gc_time
; /* Time of last GC */
186 int gc_counter
; /* Number of operations since last GC */
187 byte prune_state
; /* Table prune state, 1 -> scheduled, 2-> running */
188 byte prune_trie
; /* Prune prefix trie during next table prune */
189 byte hcu_scheduled
; /* Hostcache update is scheduled */
190 byte nhu_state
; /* Next Hop Update state */
191 struct fib_iterator prune_fit
; /* Rtable prune FIB iterator */
192 struct fib_iterator nhu_fit
; /* Next Hop Update FIB iterator */
193 struct f_trie
*trie_new
; /* New prefix trie defined during pruning */
194 struct f_trie
*trie_old
; /* Old prefix trie waiting to be freed */
195 u32 trie_lock_count
; /* Prefix trie locked by walks */
196 u32 trie_old_lock_count
; /* Old prefix trie locked by walks */
198 list subscribers
; /* Subscribers for notifications */
199 struct timer
*settle_timer
; /* Settle time for notifications */
200 list flowspec_links
; /* List of flowspec links, src for NET_IPx and dst for NET_FLOWx */
201 struct f_trie
*flowspec_trie
; /* Trie for evaluation of flowspec notifications */
204 struct rt_subscription
{
207 void (*hook
)(struct rt_subscription
*b
);
211 struct rt_flowspec_link
{
219 #define NHU_SCHEDULED 1
220 #define NHU_RUNNING 2
223 typedef struct network
{
224 struct rte
*routes
; /* Available routes for this network */
225 struct fib_node n
; /* FIB flags reserved for kernel syncer */
229 slab
*slab
; /* Slab holding all hostentries */
230 struct hostentry
**hash_table
; /* Hash table for hostentries */
231 unsigned hash_order
, hash_shift
;
232 unsigned hash_max
, hash_min
;
234 linpool
*lp
; /* Linpool for trie */
235 struct f_trie
*trie
; /* Trie of prefixes that might affect hostentries */
236 list hostentries
; /* List of all hostentries */
237 byte update_hostcache
;
242 ip_addr addr
; /* IP address of host, part of key */
243 ip_addr link
; /* (link-local) IP address of host, used as gw
244 if host is directly attached */
245 struct rtable
*tab
; /* Dependent table, part of key */
246 struct hostentry
*next
; /* Next in hash chain */
247 unsigned hash_key
; /* Hash key */
248 unsigned uc
; /* Use count */
249 struct rta
*src
; /* Source rta entry */
250 byte dest
; /* Chosen route destination type (RTD_...) */
251 byte nexthop_linkable
; /* Nexthop list is completely non-device */
252 u32 igp_metric
; /* Chosen route IGP metric */
257 net
*net
; /* Network this RTE belongs to */
258 struct channel
*sender
; /* Channel used to send the route to the routing table */
259 struct rta
*attrs
; /* Attributes of this route */
260 u32 id
; /* Table specific route id */
261 byte flags
; /* Flags (REF_...) */
262 byte pflags
; /* Protocol-specific flags */
263 word pref
; /* Route preference */
264 btime lastmod
; /* Last modified */
265 union { /* Protocol-dependent data (metrics etc.) */
268 struct iface
*from
; /* Incoming iface */
269 u8 metric
; /* RIP metric */
270 u16 tag
; /* External route tag */
275 u32 metric1
, metric2
; /* OSPF Type 1 and Type 2 metrics */
276 u32 tag
; /* External route tag */
277 u32 router_id
; /* Router that originated this route */
282 u8 suppressed
; /* Used for deterministic MED comparison */
283 s8 stale
; /* Route is LLGR_STALE, -1 if unknown */
284 struct rtable
*base_table
; /* Base table for Flowspec validation */
289 u16 seqno
; /* Babel seqno */
290 u16 metric
; /* Babel metric */
291 u64 router_id
; /* Babel router id */
294 struct { /* Routes generated by krt sync (both temporary and inherited ones) */
295 s8 src
; /* Alleged route source (see krt.h) */
296 u8 proto
; /* Kernel source protocol ID */
297 u8 seen
; /* Seen during last scan */
298 u8 best
; /* Best route in network, propagated to core */
299 u32 metric
; /* Kernel metric */
304 #define REF_COW 1 /* Copy this rte on write */
305 #define REF_FILTERED 2 /* Route is rejected by import filter */
306 #define REF_STALE 4 /* Route is stale in a refresh cycle */
307 #define REF_DISCARD 8 /* Route is scheduled for discard */
308 #define REF_MODIFY 16 /* Route is scheduled for modify */
310 /* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
311 static inline int rte_is_valid(rte
*r
) { return r
&& !(r
->flags
& REF_FILTERED
); }
313 /* Route just has REF_FILTERED flag */
314 static inline int rte_is_filtered(rte
*r
) { return !!(r
->flags
& REF_FILTERED
); }
317 /* Types of route announcement, also used as flags */
318 #define RA_UNDEF 0 /* Undefined RA type */
319 #define RA_OPTIMAL 1 /* Announcement of optimal route change */
320 #define RA_ACCEPTED 2 /* Announcement of first accepted route */
321 #define RA_ANY 3 /* Announcement of any route change */
322 #define RA_MERGED 4 /* Announcement of optimal route merged with next ones */
324 /* Return value of preexport() callback */
325 #define RIC_ACCEPT 1 /* Accepted by protocol */
326 #define RIC_PROCESS 0 /* Process it through import filter */
327 #define RIC_REJECT -1 /* Rejected by protocol */
328 #define RIC_DROP -2 /* Silently dropped by protocol */
330 extern list routing_tables
;
334 void rt_preconfig(struct config
*);
335 void rt_commit(struct config
*new, struct config
*old
);
336 void rt_lock_table(rtable
*);
337 void rt_unlock_table(rtable
*);
338 struct f_trie
* rt_lock_trie(rtable
*tab
);
339 void rt_unlock_trie(rtable
*tab
, struct f_trie
*trie
);
340 void rt_subscribe(rtable
*tab
, struct rt_subscription
*s
);
341 void rt_unsubscribe(struct rt_subscription
*s
);
342 void rt_flowspec_link(rtable
*src
, rtable
*dst
);
343 void rt_flowspec_unlink(rtable
*src
, rtable
*dst
);
344 rtable
*rt_setup(pool
*, struct rtable_config
*);
345 static inline void rt_shutdown(rtable
*r
) { rfree(r
->rp
); }
347 static inline net
*net_find(rtable
*tab
, const net_addr
*addr
) { return (net
*) fib_find(&tab
->fib
, addr
); }
348 static inline net
*net_find_valid(rtable
*tab
, const net_addr
*addr
)
349 { net
*n
= net_find(tab
, addr
); return (n
&& rte_is_valid(n
->routes
)) ? n
: NULL
; }
350 static inline net
*net_get(rtable
*tab
, const net_addr
*addr
) { return (net
*) fib_get(&tab
->fib
, addr
); }
351 net
*net_get(rtable
*tab
, const net_addr
*addr
);
352 net
*net_route(rtable
*tab
, const net_addr
*n
);
353 int net_roa_check(rtable
*tab
, const net_addr
*n
, u32 asn
);
354 rte
*rte_find(net
*net
, struct rte_src
*src
);
355 rte
*rte_get_temp(struct rta
*);
356 void rte_update2(struct channel
*c
, const net_addr
*n
, rte
*new, struct rte_src
*src
);
357 /* rte_update() moved to protocol.h to avoid dependency conflicts */
358 int rt_examine(rtable
*t
, net_addr
*a
, struct proto
*p
, const struct filter
*filter
);
359 rte
*rt_export_merged(struct channel
*c
, net
*net
, rte
**rt_free
, linpool
*pool
, int silent
);
360 void rt_refresh_begin(rtable
*t
, struct channel
*c
);
361 void rt_refresh_end(rtable
*t
, struct channel
*c
);
362 void rt_modify_stale(rtable
*t
, struct channel
*c
);
363 void rt_schedule_prune(rtable
*t
);
364 void rte_dump(rte
*);
365 void rte_free(rte
*);
366 rte
*rte_do_cow(rte
*);
367 static inline rte
* rte_cow(rte
*r
) { return (r
->flags
& REF_COW
) ? rte_do_cow(r
) : r
; }
368 rte
*rte_cow_rta(rte
*r
, linpool
*lp
);
369 void rte_init_tmp_attrs(struct rte
*r
, linpool
*lp
, uint max
);
370 void rte_make_tmp_attr(struct rte
*r
, uint id
, uint type
, uintptr_t val
);
371 void rte_make_tmp_attrs(struct rte
**r
, struct linpool
*pool
, struct rta
**old_attrs
);
372 uintptr_t rte_store_tmp_attr(struct rte
*r
, uint id
);
373 void rt_dump(rtable
*);
374 void rt_dump_all(void);
375 int rt_feed_channel(struct channel
*c
);
376 void rt_feed_channel_abort(struct channel
*c
);
377 int rte_update_in(struct channel
*c
, const net_addr
*n
, rte
*new, struct rte_src
*src
);
378 int rt_reload_channel(struct channel
*c
);
379 void rt_reload_channel_abort(struct channel
*c
);
380 void rt_prune_sync(rtable
*t
, int all
);
381 int rte_update_out(struct channel
*c
, const net_addr
*n
, rte
*new, rte
*old0
, int refeed
);
382 struct rtable_config
*rt_new_table(struct symbol
*s
, uint addr_type
);
384 static inline int rt_is_ip(rtable
*tab
)
385 { return (tab
->addr_type
== NET_IP4
) || (tab
->addr_type
== NET_IP6
); }
387 static inline int rt_is_vpn(rtable
*tab
)
388 { return (tab
->addr_type
== NET_VPN4
) || (tab
->addr_type
== NET_VPN6
); }
390 static inline int rt_is_roa(rtable
*tab
)
391 { return (tab
->addr_type
== NET_ROA4
) || (tab
->addr_type
== NET_ROA6
); }
393 static inline int rt_is_flow(rtable
*tab
)
394 { return (tab
->addr_type
== NET_FLOW4
) || (tab
->addr_type
== NET_FLOW6
); }
397 /* Default limit for ECMP next hops, defined in sysdep code */
398 extern const int rt_default_ecmp
;
400 struct rt_show_data_rtable
{
403 struct channel
*export_channel
;
406 struct rt_show_data
{
409 struct rt_show_data_rtable
*tab
; /* Iterator over table list */
410 struct rt_show_data_rtable
*last_table
; /* Last table in output */
411 struct fib_iterator fit
; /* Iterator over networks in table */
412 struct f_trie_walk_state
*walk_state
; /* Iterator over networks in trie */
413 struct f_trie
*walk_lock
; /* Locked trie for walking */
414 int verbose
, tables_defined_by
;
415 const struct filter
*filter
;
416 struct proto
*show_protocol
;
417 struct proto
*export_protocol
;
418 struct channel
*export_channel
;
419 struct config
*running_on_config
;
420 struct krt_proto
*kernel
;
421 int export_mode
, addr_mode
, primary_only
, filtered
, stats
;
423 int table_open
; /* Iteration (fit) is open */
424 int trie_walk
; /* Current table is iterated using trie */
425 int net_counter
, rt_counter
, show_counter
, table_counter
;
426 int net_counter_last
, rt_counter_last
, show_counter_last
;
429 void rt_show(struct rt_show_data
*);
430 struct rt_show_data_rtable
* rt_show_add_table(struct rt_show_data
*d
, rtable
*t
);
432 /* Value of table definition mode in struct rt_show_data */
433 #define RSD_TDB_DEFAULT 0 /* no table specified */
434 #define RSD_TDB_INDIRECT 0 /* show route ... protocol P ... */
435 #define RSD_TDB_ALL RSD_TDB_SET /* show route ... table all ... */
436 #define RSD_TDB_DIRECT RSD_TDB_SET | RSD_TDB_NMN /* show route ... table X table Y ... */
438 #define RSD_TDB_SET 0x1 /* internal: show empty tables */
439 #define RSD_TDB_NMN 0x2 /* internal: need matching net */
441 /* Value of addr_mode */
442 #define RSD_ADDR_EQUAL 1 /* Exact query - show route <addr> */
443 #define RSD_ADDR_FOR 2 /* Longest prefix match - show route for <addr> */
444 #define RSD_ADDR_IN 3 /* Interval query - show route in <addr> */
446 /* Value of export_mode in struct rt_show_data */
447 #define RSEM_NONE 0 /* Export mode not used */
448 #define RSEM_PREEXPORT 1 /* Routes ready for export, before filtering */
449 #define RSEM_EXPORT 2 /* Routes accepted by export filter */
450 #define RSEM_NOEXPORT 3 /* Routes rejected by export filter */
451 #define RSEM_EXPORTED 4 /* Routes marked in export map */
456 * Beware: All standard BGP attributes must be represented here instead
457 * of making them local to the route. This is needed to ensure proper
458 * construction of BGP route attribute lists.
461 /* Nexthop structure */
463 ip_addr gw
; /* Next hop */
464 struct iface
*iface
; /* Outgoing interface */
465 struct nexthop
*next
;
468 byte labels_orig
; /* Number of labels before hostentry was applied */
469 byte labels
; /* Number of all labels */
473 #define RNF_ONLINK 0x1 /* Gateway is onlink regardless of IP ranges */
477 struct rte_src
*next
; /* Hash chain */
478 struct proto
*proto
; /* Protocol the source is based on */
479 u32 private_id
; /* Private ID, assigned by the protocol */
480 u32 global_id
; /* Globally unique ID of the source */
481 unsigned uc
; /* Use count */
486 struct rta
*next
, **pprev
; /* Hash chain */
487 u32 uc
; /* Use count */
488 u32 hash_key
; /* Hash over important fields */
489 struct ea_list
*eattrs
; /* Extended Attribute chain */
490 struct rte_src
*src
; /* Route source that created the route */
491 struct hostentry
*hostentry
; /* Hostentry for recursive next-hops */
492 ip_addr from
; /* Advertising router */
493 u32 igp_metric
; /* IGP metric to next hop (for iBGP routes) */
494 u8 source
; /* Route source (RTS_...) */
495 u8 scope
; /* Route scope (SCOPE_... -- see ip.h) */
496 u8 dest
; /* Route destination type (RTD_...) */
498 struct nexthop nh
; /* Next hop */
501 #define RTS_DUMMY 0 /* Dummy route to be removed soon */
502 #define RTS_STATIC 1 /* Normal static route */
503 #define RTS_INHERIT 2 /* Route inherited from kernel */
504 #define RTS_DEVICE 3 /* Device route */
505 #define RTS_STATIC_DEVICE 4 /* Static device route */
506 #define RTS_REDIRECT 5 /* Learned via redirect */
507 #define RTS_RIP 6 /* RIP route */
508 #define RTS_OSPF 7 /* OSPF route */
509 #define RTS_OSPF_IA 8 /* OSPF inter-area route */
510 #define RTS_OSPF_EXT1 9 /* OSPF external route type 1 */
511 #define RTS_OSPF_EXT2 10 /* OSPF external route type 2 */
512 #define RTS_BGP 11 /* BGP route */
513 #define RTS_PIPE 12 /* Inter-table wormhole */
514 #define RTS_BABEL 13 /* Babel route */
515 #define RTS_RPKI 14 /* Route Origin Authorization */
516 #define RTS_PERF 15 /* Perf checker */
519 #define RTC_UNICAST 0
520 #define RTC_BROADCAST 1
521 #define RTC_MULTICAST 2
522 #define RTC_ANYCAST 3 /* IPv6 Anycast */
524 #define RTD_NONE 0 /* Undefined next hop */
525 #define RTD_UNICAST 1 /* Next hop is neighbor router */
526 #define RTD_BLACKHOLE 2 /* Silently drop packets */
527 #define RTD_UNREACHABLE 3 /* Reject as unreachable */
528 #define RTD_PROHIBIT 4 /* Administratively prohibited */
531 #define RTAF_CACHED 1 /* This is a cached rta */
533 #define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
					   protocol-specific metric is available */
537 extern const char * rta_dest_names
[RTD_MAX
];
539 static inline const char *rta_dest_name(uint n
)
540 { return (n
< RTD_MAX
) ? rta_dest_names
[n
] : "???"; }
542 /* Route has regular, reachable nexthop (i.e. not RTD_UNREACHABLE and like) */
543 static inline int rte_is_reachable(rte
*r
)
544 { return r
->attrs
->dest
== RTD_UNICAST
; }
548 * Extended Route Attributes
551 typedef struct eattr
{
552 word id
; /* EA_CODE(PROTOCOL_..., protocol-dependent ID) */
553 byte flags
; /* Protocol-dependent flags */
554 byte type
; /* Attribute type and several flags (EAF_...) */
557 const struct adata
*ptr
; /* Attribute data elsewhere */
562 #define EA_CODE(proto,id) (((proto) << 8) | (id))
563 #define EA_ID(ea) ((ea) & 0xff)
564 #define EA_PROTO(ea) ((ea) >> 8)
565 #define EA_ID_FLAG(ea) (1 << EA_ID(ea))
566 #define EA_CUSTOM(id) ((id) | EA_CUSTOM_BIT)
567 #define EA_IS_CUSTOM(ea) ((ea) & EA_CUSTOM_BIT)
568 #define EA_CUSTOM_ID(ea) ((ea) & ~EA_CUSTOM_BIT)
570 const char *ea_custom_name(uint ea
);
572 #define EA_GEN_IGP_METRIC EA_CODE(PROTOCOL_NONE, 0)
574 #define EA_CODE_MASK 0xffff
575 #define EA_CUSTOM_BIT 0x8000
576 #define EA_ALLOW_UNDEF 0x10000 /* ea_find: allow EAF_TYPE_UNDEF */
577 #define EA_BIT(n) ((n) << 24) /* Used in bitfield accessors */
578 #define EA_BIT_GET(ea) ((ea) >> 24)
580 #define EAF_TYPE_MASK 0x1f /* Mask with this to get type */
581 #define EAF_TYPE_INT 0x01 /* 32-bit unsigned integer number */
582 #define EAF_TYPE_OPAQUE 0x02 /* Opaque byte string (not filterable) */
583 #define EAF_TYPE_IP_ADDRESS 0x04 /* IP address */
584 #define EAF_TYPE_ROUTER_ID 0x05 /* Router ID (IPv4 address) */
585 #define EAF_TYPE_AS_PATH 0x06 /* BGP AS path (encoding per RFC 1771:4.3) */
586 #define EAF_TYPE_BITFIELD 0x09 /* 32-bit embedded bitfield */
587 #define EAF_TYPE_INT_SET 0x0a /* Set of u32's (e.g., a community list) */
588 #define EAF_TYPE_EC_SET 0x0e /* Set of pairs of u32's - ext. community list */
589 #define EAF_TYPE_LC_SET 0x12 /* Set of triplets of u32's - large community list */
590 #define EAF_TYPE_UNDEF 0x1f /* `force undefined' entry */
591 #define EAF_EMBEDDED 0x01 /* Data stored in eattr.u.data (part of type spec) */
592 #define EAF_VAR_LENGTH 0x02 /* Attribute length is variable (part of type spec) */
593 #define EAF_ORIGINATED 0x20 /* The attribute has originated locally */
594 #define EAF_FRESH 0x40 /* An uncached attribute (e.g. modified in export filter) */
596 typedef struct adata
{
597 uint length
; /* Length of data */
601 extern const adata null_adata
; /* adata of length 0 */
603 static inline struct adata
*
604 lp_alloc_adata(struct linpool
*pool
, uint len
)
606 struct adata
*ad
= lp_alloc(pool
, sizeof(struct adata
) + len
);
611 static inline int adata_same(const struct adata
*a
, const struct adata
*b
)
612 { return (a
->length
== b
->length
&& !memcmp(a
->data
, b
->data
, a
->length
)); }
615 typedef struct ea_list
{
616 struct ea_list
*next
; /* In case we have an override list */
617 byte flags
; /* Flags: EALF_... */
619 word count
; /* Number of attributes */
620 eattr attrs
[0]; /* Attribute definitions themselves */
623 #define EALF_SORTED 1 /* Attributes are sorted by code */
624 #define EALF_BISECT 2 /* Use interval bisection for searching */
625 #define EALF_CACHED 4 /* Attributes belonging to cached rta */
626 #define EALF_TEMP 8 /* Temporary ea_list added by make_tmp_attrs hooks */
628 struct rte_src
*rt_find_source(struct proto
*p
, u32 id
);
629 struct rte_src
*rt_get_source(struct proto
*p
, u32 id
);
630 static inline void rt_lock_source(struct rte_src
*src
) { src
->uc
++; }
631 static inline void rt_unlock_source(struct rte_src
*src
) { src
->uc
--; }
632 void rt_prune_sources(void);
634 struct ea_walk_state
{
635 ea_list
*eattrs
; /* Current ea_list, initially set by caller */
636 eattr
*ea
; /* Current eattr, initially NULL */
637 u32 visited
[4]; /* Bitfield, limiting max to 128 */
640 eattr
*ea_find(ea_list
*, unsigned ea
);
641 eattr
*ea_walk(struct ea_walk_state
*s
, uint id
, uint max
);
642 int ea_get_int(ea_list
*, unsigned ea
, int def
);
643 void ea_dump(ea_list
*);
644 void ea_sort(ea_list
*); /* Sort entries in all sub-lists */
645 unsigned ea_scan(ea_list
*); /* How many bytes do we need for merged ea_list */
646 void ea_merge(ea_list
*from
, ea_list
*to
); /* Merge sub-lists to allocated buffer */
647 int ea_same(ea_list
*x
, ea_list
*y
); /* Test whether two ea_lists are identical */
648 uint
ea_hash(ea_list
*e
); /* Calculate 16-bit hash value */
649 ea_list
*ea_append(ea_list
*to
, ea_list
*what
);
650 void ea_format_bitfield(const struct eattr
*a
, byte
*buf
, int bufsize
, const char **names
, int min
, int max
);
652 #define ea_normalize(ea) do { \
654 ea_list *t = alloca(ea_scan(ea)); \
659 if (ea->count == 0) \
663 static inline eattr *
664 ea_set_attr(ea_list
**to
, struct linpool
*pool
, uint id
, uint flags
, uint type
, uintptr_t val
)
666 ea_list
*a
= lp_alloc(pool
, sizeof(ea_list
) + sizeof(eattr
));
667 eattr
*e
= &a
->attrs
[0];
669 a
->flags
= EALF_SORTED
;
678 if (type
& EAF_EMBEDDED
)
679 e
->u
.data
= (u32
) val
;
681 e
->u
.ptr
= (struct adata
*) val
;
687 ea_set_attr_u32(ea_list
**to
, struct linpool
*pool
, uint id
, uint flags
, uint type
, u32 val
)
688 { ea_set_attr(to
, pool
, id
, flags
, type
, (uintptr_t) val
); }
691 ea_set_attr_ptr(ea_list
**to
, struct linpool
*pool
, uint id
, uint flags
, uint type
, struct adata
*val
)
692 { ea_set_attr(to
, pool
, id
, flags
, type
, (uintptr_t) val
); }
695 ea_set_attr_data(ea_list
**to
, struct linpool
*pool
, uint id
, uint flags
, uint type
, void *data
, uint len
)
697 struct adata
*a
= lp_alloc_adata(pool
, len
);
698 memcpy(a
->data
, data
, len
);
699 ea_set_attr(to
, pool
, id
, flags
, type
, (uintptr_t) a
);
703 #define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
705 static inline size_t nexthop_size(const struct nexthop
*nh
)
706 { return sizeof(struct nexthop
) + sizeof(u32
)*nh
->labels
; }
707 int nexthop__same(struct nexthop
*x
, struct nexthop
*y
); /* Compare multipath nexthops */
/* Compare multipath nexthop chains, with a cheap pointer-identity shortcut
 * before falling back to the full nexthop__same() comparison. */
static inline int
nexthop_same(struct nexthop *x, struct nexthop *y)
{
  if (x == y)
    return 1;

  return nexthop__same(x, y) ? 1 : 0;
}
710 struct nexthop
*nexthop_merge(struct nexthop
*x
, struct nexthop
*y
, int rx
, int ry
, int max
, linpool
*lp
);
711 struct nexthop
*nexthop_sort(struct nexthop
*x
);
712 static inline void nexthop_link(struct rta
*a
, struct nexthop
*from
)
713 { memcpy(&a
->nh
, from
, nexthop_size(from
)); }
714 void nexthop_insert(struct nexthop
**n
, struct nexthop
*y
);
715 int nexthop_is_sorted(struct nexthop
*x
);
718 static inline size_t rta_size(const rta
*a
) { return sizeof(rta
) + sizeof(u32
)*a
->nh
.labels
; }
719 #define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
720 rta
*rta_lookup(rta
*); /* Get rta equivalent to this one, uc++ */
721 static inline int rta_is_cached(rta
*r
) { return r
->aflags
& RTAF_CACHED
; }
722 static inline rta
*rta_clone(rta
*r
) { r
->uc
++; return r
; }
723 void rta__free(rta
*r
);
724 static inline void rta_free(rta
*r
) { if (r
&& !--r
->uc
) rta__free(r
); }
725 rta
*rta_do_cow(rta
*o
, linpool
*lp
);
726 static inline rta
* rta_cow(rta
*r
, linpool
*lp
) { return rta_is_cached(r
) ? rta_do_cow(r
, lp
) : r
; }
727 void rta_dump(rta
*);
728 void rta_dump_all(void);
729 void rta_show(struct cli
*, rta
*);
731 u32
rt_get_igp_metric(rte
*rt
);
732 struct hostentry
* rt_get_hostentry(rtable
*tab
, ip_addr a
, ip_addr ll
, rtable
*dep
);
733 void rta_apply_hostentry(rta
*a
, struct hostentry
*he
, mpls_label_stack
*mls
);
736 rta_set_recursive_next_hop(rtable
*dep
, rta
*a
, rtable
*tab
, ip_addr gw
, ip_addr ll
, mpls_label_stack
*mls
)
738 rta_apply_hostentry(a
, rt_get_hostentry(tab
, gw
, ll
, dep
), mls
);
742 * rta_set_recursive_next_hop() acquires hostentry from hostcache and fills
743 * rta->hostentry field. New hostentry has zero use count. Cached rta locks its
744 * hostentry (increases its use count), uncached rta does not lock it. Hostentry
745 * with zero use count is removed asynchronously during host cache update,
 * therefore it is safe to hold such hostentry temporarily. Hostentry holds a
747 * lock for a 'source' rta, mainly to share multipath nexthops.
749 * There is no need to hold a lock for hostentry->dep table, because that table
750 * contains routes responsible for that hostentry, and therefore is non-empty if
751 * given hostentry has non-zero use count. If the hostentry has zero use count,
752 * the entry is removed before dep is referenced.
754 * The protocol responsible for routes with recursive next hops should hold a
755 * lock for a 'source' table governing that routes (argument tab to
756 * rta_set_recursive_next_hop()), because its routes reference hostentries
757 * (through rta) related to the governing table. When all such routes are
758 * removed, rtas are immediately removed achieving zero uc. Then the 'source'
759 * table lock could be immediately released, although hostentries may still
760 * exist - they will be freed together with the 'source' table.
763 static inline void rt_lock_hostentry(struct hostentry
*he
) { if (he
) he
->uc
++; }
764 static inline void rt_unlock_hostentry(struct hostentry
*he
) { if (he
) he
->uc
--; }
766 int rt_flowspec_check(rtable
*tab_ip
, rtable
*tab_flow
, const net_addr
*n
, rta
*a
, int interior
);
770 * Default protocol preferences
773 #define DEF_PREF_DIRECT 240 /* Directly connected */
774 #define DEF_PREF_STATIC 200 /* Static route */
775 #define DEF_PREF_OSPF 150 /* OSPF intra-area, inter-area and type 1 external routes */
776 #define DEF_PREF_BABEL 130 /* Babel */
777 #define DEF_PREF_RIP 120 /* RIP */
778 #define DEF_PREF_BGP 100 /* BGP */
779 #define DEF_PREF_RPKI 100 /* RPKI */
780 #define DEF_PREF_INHERITED 10 /* Routes inherited from other routing daemons */
783 * Route Origin Authorization
786 #define ROA_UNKNOWN 0
788 #define ROA_INVALID 2