struct proto;
struct cli;
struct rtable_private;
+struct rte_storage;
+
+#define RTE_IN_TABLE_WRITABLE \
+ byte pflags; /* Protocol-specific flags; may change in-table (!) */ \
+ u8 stale_cycle; /* Auxiliary value for route refresh; may change in-table (!) */
typedef struct rte {
+ RTE_IN_TABLE_WRITABLE;
+ byte flags; /* Table-specific flags */
+ u8 generation; /* If this route import is based on another previously exported route,
+ this value should be 1 + MAX(generation of the parent routes).
+ Otherwise the route is independent and this value is zero. */
+ u32 id; /* Table specific route id */
struct ea_list *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
btime lastmod; /* Last modified (set by table) */
- u32 id; /* Table specific route id */
- byte flags; /* Table-specific flags */
- byte pflags; /* Protocol-specific flags */
- u8 generation; /* If this route import is based on other previously exported route,
- this value should be 1 + MAX(generation of the parent routes).
- Otherwise the route is independent and this value is zero. */
- u8 stale_cycle; /* Auxiliary value for route refresh */
} rte;
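The two `(!)` comments mark the invariant this refactor depends on: `pflags` and `stale_cycle` are the only fields the table may still modify after a route is stored, so they must form a common prefix of both `struct rte` and the anonymous struct inside `struct rte_storage` (see the union further below). A minimal self-contained model of the pattern, with stand-in types rather than BIRD's own, compile-checkable via C11 `_Static_assert`:

    /* Model of the writable-prefix union pattern; all types here are stand-ins. */
    #include <stddef.h>

    typedef unsigned char byte;
    typedef unsigned char u8;

    #define IN_TABLE_WRITABLE \
      byte pflags;      /* may change in-table */ \
      u8 stale_cycle    /* may change in-table */

    typedef struct route {
      IN_TABLE_WRITABLE;
      byte flags;       /* immutable once stored */
    } route;

    struct route_storage {
      struct route_storage *next;
      union {
        struct { IN_TABLE_WRITABLE; };   /* writable view of the prefix */
        const route route;               /* read-only view of the whole route */
      };
    };

    /* The overlay is only sound if both views alias the prefix exactly. */
    _Static_assert(offsetof(route, pflags) == 0, "prefix must lead the route");
    _Static_assert(offsetof(struct route_storage, pflags)
                   == offsetof(struct route_storage, route),
                   "union views must start at the same offset");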
#define REF_FILTERED 2 /* Route is rejected by import filter */
struct rte_owner {
struct rte_owner_class *class;
- int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte *, struct rte *, struct rte *);
+ int (*rte_recalculate)(struct rtable_private *, struct network *, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
HASH(struct rte_src) hash;
const char *name;
u32 hash_key;
{ return (v < FLOWSPEC__MAX) ? flowspec_valid_names[v] : "???"; }
extern struct ea_class ea_gen_flowspec_valid;
-static inline enum flowspec_valid rt_get_flowspec_valid(rte *rt)
+static inline enum flowspec_valid rt_get_flowspec_valid(const rte *rt)
{ return ea_get_int(rt->attrs, &ea_gen_flowspec_valid, FLOWSPEC_UNKNOWN); }
/* Next hop: For now, stored as adata */
static void rt_export_used(struct rt_table_exporter *, const char *, const char *);
static void rt_export_cleanup(struct rtable_private *tab);
-static int rte_same(rte *x, rte *y);
+static int rte_same(const rte *x, const rte *y);
const char *rt_import_state_name_array[TIS_MAX] = {
[TIS_DOWN] = "DOWN",
struct rte_storage *
rte_store(const rte *r, net *net, struct rtable_private *tab)
{
- struct rte_storage *e = sl_alloc(tab->rte_slab);
+ struct rte_storage *s = sl_alloc(tab->rte_slab);
+ struct rte *e = RTES_WRITE(s);
- e->rte = *r;
- e->rte.net = net->n.addr;
+ *e = *r;
+ e->net = net->n.addr;
- rt_lock_source(e->rte.src);
+ rt_lock_source(e->src);
- if (ea_is_cached(e->rte.attrs))
- e->rte.attrs = rta_clone(e->rte.attrs);
+ if (ea_is_cached(e->attrs))
+ e->attrs = rta_clone(e->attrs);
else
- e->rte.attrs = rta_lookup(e->rte.attrs, 1);
+ e->attrs = rta_lookup(e->attrs, 1);
- return e;
+ return s;
}
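The stored route is returned as `struct rte_storage *`, whose embedded `rte` is const; callers that still have to finish initializing a just-stored, not-yet-published route get a writable view via `RTES_WRITE()`, as `rte_recalculate()` does below. Condensed from this patch (not a new API), the calling pattern looks like:

    struct rte_storage *new_stored = rte_store(new, net, table);
    rte *e = RTES_WRITE(new_stored);     /* writable only before publication */

    e->lastmod = current_time();
    e->id = hmap_first_zero(&table->id_map);
    hmap_set(&table->id_map, e->id);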
/**
}
static void
-rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old)
+rt_notify_basic(struct channel *c, const net_addr *net, rte *new, const rte *old)
{
if (new && old && rte_same(new, old))
{
rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
- rte *o = RTE_VALID_OR_NULL(first->old_best);
+ const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
- rte *n = RTE_VALID_OR_NULL(first->new);
- rte *o = RTE_VALID_OR_NULL(first->old);
+ const rte *n = RTE_VALID_OR_NULL(first->new);
+ const rte *o = RTE_VALID_OR_NULL(first->old);
if (!n && !o)
{
}
static int
-rte_same(rte *x, rte *y)
+rte_same(const rte *x, const rte *y)
{
/* rte.flags / rte.pflags are not checked, as they are internal to rtable */
return
rte_is_filtered(x) == rte_is_filtered(y);
}
-static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
+static inline int rte_is_ok(const rte *e) { return e && !rte_is_filtered(e); }
static int
rte_recalculate(struct rtable_private *table, struct rt_import_hook *c, net *net, rte *new, struct rte_src *src)
struct rt_import_request *req = c->req;
struct rt_import_stats *stats = &c->stats;
struct rte_storage *old_best_stored = net->routes, *old_stored = NULL;
- rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
- rte *old = NULL;
+ const rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
+ const rte *old = NULL;
/* If the new route is identical to the old one, we find the attributes in
* cache and clone these with no performance drop. OTOH, if we were to lookup
* therefore it's definitely worth the time. */
struct rte_storage *new_stored = NULL;
if (new)
- new = &(new_stored = rte_store(new, net, table))->rte;
+ {
+ new_stored = rte_store(new, net, table);
+ new = RTES_WRITE(new_stored);
+ }
/* Find and remove original route from the same protocol */
struct rte_storage **before_old = rte_find(net, src);
if (new && rte_same(old, &new_stored->rte))
{
/* No changes, ignore the new route and refresh the old one */
- old->stale_cycle = new->stale_cycle;
+ old_stored->stale_cycle = new->stale_cycle;
if (!rte_is_filtered(new))
{
the first position. There are several optimized cases. */
if (src->owner->rte_recalculate &&
- src->owner->rte_recalculate(table, net, new_stored ? &new_stored->rte : NULL, old, old_best))
+ src->owner->rte_recalculate(table, net, new_stored, old_stored, old_best_stored))
goto do_recalculate;
if (new_stored && rte_better(&new_stored->rte, old_best))
if (new_stored)
{
- new_stored->rte.lastmod = current_time();
- new_stored->rte.id = hmap_first_zero(&table->id_map);
- hmap_set(&table->id_map, new_stored->rte.id);
+ new->lastmod = current_time();
+ new->id = hmap_first_zero(&table->id_map);
+ hmap_set(&table->id_map, new->id);
}
/* Log the route change */
}
int
-channel_preimport(struct rt_import_request *req, rte *new, rte *old)
+channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
{
struct channel *c = SKIP_BACK(struct channel, in_req, req);
{
for (struct rte_storage *e = n->routes; e; e = e->next)
if (e->rte.sender == req->hook)
- e->rte.stale_cycle = 0;
+ e->stale_cycle = 0;
}
FIB_WALK_END;
}
/* This net may affect some flowspecs, check the actual change */
- rte *o = RTE_VALID_OR_NULL(first->old_best);
+ const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
}
static inline int
-rt_next_hop_update_rte(rte *old, rte *new)
+rt_next_hop_update_rte(const rte *old, rte *new)
{
struct hostentry_adata *head = rta_next_hop_outdated(old->attrs);
if (!head)
#endif /* CONFIG_BGP */
static int
-rt_flowspec_update_rte(rtable *tab, rte *r, rte *new)
+rt_flowspec_update_rte(rtable *tab, const rte *r, rte *new)
{
#ifdef CONFIG_BGP
if (r->generation || (rt_get_source_attr(r) != RTS_BGP))
if (updates[i].new_stored)
{
/* Get a new ID for the route */
- updates[i].new_stored->rte.lastmod = current_time();
- updates[i].new_stored->rte.id = hmap_first_zero(&tab->id_map);
- hmap_set(&tab->id_map, updates[i].new_stored->rte.id);
+ rte *new = RTES_WRITE(updates[i].new_stored);
+ new->lastmod = current_time();
+ new->id = hmap_first_zero(&tab->id_map);
+ hmap_set(&tab->id_map, new->id);
/* Call a pre-comparison hook */
/* Not really an efficient way to compute this */
if (updates[i].old->rte.src->owner->rte_recalculate)
- updates[i].old->rte.src->owner->rte_recalculate(tab, n, &updates[i].new_stored->rte, &updates[i].old->rte, &old_best->rte);
+ updates[i].old->rte.src->owner->rte_recalculate(tab, n, updates[i].new_stored, updates[i].old, old_best);
}
#if DEBUGGING
else
{
/* This net may affect some hostentries, check the actual change */
- rte *o = RTE_VALID_OR_NULL(first->old_best);
+ const rte *o = RTE_VALID_OR_NULL(first->old_best);
struct rte_storage *new_best = first->new_best;
RPE_WALK(first, rpe, NULL)
struct rte_storage {
struct rte_storage *next; /* Next in chain */
- struct rte rte; /* Route data */
+ union {
+ struct {
+ RTE_IN_TABLE_WRITABLE;
+ };
+ const struct rte rte; /* Route data */
+ };
};
#define RTE_COPY(r) ((r) ? (r)->rte : (rte) {})
#define RTE_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
#define RTE_VALID_OR_NULL(r) (((r) && (rte_is_valid(&(r)->rte))) ? &((r)->rte) : NULL)
+#define RTES_WRITE(r) (((r) != ((struct rte_storage *) 0)) ? ((struct rte *) &(r)->rte) : NULL)
+
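Taken together, the accessors encode the access policy: reads always go through the const `rte` view (`RTE_COPY`, `RTE_OR_NULL`, `RTE_VALID_OR_NULL`), the two `RTE_IN_TABLE_WRITABLE` fields may be written through the anonymous struct, and `RTES_WRITE()` is reserved for routes not yet published. A short usage sketch, assuming a stored route `s` in table code:

    /* Reading: always via the const view. */
    const rte *r = RTE_VALID_OR_NULL(s);
    if (r)
      log(L_TRACE "route %N has id %u", r->net, r->id);

    /* In-table writes: only the writable prefix, via the union. */
    s->stale_cycle = 0;                  /* as in the refresh-done loop */
    s->pflags |= BGP_REF_SUPPRESSED;     /* as in bgp_rte_recalculate() */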
/* Table-channel connections */
struct rt_import_request {
void (*log_state_change)(struct rt_import_request *req, u8 state);
/* Preimport is called when the @new route is just-to-be inserted, replacing @old.
* Return nonzero to continue the import, zero to withdraw the route. */
- int (*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
+ int (*preimport)(struct rt_import_request *req, struct rte *new, const struct rte *old);
};
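With `old` now const, a preimport hook may still rewrite `new` in place but must leave the stored route alone. A hypothetical minimal hook following the contract above (`channel_over_limit()` is an invented placeholder, not a BIRD function):

    static int
    my_preimport(struct rt_import_request *req, rte *new, const rte *old)
    {
      struct channel *c = SKIP_BACK(struct channel, in_req, req);

      /* Reject a brand-new route when the (hypothetical) limit is hit */
      if (new && !old && channel_over_limit(c))
        return 0;   /* zero: withdraw / reject */

      return 1;     /* nonzero: continue the import */
    }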
struct rt_import_hook {
* Channel export hooks. To be refactored out.
*/
-int channel_preimport(struct rt_import_request *req, rte *new, rte *old);
+int channel_preimport(struct rt_import_request *req, rte *new, const rte *old);
void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
static inline int
-same_group(rte *r, u32 lpref, u32 lasn)
+same_group(const rte *r, u32 lpref, u32 lasn)
{
return (rt_get_preference(r) == lpref) && (bgp_get_neighbor(r) == lasn);
}
}
int
-bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old, rte *old_best)
+bgp_rte_recalculate(struct rtable_private *table, net *net,
+ struct rte_storage *new_stored, struct rte_storage *old_stored, struct rte_storage *old_best_stored)
{
- rte *key = new ? new : old;
+ struct rte_storage *key_stored = new_stored ? new_stored : old_stored;
+ const struct rte *new = RTE_OR_NULL(new_stored),
+ *old = RTE_OR_NULL(old_stored),
+ *old_best = RTE_OR_NULL(old_best_stored),
+ *key = RTE_OR_NULL(key_stored);
+
u32 lpref = rt_get_preference(key);
u32 lasn = bgp_get_neighbor(key);
int old_suppressed = old ? !!(old->pflags & BGP_REF_SUPPRESSED) : 0;
if (new && old && !same_group(old, lpref, lasn))
{
int i1, i2;
- i1 = bgp_rte_recalculate(table, net, NULL, old, old_best);
- i2 = bgp_rte_recalculate(table, net, new, NULL, old_best);
+ i1 = bgp_rte_recalculate(table, net, NULL, old_stored, old_best_stored);
+ i2 = bgp_rte_recalculate(table, net, new_stored, NULL, old_best_stored);
return i1 || i2;
}
*/
if (new)
- new->pflags |= BGP_REF_SUPPRESSED;
+ new_stored->pflags |= BGP_REF_SUPPRESSED;
if (old)
{
- old->pflags |= BGP_REF_SUPPRESSED;
+ old_stored->pflags |= BGP_REF_SUPPRESSED;
/* The fast case - replace not best with worse (or remove not best) */
if (old_suppressed && !(new && bgp_rte_better(new, old)))
}
/* The default case - find a new best-in-group route */
- rte *r = new; /* new may not be in the list */
+ struct rte_storage *r = new_stored; /* new may not be in the list */
for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
{
- s->rte.pflags |= BGP_REF_SUPPRESSED;
- if (!r || bgp_rte_better(&s->rte, r))
- r = &s->rte;
+ s->pflags |= BGP_REF_SUPPRESSED;
+ if (!r || bgp_rte_better(&s->rte, &r->rte))
+ r = s;
}
/* Simple case - the last route in group disappears */
return 0;
/* Found if new is mergable with best-in-group */
- if (new && (new != r) && bgp_rte_mergable(r, new))
- new->pflags &= ~BGP_REF_SUPPRESSED;
+ if (new && (new_stored != r) && bgp_rte_mergable(&r->rte, new))
+ new_stored->pflags &= ~BGP_REF_SUPPRESSED;
/* Found all existing routes mergable with best-in-group */
for (struct rte_storage *s = net->routes; rte_is_valid(RTE_OR_NULL(s)); s = s->next)
if (use_deterministic_med(s) && same_group(&s->rte, lpref, lasn))
- if ((&s->rte != r) && bgp_rte_mergable(r, &s->rte))
- s->rte.pflags &= ~BGP_REF_SUPPRESSED;
+ if ((s != r) && bgp_rte_mergable(&r->rte, &s->rte))
+ s->pflags &= ~BGP_REF_SUPPRESSED;
/* Found best-in-group */
r->pflags &= ~BGP_REF_SUPPRESSED;
* the first reason does not apply, return 0
*/
- if (r == new)
+ if (r == new_stored)
return old_best && same_group(old_best, lpref, lasn);
else
return !old_suppressed;
int bgp_rte_better(const rte *, const rte *);
int bgp_rte_mergable(const rte *pri, const rte *sec);
-int bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old, rte *old_best);
+int bgp_rte_recalculate(struct rtable_private *table, net *net, struct rte_storage *new, struct rte_storage *old, struct rte_storage *old_best);
void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
u32 bgp_rte_igp_metric(const rte *);
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);