typedef struct ea_list {
struct ea_list *next; /* In case we have an override list */
byte flags; /* Flags: EALF_... */
- byte rfu;
+ byte stored:5; /* enum ea_stored: stage this list is stored (cached) at; 5 bits, so values must stay below EALS_MAX (0x20) */
+ byte rfu:3; /* Reserved for future use */
word count; /* Number of attributes */
eattr attrs[0]; /* Attribute definitions themselves */
} ea_list;
+/* Pipeline stage at which an ea_list has been stored; kept in ea_list.stored
+ * and tested against u32 bitmasks via BIT32_TEST(), so values must fit in 0..31. */
+enum ea_stored {
+ EALS_NONE = 0, /* This is a temporary ea_list */
+ EALS_PREIMPORT = 1, /* State when route entered rte_update() */
+ EALS_FILTERED = 2, /* State after filters */
+ EALS_IN_TABLE = 3, /* State in table */
+ EALS_KEY = 4, /* EA list used as key */
+ EALS_CUSTOM = 0x10, /* OR this with custom values; custom stages occupy 0x10..0x1f */
+ EALS_MAX = 0x20, /* One past the largest value the 5-bit 'stored' field can hold */
+};
+
struct ea_storage {
struct ea_storage *next_hash; /* Next in hash chain */
struct ea_storage **pprev_hash; /* Previous in hash chain */
#define EALF_SORTED 1 /* Attributes are sorted by code */
#define EALF_BISECT 2 /* Use interval bisection for searching */
-#define EALF_CACHED 4 /* List is cached */
#define EALF_HUGE 8 /* List is too big to fit into slab */
struct ea_class {
eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
void ea_dump(ea_list *);
int ea_same(ea_list *x, ea_list *y); /* Test whether two ea_lists are identical */
-uint ea_hash(ea_list *e); /* Calculate 16-bit hash value */
+uint ea_hash(ea_list *e); /* Calculate attributes hash value */
ea_list *ea_append(ea_list *to, ea_list *what);
void ea_format_bitfield(const struct eattr *a, byte *buf, int bufsize, const char **names, int min, int max);
/* Normalize ea_list; allocates the result from tmp_linpool */
-ea_list *ea_normalize(ea_list *e, int overlay);
+ea_list *ea_normalize(ea_list *e, u32 upto);
uint ea_list_size(ea_list *);
void ea_list_copy(ea_list *dest, ea_list *src, uint size);
}
void rta_init(void);
-ea_list *ea_lookup(ea_list *, int overlay); /* Get a cached (and normalized) variant of this attribute list */
-static inline int ea_is_cached(const ea_list *r) { return r->flags & EALF_CACHED; }
+
+ea_list *ea_lookup_slow(ea_list *r, u32 squash_upto, enum ea_stored oid);
+
+/* Get the ea_storage envelope wrapping a stored ea_list */
static inline struct ea_storage *ea_get_storage(ea_list *r)
{
- ASSERT_DIE(ea_is_cached(r));
+ /* Only lists actually stored (stored != EALS_NONE) have an envelope */
+ ASSERT_DIE(r->stored);
return SKIP_BACK(struct ea_storage, l[0], r);
}
-static inline ea_list *ea_clone(ea_list *r) {
+/* Take another reference to a stored ea_list (renamed from ea_clone) */
+static inline ea_list *ea_ref(ea_list *r)
+{
+ /* The refcount must already be positive — we never resurrect a dead list */
ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
return r;
}
+
+/* Obtain a variant of @r stored at stage @oid.
+ * Fast path: if @r is already stored at @oid, or at any stage set in the
+ * @squash_upto bitmask, just take another reference.
+ * Otherwise fall back to ea_lookup_slow() to normalize and cache it.
+ * Requesting EALS_NONE is forbidden. */
+static inline ea_list *ea_lookup(ea_list *r, u32 squash_upto, enum ea_stored oid)
+{
+ ASSERT_DIE(oid);
+ if ((r->stored == oid) || BIT32_TEST(&squash_upto, r->stored))
+ return ea_ref(r);
+ else
+ return ea_lookup_slow(r, squash_upto, oid);
+}
+
+/* Walk the override chain of @r, dropping layers until one stored at a
+ * stage set in the @strip_to bitmask is found; returns NULL if none is.
+ * Does not touch refcounts — caller keeps ownership of the original head. */
+static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
+{
+ ASSERT_DIE(strip_to);
+ while (r && !BIT32_TEST(&strip_to, r->stored))
+ r = r->next;
+
+ return r;
+}
+
void ea__free(struct ea_storage *r);
static inline void ea_free(ea_list *l) {
if (!l) return;
void ea_dump_all(void);
void ea_show_list(struct cli *, ea_list *);
-#define rta_lookup ea_lookup
-#define rta_is_cached ea_is_cached
-#define rta_clone ea_clone
-#define rta_free ea_free
-
#endif
ea.a[ea.l.count++] = EA_LITERAL_EMBEDDED(&ea_gen_source, 0, m->mpls_rts);
PUT_ATTR(&ea_gen_mpls_class);
- return ea_get_storage(ea_lookup(&ea.l, 0));
+ return ea_get_storage(ea_lookup(&ea.l, 0, EALS_KEY));
}
static void
* a given &ea_list after merging with ea_merge().
*/
static unsigned
-ea_scan(const ea_list *e, int overlay)
+ea_scan(const ea_list *e, u32 upto)
{
unsigned cnt = 0;
{
cnt += e->count;
e = e->next;
- if (e && overlay && ea_is_cached(e))
+ if (e && BIT32_TEST(&upto, e->stored))
break;
}
return sizeof(ea_list) + sizeof(eattr)*cnt;
* by calling ea_sort().
*/
static void
-ea_merge(ea_list *e, ea_list *t, int overlay)
+ea_merge(ea_list *e, ea_list *t, u32 upto)
{
eattr *d = t->attrs;
d += e->count;
e = e->next;
- if (e && overlay && ea_is_cached(e))
+ if (e && BIT32_TEST(&upto, e->stored))
break;
}
}
ea_list *
-ea_normalize(ea_list *e, int overlay)
+ea_normalize(ea_list *e, u32 upto)
{
#if 0
debug("(normalize)");
ea_dump(e);
debug(" ----> ");
#endif
+ /* NOTE(review): tmp_allocz presumably zero-fills, so flags/stored/rfu of the
+ * fresh head start out clean — confirm against the tmp_allocz definition */
- ea_list *t = tmp_alloc(ea_scan(e, overlay));
- ea_merge(e, t, overlay);
+ ea_list *t = tmp_allocz(ea_scan(e, upto));
+ ea_merge(e, t, upto);
ea_sort(t);
#if 0
ea_dump(t);
debug("\n");
#endif
- return t->count ? t : t->next;
+ /* Always return the fresh head, even when it holds no attributes, so the
+ * caller gets a list with well-defined stored/flags state (the old code
+ * elided an empty head and returned its tail instead) */
+ return t;
}
static _Bool
}
if (l->next)
- {
- ASSERT_DIE(ea_is_cached(l->next));
- ea_clone(l->next);
- }
+ ea_ref(l->next);
}
static void ea_free_nested(ea_list *l);
}
while (e)
{
- struct ea_storage *s = ea_is_cached(e) ? ea_get_storage(e) : NULL;
- debug("[%c%c%c] uc=%d h=%08x",
+ struct ea_storage *s = e->stored ? ea_get_storage(e) : NULL;
+ debug("[%c%c] overlay=%d uc=%d h=%08x",
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
- (e->flags & EALF_CACHED) ? 'C' : 'c',
+ e->stored,
s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
* ea_hash() takes an extended attribute list and calculated a hopefully
* uniformly distributed hash value from its contents.
*/
-inline uint
+uint
ea_hash(ea_list *e)
{
const u64 mul = 0x68576150f3d6847;
* converted to the normalized form.
*/
ea_list *
-ea_lookup(ea_list *o, int overlay)
+ea_lookup_slow(ea_list *o, u32 squash_upto, enum ea_stored oid)
{
struct ea_storage *r;
uint h;
- ASSERT(!ea_is_cached(o));
- o = ea_normalize(o, overlay);
+ ASSERT(o->stored != oid);
+ ASSERT(oid);
+ o = ea_normalize(o, squash_upto);
h = ea_hash(o);
+ squash_upto |= BIT32_VAL(oid);
+
RTA_LOCK;
for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next_hash)
- if (r->hash_key == h && ea_same(r->l, o))
+ if (r->hash_key == h && ea_same(r->l, o) && BIT32_TEST(&squash_upto, r->l->stored))
{
atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
RTA_UNLOCK;
ea_list_copy(r->l, o, elen);
ea_list_ref(r->l);
- r->l->flags |= EALF_CACHED | huge;
+ r->l->flags |= huge;
+ r->l->stored = oid;
r->hash_key = h;
atomic_store_explicit(&r->uc, 1, memory_order_release);
else
from[0] = 0;
- /* Need to normalize the extended attributes */
- if (d->verbose && !rta_is_cached(a) && a)
- a = ea_normalize(a, 0);
+ /* Need to normalize the attributes for dumping */
+ if (d->verbose && !a->stored)
+ a = ea_normalize(a, EALS_NONE);
get_route_info = e->src->owner->class ? e->src->owner->class->get_route_info : NULL;
if (get_route_info)
rt_lock_source(e->src);
- if (ea_is_cached(e->attrs))
- e->attrs = rta_clone(e->attrs);
- else
- e->attrs = rta_lookup(e->attrs, 1);
+ e->attrs = ea_lookup(e->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED), EALS_IN_TABLE);
#if 0
debug("(store) %N ", i->addr);
rt_unlock_source(e->rte.src);
- rta_free(e->rte.attrs);
+ ea_free(e->rte.attrs);
sl_free(e);
}
return
(x == y) || (
(x->attrs == y->attrs) ||
- ((!(x->attrs->flags & EALF_CACHED) || !(y->attrs->flags & EALF_CACHED)) && ea_same(x->attrs, y->attrs))
+ ((!x->attrs->stored || !y->attrs->stored) && ea_same(x->attrs, y->attrs))
) &&
x->src == y->src &&
rte_is_filtered(x) == rte_is_filtered(y);
ASSERT(c->channel_state == CS_UP);
- ea_list *ea_tmp[2] = {};
+ ea_list *ea_prefilter = NULL, *ea_postfilter = NULL;
- /* The import reloader requires prefilter routes to be the first layer */
+ /* Storing prefilter routes as an explicit layer */
if (new && (c->in_keep & RIK_PREFILTER))
- ea_tmp[0] = new->attrs =
- (ea_is_cached(new->attrs) && !new->attrs->next) ?
- ea_clone(new->attrs) :
- ea_lookup(new->attrs, 0);
+ ea_prefilter = new->attrs = ea_lookup(new->attrs, 0, EALS_PREIMPORT);
#if 0
debug("%s.%s -(prefilter)-> %s: %N ", c->proto->name, c->name, c->table->name, n);
if (new)
{
- ea_tmp[1] = new->attrs =
- ea_is_cached(new->attrs) ? ea_clone(new->attrs) : ea_lookup(new->attrs, !!ea_tmp[0]);
+ ea_postfilter = new->attrs = ea_lookup(new->attrs,
+ ea_prefilter ? BIT32_ALL(EALS_PREIMPORT) : 0, EALS_FILTERED);
if (net_is_flow(n))
rt_flowspec_resolve_rte(new, c);
/* Now the route attributes are kept by the in-table cached version
* and we may drop the local handles */
- for (uint k = 0; k < ARRAY_SIZE(ea_tmp); k++)
- if (ea_tmp[k])
- ea_free(ea_tmp[k]);
+ ea_free(ea_prefilter);
+ ea_free(ea_postfilter);
}
void
if (!head)
return 0;
+ /* Get the state of the route just before nexthop was resolved */
*new = *old;
+ new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
+
RT_LOCKED(head->he->owner, tab)
rta_apply_hostentry(tab, &new->attrs, head);
return 1;
if (nb)
{
rb = RTE_COPY_VALID(RTE_OR_NULL(nb->routes));
- rta_clone(rb.attrs);
+ ea_ref(rb.attrs);
net_copy(&nau.n, nb->routes->rte.net);
rb.net = &nau.n;
}
return 0;
*new = *r;
+ new->attrs = ea_strip_to(new->attrs, BIT32_ALL(EALS_PREIMPORT, EALS_FILTERED));
ea_set_attr_u32(&new->attrs, &ea_gen_flowspec_valid, 0, valid);
return 1;
#else
rte new = rte_init_from(feed[i]);
/* Strip the later attribute layers */
- while (new.attrs->next)
- new.attrs = new.attrs->next;
+ new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
/* And reload the route */
rte_update(c, net, &new, new.src);
static void
hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
{
- rta_free(he->src);
+ ea_free(he->src);
rem_node(&he->ln);
hc_remove(hc, he);
WALK_LIST(n, hc->hostentries)
{
struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
- rta_free(he->src);
+ ea_free(he->src);
if (!lfuc_finished(&he->uc))
log(L_ERR "Hostcache is not empty in table %s", tab->name);
direct++;
}
- he->src = rta_clone(a);
+ he->src = ea_ref(a);
he->nexthop_linkable = !direct;
he->igp_metric = rt_get_igp_metric(&e->rte);
}
/* Add a prefix range to the trie */
trie_add_prefix(tab->hostcache->trie, &he_addr, pxlen, he_addr.pxlen);
- rta_free(old_src);
+ ea_free(old_src);
return old_src != he->src;
}
}
/* Store the route attributes */
- if (rta_is_cached(new->attrs))
- rta_clone(new->attrs);
- else
- new->attrs = rta_lookup(new->attrs, 0);
+ new->attrs = ea_lookup(new->attrs, 0, EALS_KEY);
/* Insert the new route into the bucket */
struct aggregator_route *arte = sl_alloc(p->route_slab);
old_bucket->count--;
HASH_REMOVE2(p->routes, AGGR_RTE, p->p.pool, old_route);
- rta_free(old_route->rte.attrs);
+ ea_free(old_route->rte.attrs);
sl_free(old_route);
}
b->rte = arte->next_rte;
b->count--;
HASH_REMOVE(p->routes, AGGR_RTE, arte);
- rta_free(arte->rte.attrs);
+ ea_free(arte->rte.attrs);
sl_free(arte);
}
/* Prepare cached route attributes */
if (!s->mpls && (s->cached_ea == NULL))
- a0 = s->cached_ea = ea_lookup(a0, 0);
+ a0 = s->cached_ea = ea_lookup(a0, 0, EALS_CUSTOM);
rte e0 = {
.attrs = a0,
bgp_apply_mpls_labels(s, to, lnum, labels);
/* Attributes were changed, invalidate cached entry */
- rta_free(s->cached_ea);
+ ea_free(s->cached_ea);
s->cached_ea = NULL;
return;
c->desc->decode_nlri(s, nlri, len, ea);
- rta_free(s->cached_ea);
+ ea_free(s->cached_ea);
s->cached_ea = NULL;
rt_unlock_source(s->last_src);
ea, s.mp_next_hop_data, s.mp_next_hop_len);
done:
- rta_free(s.cached_ea);
+ ea_free(s.cached_ea);
lp_restore(tmp_linpool, tmpp);
return;
}
ASSERT_DIE(ARRAY_SIZE(eattrs.a) >= eattrs.l.count);
- ea_list *eal = ea_lookup(&eattrs.l, 0);
+ ea_list *eal = ea_lookup(&eattrs.l, 0, EALS_CUSTOM);
ea_free(nf->old_ea);
nf->old_ea = eal;
else if (nf->old_ea)
{
/* Remove the route */
- rta_free(nf->old_ea);
+ ea_free(nf->old_ea);
nf->old_ea = NULL;
rte_update(p->p.main_channel, nf->fn.addr, NULL, p->p.main_source);
ea_set_attr_data(&ea, &ea_gen_nexthop, 0,
&nhad.ad.data, sizeof nhad - sizeof nhad.ad);
- p->data[i].a = rta_lookup(ea, 0);
+ p->data[i].a = ea_lookup(ea, 0, EALS_CUSTOM);
}
else
- p->data[i].a = rta_clone(p->data[i-1].a);
+ p->data[i].a = ea_ref(p->data[i-1].a);
}
clock_gettime(CLOCK_MONOTONIC, &ts_generated);