ip_addr from; /* Advertising router */
u32 igp_metric; /* IGP metric to next hop (for iBGP routes) */
u16 cached:1; /* Are attributes cached? */
- u16 source:7; /* Route source (RTS_...) */
+ u16 obsolete:1; /* This rta is going to be freed */
+ u16 source:6; /* Route source (RTS_...) */
u16 scope:4; /* Route scope (SCOPE_... -- see ip.h) */
u16 dest:4; /* Route destination type (RTD_...) */
word pref;
#define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
rta *rta_lookup(rta *); /* Get rta equivalent to this one, uc++ */
static inline int rta_is_cached(rta *r) { return r->cached; }
-static inline rta *rta_clone(rta *r) { r->uc++; return r; }
-void rta__free(rta *r);
-static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
+static inline rta *rta_clone(rta *r)
+{
+ if (r->obsolete)
+ return rta_lookup(r);
+
+ r->uc++;
+ return r;
+}
+void rta_unlink(rta *r);
+static inline void rta_free(rta *r) { if (r && !--r->uc) rta_unlink(r); }
rta *rta_do_cow(rta *o, linpool *lp);
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
void rta_dump(rta *);
#include "nest/cli.h"
#include "nest/attrs.h"
#include "lib/alloca.h"
+#include "lib/gc.h"
#include "lib/hash.h"
#include "lib/idm.h"
#include "lib/resource.h"
#include "lib/string.h"
#include <stddef.h>
+#include <pthread.h>
const adata null_adata; /* adata of length 0 */
static uint rta_cache_limit;
static uint rta_cache_mask;
static rta **rta_hash_table;
+static pthread_mutex_t rta_hash_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+struct rta_gc_chain {
+ node n;
+ u64 round;
+ rta *chain;
+};
+
+static _Thread_local struct rta_gc_chain *rta_gc_current_chain = NULL;
+static list rta_gc_chain_list;
static void
rta_alloc_hash(void)
rta *r = sl_alloc(rta_slab(o));
memcpy(r, o, rta_size(o));
+ r->obsolete = 0;
r->uc = 1;
r->nh.next = nexthop_copy(o->nh.next);
r->eattrs = ea_list_copy(o->eattrs);
rta *r;
uint h;
+ if (o->cached && o->obsolete)
+ {
+ pthread_mutex_lock(&rta_hash_mutex);
+ ASSERT(o->uc == 0);
+ h = o->hash_key;
+ goto copy;
+ }
+
ASSERT(!o->cached);
if (o->eattrs)
ea_normalize(o->eattrs);
h = rta_hash(o);
+
+ pthread_mutex_lock(&rta_hash_mutex);
+
for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next)
if (r->hash_key == h && rta_same(r, o))
- return rta_clone(r);
+ {
+ r = rta_clone(r);
+ goto done;
+ }
+copy:
r = rta_copy(o);
r->hash_key = h;
r->cached = 1;
if (++rta_cache_count > rta_cache_limit)
rta_rehash();
+done:
+ pthread_mutex_unlock(&rta_hash_mutex);
return r;
}
void
-rta__free(rta *a)
+rta_unlink(rta *a)
{
+ pthread_mutex_lock(&rta_hash_mutex);
ASSERT(rta_cache_count && a->cached);
+ ASSERT(rta_gc_current_chain);
+
+ a->obsolete = 1;
rta_cache_count--;
+
*a->pprev = a->next;
if (a->next)
a->next->pprev = a->pprev;
+
+ a->pprev = NULL;
+ a->next = rta_gc_current_chain->chain;
+ rta_gc_current_chain->chain = a;
+
+ pthread_mutex_unlock(&rta_hash_mutex);
+}
+
+static void
+rta_do_free(rta *a)
+{
+ ASSERT(a->obsolete);
rt_unlock_hostentry(a->hostentry);
if (a->nh.next)
nexthop_free(a->nh.next);
sl_free(rta_slab(a), a);
}
+static void
+rta_gc_enter(u64 round, struct gc_callback_set *gcs UNUSED)
+{
+ pthread_mutex_lock(&rta_hash_mutex);
+ ASSERT(rta_gc_current_chain == NULL);
+ rta_gc_current_chain = mb_alloc(rta_pool, sizeof(struct rta_gc_chain));
+ *rta_gc_current_chain = (struct rta_gc_chain) { .round = round };
+
+ add_tail(&rta_gc_chain_list, &rta_gc_current_chain->n);
+
+ pthread_mutex_unlock(&rta_hash_mutex);
+}
+
+static void
+rta_gc_exit(u64 round UNUSED, struct gc_callback_set *gcs UNUSED)
+{
+ if (!rta_gc_current_chain->chain)
+ {
+ pthread_mutex_lock(&rta_hash_mutex);
+ rem_node(&rta_gc_current_chain->n);
+ mb_free(rta_gc_current_chain);
+ pthread_mutex_unlock(&rta_hash_mutex);
+ }
+
+ rta_gc_current_chain = NULL;
+}
+
+static void
+rta_gc_cleanup(u64 round, struct gc_callback_set *gcs UNUSED)
+{
+ pthread_mutex_lock(&rta_hash_mutex);
+ node *n = HEAD(rta_gc_chain_list);
+ if (!NODE_VALID(n))
+ goto done;
+
+ struct rta_gc_chain *rgc = SKIP_BACK(struct rta_gc_chain, n, n);
+ if (rgc->round > round)
+ goto done;
+
+ ASSERT(rgc->round == round);
+ rem_node(n);
+
+ rta *a, *nxt = rgc->chain;
+ while (a = nxt)
+ {
+ nxt = a->next;
+ rta_do_free(a);
+ }
+
+ mb_free(rgc);
+
+done:
+ pthread_mutex_unlock(&rta_hash_mutex);
+}
+
+static struct gc_callback_set rta_gc_callback_set = {
+ .enter = rta_gc_enter,
+ .exit = rta_gc_exit,
+ .cleanup = rta_gc_cleanup,
+};
+
rta *
rta_do_cow(rta *o, linpool *lp)
{
nexthop_slab_[2] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*2);
nexthop_slab_[3] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK);
+ init_list(&rta_gc_chain_list);
+ gc_register(&rta_gc_callback_set);
+
rta_alloc_hash();
rte_src_init();
+
}
/*