struct ea_storage {
struct ea_storage *next_hash; /* Next in hash chain */
- struct lfuc uc; /* Use count */
+ _Atomic u32 uc; /* Use count */
u32 hash_key; /* List hash */
- /* 32 bits unused but we don't wanna squeeze the use count
- * into 32 bits to pack with the list hash, sorry */
ea_list l[0]; /* The list itself */
};
static inline ea_list *ea_ref(ea_list *r)
{
- lfuc_lock(&ea_get_storage(r)->uc);
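+ /* The previous use count must be nonzero; ea_ref() never revives a list that is already being freed */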
+ ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
return r;
}
return ea_lookup_slow(r, squash_upto, oid);
}
-#define ea_free_later ea_free
-extern event ea_cleanup_event;
-static inline void ea_free(ea_list *l)
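+/* A deferred request to drop one reference from a cached ea_list */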
+struct ea_free_deferred {
+ struct deferred_call dc;
+ ea_list *attrs;
+};
+
+void ea_free_deferred(struct deferred_call *dc);
+
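+/* Drop the reference later via the deferred call machinery; the list stays valid until the deferred call runs */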
+static inline ea_list *ea_free_later(ea_list *r)
{
- if (l)
- lfuc_unlock(&ea_get_storage(l)->uc, &global_work_list, &ea_cleanup_event);
+ if (!r)
+ return NULL;
+
+ struct ea_free_deferred efd = {
+ .dc.hook = ea_free_deferred,
+ .attrs = r,
+ };
+
+ defer_call(&efd.dc, sizeof efd);
+ return r;
}
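+/* Every ea_free() now goes through the deferred path */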
+#define ea_free ea_free_later
+
static inline ea_list *ea_lookup_tmp(ea_list *r, u32 squash_upto, enum ea_stored oid)
{
- ea_free_later(r = ea_lookup(r, squash_upto, oid));
- return r;
+ return ea_free_later(ea_lookup(r, squash_upto, oid));
}
static inline ea_list *ea_ref_tmp(ea_list *r)
{
ASSERT_DIE(r->stored);
- ea_free_later(ea_ref(r));
- return r;
+ return ea_free_later(ea_ref(r));
}
static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
ea_ref(l->next);
}
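+/* Forward declaration; used by ea_list_unref() below for chained lists */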
+static void ea_free_nested(ea_list *l);
+
static void
ea_list_unref(ea_list *l)
{
}
if (l->next)
- lfuc_unlock(&ea_get_storage(l->next)->uc, &global_work_list, &ea_cleanup_event);
+ ea_free_nested(l->next);
}
void
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
e->stored,
- s ? atomic_load_explicit(&s->uc.uc, memory_order_relaxed) : 0,
+ s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
{
* rta's
*/
-event ea_cleanup_event;
-
static SPINHASH(struct ea_storage) rta_hash_table;
#define RTAH_KEY(a) a->l, a->hash_key
(!r || r->l->stored > ea->l->stored))
r = ea;
if (r)
- lfuc_lock_revive(&r->uc);
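+ /* Take a reference on the found stored copy; a concurrent free rechecks the count under RTA_LOCK */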
+ atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
SPINHASH_END_CHAIN(read);
return r ? r->l : NULL;
r->l->flags |= huge;
r->l->stored = oid;
r->hash_key = h;
-
- memset(&r->uc, 0, sizeof r->uc);
- lfuc_lock_revive(&r->uc);
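+ /* A freshly cached list starts with a single reference held by the caller */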
+ atomic_store_explicit(&r->uc, 1, memory_order_release);
SPINHASH_INSERT(rta_hash_table, RTAH, r);
}
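+/* Free a cached ea_list whose use count has dropped to zero; RTA_LOCK must be held */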
static void
-ea_cleanup(void *_ UNUSED)
+ea_free_locked(struct ea_storage *a)
{
- RTA_LOCK;
+ /* Somebody has cloned this rta in the meantime. This sometimes happens. */
+ if (atomic_load_explicit(&a->uc, memory_order_acquire))
+ return;
- _Bool run_again = 0;
+ SPINHASH_REMOVE(rta_hash_table, RTAH, a);
- SPINHASH_WALK_FILTER(rta_hash_table, RTAH, write, rp)
- {
- struct ea_storage *r = *rp;
- if (lfuc_finished(&r->uc))
- {
- run_again = 1;
- SPINHASH_DO_REMOVE(rta_hash_table, RTAH, rp);
+ ea_list_unref(a->l);
+ if (a->l->flags & EALF_HUGE)
+ mb_free(a);
+ else
+ sl_free(a);
+}
- ea_list_unref(r->l);
- if (r->l->flags & EALF_HUGE)
- mb_free(r);
- else
- sl_free(r);
- }
- }
- SPINHASH_WALK_FILTER_END(write);
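+/* Drop one reference from a chained list; unlike ea_free_deferred(), RTA_LOCK is not taken here */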
+static void
+ea_free_nested(ea_list *l)
+{
+ struct ea_storage *r = ea_get_storage(l);
+ if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
+ ea_free_locked(r);
+}
- if (run_again)
- ev_send(&global_work_list, &ea_cleanup_event);
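+/* Deferred-call hook: drop the reference scheduled by ea_free_later() and free the list if it was the last one */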
+void
+ea_free_deferred(struct deferred_call *dc)
+{
+ struct ea_storage *r = ea_get_storage(SKIP_BACK(struct ea_free_deferred, dc, dc)->attrs);
+ if (1 != atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
+ return;
+
+ RTA_LOCK;
+ ea_free_locked(r);
RTA_UNLOCK;
}
rte_src_init();
ea_class_init();
- ea_cleanup_event = (event) {
- .hook = ea_cleanup,
- };
-
RTA_UNLOCK;
/* These attributes are required to be first for nice "show route" output */