struct ea_storage {
struct ea_storage *next_hash; /* Next in hash chain */
- _Atomic u32 uc; /* Use count */
+ struct lfuc uc; /* Use count */
u32 hash_key; /* List hash */
+ /* 32 bits unused; we don't want to squeeze the use count
+ * into 32 bits just to pack it with the list hash. */
ea_list l[0]; /* The list itself */
};
static inline ea_list *ea_ref(ea_list *r)
{
- ASSERT_DIE(0 < atomic_fetch_add_explicit(&ea_get_storage(r)->uc, 1, memory_order_acq_rel));
+ lfuc_lock(&ea_get_storage(r)->uc);
return r;
}
return ea_lookup_slow(r, squash_upto, oid);
}
-struct ea_free_deferred {
- struct deferred_call dc;
- ea_list *attrs;
-};
-
-void ea_free_deferred(struct deferred_call *dc);
-
-static inline ea_list *ea_free_later(ea_list *r)
+#define ea_free_later ea_free
+extern event ea_cleanup_event;
+static inline void ea_free(ea_list *l)
{
- struct ea_free_deferred efd = {
- .dc.hook = ea_free_deferred,
- .attrs = r,
- };
-
- defer_call(&efd.dc, sizeof efd);
- return r;
+ if (l)
+ lfuc_unlock(&ea_get_storage(l)->uc, &global_work_list, &ea_cleanup_event);
}
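/*
 * A minimal, self-contained model of the deferred-release pattern used by
 * ea_free() above, written with plain C11 atomics instead of BIRD's lfuc and
 * event machinery (all names below are illustrative, not BIRD API):
 * releasing a reference never frees the object directly; it only drops the
 * use count and pings a cleanup routine that runs later.
 */
#include <stdatomic.h>

struct cached_obj {
  _Atomic unsigned uc;            /* use count */
  /* ... payload ... */
};

/* Hypothetical hook standing in for queueing ea_cleanup_event on
 * global_work_list. */
void schedule_cleanup(void);

static inline void obj_ref(struct cached_obj *o)
{
  atomic_fetch_add_explicit(&o->uc, 1, memory_order_acq_rel);
}

static inline void obj_unref(struct cached_obj *o)
{
  /* Drop the count; even when it reaches zero, the actual freeing is left
   * to the deferred cleanup walk, which re-checks the count under a lock. */
  if (atomic_fetch_sub_explicit(&o->uc, 1, memory_order_acq_rel) == 1)
    schedule_cleanup();
}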
static inline ea_list *ea_lookup_tmp(ea_list *r, u32 squash_upto, enum ea_stored oid)
{
- return ea_free_later(ea_lookup(r, squash_upto, oid));
+ ea_free_later(r = ea_lookup(r, squash_upto, oid));
+ return r;
}
static inline ea_list *ea_ref_tmp(ea_list *r)
{
ASSERT_DIE(r->stored);
- return ea_free_later(ea_ref(r));
+ ea_free_later(ea_ref(r));
+ return r;
}
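/*
 * A hypothetical caller of the _tmp variants above (not a real BIRD
 * function), to show the intended use: the reference taken by ea_lookup()
 * is dropped again right away, and the cached list keeps living until the
 * deferred ea_cleanup_event finds its use count finished.
 */
static void use_temporary_list(ea_list *scratch, u32 squash_upto, enum ea_stored oid)
{
  ea_list *cached = ea_lookup_tmp(scratch, squash_upto, oid);

  /* ... read attributes from 'cached' within the current task ... */

  /* No explicit ea_free() here: ea_lookup_tmp() already released its
   * temporary reference and reclamation is deferred to ea_cleanup(). */
}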
static inline ea_list *ea_strip_to(ea_list *r, u32 strip_to)
return r;
}
-void ea__free(struct ea_storage *r);
-static inline void ea_free(ea_list *l) {
- if (!l) return;
- struct ea_storage *r = ea_get_storage(l);
- if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel)) ea__free(r);
-}
-
void ea_dump(ea_list *);
void ea_dump_all(void);
void ea_show_list(struct cli *, ea_list *);
ea_ref(l->next);
}
-static void ea_free_nested(ea_list *l);
-
static void
ea_list_unref(ea_list *l)
{
}
if (l->next)
- ea_free_nested(l->next);
+ lfuc_unlock(&ea_get_storage(l->next)->uc, &global_work_list, &ea_cleanup_event);
}
void
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
e->stored,
- s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
+ s ? atomic_load_explicit(&s->uc.uc, memory_order_relaxed) : 0,
s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
{
* rta's
*/
+event ea_cleanup_event;
+
static HASH(struct ea_storage) rta_hash_table;
#define RTAH_KEY(a) a->l, a->hash_key
if (r)
{
- atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
+ lfuc_lock_revive(&r->uc);
RTA_UNLOCK;
return r->l;
}
r->l->flags |= huge;
r->l->stored = oid;
r->hash_key = h;
- atomic_store_explicit(&r->uc, 1, memory_order_release);
+
+ memset(&r->uc, 0, sizeof r->uc);
+ lfuc_lock_revive(&r->uc);
HASH_INSERT2(rta_hash_table, RTAH, rta_pool, r);
}
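/*
 * In the simplified model sketched earlier, initializing a freshly
 * allocated entry mirrors the memset() + lfuc_lock_revive() pair above:
 * start from a zeroed counter and take the first reference through the
 * same path that cache hits use (obj_init is illustrative, not BIRD API).
 */
static inline void obj_init(struct cached_obj *o)
{
  atomic_store_explicit(&o->uc, 0, memory_order_relaxed);
  atomic_fetch_add_explicit(&o->uc, 1, memory_order_acq_rel);   /* first reference */
}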
static void
-ea_free_locked(struct ea_storage *a)
+ea_cleanup(void *_ UNUSED)
{
- /* Somebody has cloned this rta inbetween. This sometimes happens. */
- if (atomic_load_explicit(&a->uc, memory_order_acquire))
- return;
-
- HASH_REMOVE2(rta_hash_table, RTAH, rta_pool, a);
+ RTA_LOCK;
- ea_list_unref(a->l);
- if (a->l->flags & EALF_HUGE)
- mb_free(a);
- else
- sl_free(a);
-}
+ HASH_WALK_FILTER(rta_hash_table, next_hash, r, rp)
+ {
+ if (lfuc_finished(&r->uc))
+ {
+ HASH_DO_REMOVE(rta_hash_table, RTAH, rp);
-static void
-ea_free_nested(struct ea_list *l)
-{
- struct ea_storage *r = ea_get_storage(l);
- if (1 == atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel))
- ea_free_locked(r);
-}
+ ea_list_unref(r->l);
+ if (r->l->flags & EALF_HUGE)
+ mb_free(r);
+ else
+ sl_free(r);
+ }
+ }
+ HASH_WALK_FILTER_END;
-void
-ea__free(struct ea_storage *a)
-{
- RTA_LOCK;
- ea_free_locked(a);
RTA_UNLOCK;
}
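/*
 * The garbage-collection pattern of ea_cleanup() above, reduced to a
 * self-contained sketch: one lock protects a table, the cleanup walk frees
 * entries whose use count dropped to zero, and lookups revive entries under
 * the same lock so they can never race with the free. The pthread mutex and
 * all names here are illustrative; BIRD uses its own hash macros and the
 * RTA_LOCK domain instead.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
  struct entry *next;
  _Atomic unsigned uc;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

static void cleanup_walk(void)
{
  pthread_mutex_lock(&table_lock);
  for (struct entry **pp = &table; *pp; )
  {
    struct entry *e = *pp;
    if (atomic_load_explicit(&e->uc, memory_order_acquire) == 0)
    {
      *pp = e->next;                /* unlink while holding the lock */
      free(e);
    }
    else
      pp = &e->next;
  }
  pthread_mutex_unlock(&table_lock);
}

static struct entry *find_and_revive(int (*match)(struct entry *))
{
  pthread_mutex_lock(&table_lock);
  struct entry *e;
  for (e = table; e; e = e->next)
    if (match(e))
    {
      /* Reviving under the same lock as the cleanup walk guarantees the
       * entry cannot be freed between finding it and bumping its count. */
      atomic_fetch_add_explicit(&e->uc, 1, memory_order_acq_rel);
      break;
    }
  pthread_mutex_unlock(&table_lock);
  return e;
}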
-void
-ea_free_deferred(struct deferred_call *dc)
-{
- ea_free(SKIP_BACK(struct ea_free_deferred, dc, dc)->attrs);
-}
-
/**
* rta_dump_all - dump attribute cache
*
rte_src_init();
ea_class_init();
+ ea_cleanup_event = (event) {
+ .hook = ea_cleanup,
+ };
+
RTA_UNLOCK;
/* These attributes are required to be first for nice "show route" output */