git.ipfire.org Git - thirdparty/bird.git/commitdiff
TMP: lockfree usecount and spinhash use callbacks
author Maria Matejka <mq@ucw.cz>
Sat, 15 Jun 2024 18:42:09 +0000 (20:42 +0200)
committer Maria Matejka <mq@ucw.cz>
Sat, 15 Jun 2024 18:42:09 +0000 (20:42 +0200)
lib/hash.h
lib/lockfree.c
lib/lockfree.h
lib/netindex.c
lib/netindex.h
lib/netindex_private.h
lib/route.h
nest/mpls.c
nest/mpls.h

index 257a5f6abf6643a36c4bc302e1cf0efc5269f2ca..f981cefa8d353c5b209937ca01c62b791e8ffef7 100644 (file)
@@ -218,8 +218,7 @@ struct {                                                            \
   uint cur_order, new_order;                                           \
   struct { type *data; rw_spinlock lock; } *cur, *new;                 \
   pool *pool;                                                          \
-  event rehash;                                                                \
-  event_list *target;                                                  \
+  callback rehash;                                                     \
 }
 
 #define SPINHASH_INIT(v,id,_pool,_target)                              \
@@ -230,19 +229,18 @@ struct {                                                          \
     (v).cur = mb_allocz(_pool, (1U << id##_ORDER) * sizeof *(v).cur);  \
     (v).new = NULL;                                                    \
     (v).pool = _pool;                                                  \
-    (v).rehash = (event) { .hook = id##_REHASH, .data = &(v), };       \
-    (v).target = _target;                                              \
+    if (_target) callback_init(&(v).rehash, id##_REHASH, _target);     \
   })
 
 #define SPINHASH_FREE(v)                                               \
   ({                                                                   \
-    ev_postpone(&(v).rehash);                                          \
+    callback_cancel(&(v).rehash);                                      \
     mb_free((v).cur);                                                  \
     ASSERT_DIE((v).new == NULL);                                       \
     (v).cur = NULL;                                                    \
     (v).cur_order = 0;                                                 \
     (v).pool = NULL;                                                   \
-    (v).target = NULL;                                                 \
+    (v).rehash = (callback) {};                                                \
   })
 
 #define SPINHASH_BEGIN_CHAIN(v,id,rw,n,key...)                         \
@@ -367,12 +365,12 @@ struct {                                                          \
   })
 
 #define SPINHASH_REQUEST_REHASH(v,id,count)                            \
-  if (SPINHASH_CHECK_REHASH(v,id,count) && (v).target)                 \
-      ev_send((v).target, &(v).rehash);
+  if ((v).rehash.target && SPINHASH_CHECK_REHASH(v,id,count))          \
+    callback_activate(&(v).rehash);                                    \
 
 #define SPINHASH_DEFINE_REHASH_FN(id,type)                             \
-static void id##_REHASH(void *_v) {                                    \
-  SPINHASH(type) *v = _v;                                              \
+static void id##_REHASH(callback *cb) {                                        \
+  SKIP_BACK_DECLARE(SPINHASH(type), v, rehash, cb);                    \
   SPINHASH_REHASH_FN_BODY(v,id,type);                                  \
 }
 
index 3a2ccab75fd5414d2d3cd52b7aa073a2790dafa7..6a96ca889c53af0576efd1983e3462caf21a5078 100644 (file)
@@ -15,7 +15,7 @@
 void lfuc_unlock_deferred(struct deferred_call *dc)
 {
   SKIP_BACK_DECLARE(struct lfuc_unlock_queue_item, luqi, dc, dc);
-  lfuc_unlock_immediately(luqi->c, luqi->el, luqi->ev);
+  lfuc_unlock_immediately(luqi->c, luqi->cb);
 }
 
 #if 0
index 0553aac124e67ad8cfb62a403f3dec2c816ee498..bab6313e644bf17ff1fb86d084722834b873306c 100644 (file)
@@ -67,7 +67,7 @@ static inline u64 lfuc_lock_revive(struct lfuc *c)
  * If the usecount reaches zero, a prune event is run to possibly free the object.
  * The prune event MUST use lfuc_finished() to check the object state.
  */
-static inline void lfuc_unlock_immediately(struct lfuc *c, event_list *el, event *ev)
+static inline void lfuc_unlock_immediately(struct lfuc *c, struct callback *cb)
 {
   /* Unlocking is tricky. We do it lockless so at the same time, the prune
    * event may be running, therefore if the unlock gets us to zero, it must be
@@ -98,7 +98,7 @@ static inline void lfuc_unlock_immediately(struct lfuc *c, event_list *el, event
   if (uc == pending)
     /* If we're the last unlocker (every owner is already unlocking), schedule
      * the owner's prune event */
-    ev_send(el, ev);
+    callback_activate(cb);
   else
     ASSERT_DIE(uc > pending);
 
@@ -112,19 +112,17 @@ static inline void lfuc_unlock_immediately(struct lfuc *c, event_list *el, event
 struct lfuc_unlock_queue_item {
   struct deferred_call dc;
   struct lfuc *c;
-  event_list *el;
-  event *ev;
+  struct callback *cb;
 };
 
 void lfuc_unlock_deferred(struct deferred_call *dc);
 
-static inline void lfuc_unlock(struct lfuc *c, event_list *el, event *ev)
+static inline void lfuc_unlock(struct lfuc *c, struct callback *cb)
 {
   struct lfuc_unlock_queue_item luqi = {
     .dc.hook = lfuc_unlock_deferred,
     .c = c,
-    .el = el,
-    .ev = ev,
+    .cb = cb,
   };
 
   defer_call(&luqi.dc, sizeof luqi);
index 88f9b6d5a29858f9ebb3a6a6fb5c1988d1499f4e..a6b078796528a6ff8676f94d874683d04abee8c2 100644 (file)
@@ -20,9 +20,9 @@
 #define NETINDEX_REHASH                netindex_rehash
 #define NETINDEX_PARAMS                /8, *2, 2, 2, 12, 28
 
-static void NETINDEX_REHASH(void *_v) {
+static void NETINDEX_REHASH(callback *cb) {
   log(L_TRACE "Netindex rehash: begin");
-  netindex_spinhash *v = _v;
+  SKIP_BACK_DECLARE(netindex_spinhash, v, rehash, cb);
   int step;
   {
     NH_LOCK(SKIP_BACK(netindex_hash, hash, v), _);
@@ -43,7 +43,7 @@ static void NETINDEX_REHASH(void *_v) {
   log(L_TRACE "Netindex rehash: done");
 }
 
-static void netindex_hash_cleanup(void *netindex_hash);
+static void netindex_hash_cleanup(callback *cb);
 
 static struct netindex *
 net_lock_revive_unlock(netindex_hash *h, struct netindex *i)
@@ -52,7 +52,7 @@ net_lock_revive_unlock(netindex_hash *h, struct netindex *i)
     return NULL;
 
   lfuc_lock_revive(&i->uc);
-  lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
+  lfuc_unlock(&i->uc, &h->cleanup);
   return i;
 }
 
@@ -60,7 +60,7 @@ net_lock_revive_unlock(netindex_hash *h, struct netindex *i)
  * Index initialization
  */
 netindex_hash *
-netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
+netindex_hash_new(pool *sp, struct birdloop *cleanup_target, u8 type)
 {
   DOMAIN(attrs) dom = DOMAIN_NEW_RCU_SYNC(attrs);
   LOCK_DOMAIN(attrs, dom);
@@ -82,8 +82,7 @@ netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
 
   hmap_init(&nh->id_map, nh->pool, 128);
 
-  nh->cleanup_list = cleanup_target;
-  nh->cleanup_event = (event) { .hook = netindex_hash_cleanup, nh };
+  callback_init(&nh->cleanup, netindex_hash_cleanup, cleanup_target);
 
   UNLOCK_DOMAIN(attrs, dom);
   return SKIP_BACK(netindex_hash, priv, nh);
@@ -123,9 +122,9 @@ netindex_hash_cleanup_removed(struct netindex_hash_private *nh, struct netindex
 }
 
 static void
-netindex_hash_cleanup(void *_nh)
+netindex_hash_cleanup(callback *cb)
 {
-  struct netindex_hash_private *nh = _nh;
+  SKIP_BACK_DECLARE(struct netindex_hash_private, nh, cleanup, cb);
 
   DOMAIN(attrs) dom = nh->lock;
   LOCK_DOMAIN(attrs, dom);
@@ -176,16 +175,16 @@ netindex_hash_cleanup(void *_nh)
     kept += netindex_hash_cleanup_removed(nh, block, removed, removed_cnt);
 
   /* Return now unless we're deleted */
-  if (kept || !nh->deleted_event)
+  if (kept || !nh->deleted)
   {
     UNLOCK_DOMAIN(attrs, dom);
     return;
   }
 
-  ev_postpone(&nh->cleanup_event);
+  callback_cancel(&nh->cleanup);
 
-  event *e = nh->deleted_event;
-  event_list *t = nh->deleted_target;
+  /* Store the callback */
+  callback *cd = nh->deleted;
 
   /* Check cleanliness */
   SPINHASH_WALK(nh->hash, NETINDEX, i)
@@ -203,18 +202,16 @@ netindex_hash_cleanup(void *_nh)
   DOMAIN_FREE(attrs, dom);
 
   /* Notify the requestor */
-  ev_send(t, e);
+  callback_activate(cd);
 }
 
 void
-netindex_hash_delete(netindex_hash *h, event *e, event_list *t)
+netindex_hash_delete(netindex_hash *h, callback *cb)
 {
   NH_LOCK(h, hp);
 
-  hp->deleted_event = e;
-  hp->deleted_target = t;
-
-  ev_send(hp->cleanup_list, &hp->cleanup_event);
+  hp->deleted = cb;
+  callback_activate(&hp->cleanup);
 }
 
 /*
@@ -243,7 +240,7 @@ net_validate_index(netindex_hash *h, struct netindex *ni)
 static struct netindex *
 net_new_index_locked(struct netindex_hash_private *hp, const net_addr *n)
 {
-  ASSERT_DIE(!hp->deleted_event);
+  ASSERT_DIE(!hp->deleted);
 
   u32 i = hmap_first_zero(&hp->id_map);
   hmap_set(&hp->id_map, i);
@@ -302,7 +299,7 @@ void net_lock_index(netindex_hash *h UNUSED, struct netindex *i)
 void net_unlock_index(netindex_hash *h, struct netindex *i)
 {
 //  log(L_TRACE "Unlock index %p", i);
-  lfuc_unlock(&i->uc, h->cleanup_list, &h->cleanup_event);
+  lfuc_unlock(&i->uc, &h->cleanup);
 }
 
 struct netindex *
index 0cbaa5379a79a5c61c4f0d33de561046a6bfd8ce..d0debb4816adb90e54f8ab4eeb1dd3e4a886dffb 100644 (file)
@@ -28,8 +28,8 @@ struct netindex {
 typedef union netindex_hash netindex_hash;
 
 /* Initialization and teardown */
-netindex_hash *netindex_hash_new(pool *, event_list *, u8);
-void netindex_hash_delete(netindex_hash *, event *, event_list *);
+netindex_hash *netindex_hash_new(pool *, struct birdloop *, u8);
+void netindex_hash_delete(netindex_hash *, callback *);
 
 /* Find/get/resolve index; pointer valid until end of task */ 
 struct netindex *net_find_index(netindex_hash *, const net_addr *);
index bb3bd1ec3567170e9253c53c61b35486e08e9e11..9a1aabe7aa86e4c2e751a39117b26adfd31e09b5 100644 (file)
@@ -15,8 +15,7 @@ typedef SPINHASH(struct netindex) netindex_spinhash;
 
 #define NETINDEX_HASH_PUBLIC \
   DOMAIN(attrs) lock;          /* Assigned lock */             \
-  event_list *cleanup_list;    /* Cleanup event list */        \
-  event cleanup_event;         /* Cleanup event */             \
+  callback cleanup;            /* Usecount cleanup */          \
   u8 net_type;                 /* Which NET_* is stored */     \
   uint _Atomic block_size;     /* How big block is */          \
   struct netindex * _Atomic * _Atomic block;   /* u32 to netindex */           \
@@ -29,8 +28,7 @@ struct netindex_hash_private {
   slab *slab;
   struct hmap id_map;
   u32 block_epoch;
-  event *deleted_event;
-  event_list *deleted_target;
+  callback *deleted;
 };
 
 typedef union netindex_hash {
index 0a95663569a5751087316c1f0a53059f5833565b..2b9a67256a78398b0a097e29204ac02e8de64895 100644 (file)
@@ -87,8 +87,7 @@ struct rte_owner {
   u32 hash_key;
   u32 uc;
   u32 debug;
-  event_list *list;
-  event *prune;
+  struct callback *prune_callback;
   event *stop;
 };
 
@@ -119,7 +118,7 @@ static inline void rt_lock_source(struct rte_src *src)
 
 static inline void rt_unlock_source(struct rte_src *src)
 {
-  lfuc_unlock(&src->uc, src->owner->list, src->owner->prune);
+  lfuc_unlock(&src->uc, src->owner->prune_callback);
 }
 
 #ifdef RT_SOURCE_DEBUG
index a362ef80b49ea3f4405e15f0252b684ab8daf51a..eca0d7d6d29eb8a66e0f466fda5aaf9a94e57857 100644 (file)
@@ -93,7 +93,7 @@ static void mpls_remove_range(struct mpls_range *r);
 static void mpls_cleanup_ranges(void *_domain);
 
 static void mpls_free_fec(struct mpls_fec_map *m, struct mpls_fec *fec);
-static void mpls_fec_map_cleanup(void *_m);
+static void mpls_fec_map_cleanup(callback *cb);
 
 /*
  *     MPLS domain
@@ -659,7 +659,7 @@ mpls_channel_shutdown(struct channel *C)
   if (!c->rts)
     return;
 
-  ev_send_loop(c->mpls_map->loop, c->mpls_map->cleanup_event);
+  callback_activate(&c->mpls_map->cleanup);
 }
 
 static void
@@ -792,8 +792,7 @@ mpls_fec_map_new(pool *pp, struct birdloop *loop, struct channel *C, uint rts)
   DBGL("New FEC Map %p", m);
 
   m->pool = p;
-  m->loop = loop;
-  m->cleanup_event = ev_new_init(p, mpls_fec_map_cleanup, m);
+  callback_init(&m->cleanup, mpls_fec_map_cleanup, loop);
   m->channel = C;
   channel_add_obstacle(C);
 
@@ -881,9 +880,9 @@ mpls_fec_map_reconfigure(struct mpls_fec_map *m, struct channel *C)
 }
 
 static void
-mpls_fec_map_cleanup(void *_m)
+mpls_fec_map_cleanup(callback *cb)
 {
-  struct mpls_fec_map *m = _m;
+  SKIP_BACK_DECLARE(struct mpls_fec_map, m, cleanup, cb);
   _Bool finished = (m->channel->channel_state == CS_STOP);
   HASH_WALK_DELSAFE(m->label_hash, next_l, fec)
     if (lfuc_finished(&fec->uc))
@@ -896,7 +895,7 @@ mpls_fec_map_cleanup(void *_m)
 
   if (finished)
   {
-    ev_postpone(m->cleanup_event);
+    callback_cancel(&m->cleanup);
     channel_del_obstacle(m->channel);
   }
 }
@@ -1193,7 +1192,7 @@ inline void mpls_lock_fec(struct mpls_fec *fec)
 
 inline void mpls_unlock_fec(struct mpls_fec *fec)
 {
-  lfuc_unlock(&fec->uc, birdloop_event_list(fec->map->loop), fec->map->cleanup_event);
+  lfuc_unlock(&fec->uc, &fec->map->cleanup);
   DBGL("Unlocked FEC %p %u (deferred)", fec, fec->label);
 }
 
index 9cb4925a107886900726338511f33cd612a55973..6388bf7cea5293c798873a7611f8a712119c1d73 100644 (file)
@@ -124,8 +124,7 @@ struct mpls_fec {
 
 struct mpls_fec_map {
   pool *pool;                          /* Pool for FEC map */
-  struct birdloop *loop;               /* Owner's loop for sending events */
-  event *cleanup_event;                        /* Event for unlocked FEC cleanup */
+  callback cleanup;                    /* Callback for unlocked FEC cleanup */
   slab *slabs[4];                      /* Slabs for FEC allocation */
   HASH(struct mpls_fec) net_hash;      /* Hash table for MPLS_POLICY_PREFIX FECs */
   HASH(struct mpls_fec) attrs_hash;    /* Hash table for MPLS_POLICY_AGGREGATE FECs */