MINOR: event_hdl: add event_hdl_async_equeue_size() function
author    Aurelien DARRAGON <adarragon@haproxy.com>
          Wed, 1 Mar 2023 14:02:04 +0000 (15:02 +0100)
committer Christopher Faulet <cfaulet@haproxy.com>
          Wed, 5 Apr 2023 06:58:17 +0000 (08:58 +0200)
Use event_hdl_async_equeue_size() in an advanced async task handler to
get the near real-time event queue size.

By near real-time, understand that the queue size is not updated
atomically with element insertion/removal: it is incremented shortly
before insertion and decremented shortly after removal. The size thus
reflects the approximate queue size at a given time, but should
definitely not be used as a unique source of truth.
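
The sketch below is a hypothetical illustration of the intended use, not
part of this patch: an advanced async task handler reading the new helper
as a hint before draining its queue. The handler shape follows the usual
haproxy task callback signature; my_task_ctx and my_handle_event() are
made-up names, and END-event handling is omitted for brevity.

#include <haproxy/event_hdl.h>
#include <haproxy/task.h>

/* hypothetical private context for an advanced async subscription */
struct my_task_ctx {
	event_hdl_async_equeue e_queue;  /* filled by event_hdl publish */
};

static struct task *my_async_task(struct task *task, void *context, unsigned int state)
{
	struct my_task_ctx *ctx = context;
	struct event_hdl_async_event *event;

	/* near real-time hint only: may briefly disagree with the list */
	if (event_hdl_async_equeue_size(&ctx->e_queue) > 1000) {
		/* e.g. warn or adapt batching; never treat the value as exact */
	}

	while ((event = event_hdl_async_equeue_pop(&ctx->e_queue))) {
		my_handle_event(event);            /* hypothetical consumer */
		event_hdl_async_free_event(event); /* release the event when done */
	}
	return task;
}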

If 68e692da0 ("MINOR: event_hdl: add event handler base api")
is being backported, then this commit should be backported with it.

include/haproxy/event_hdl-t.h
include/haproxy/event_hdl.h
src/event_hdl.c

diff --git a/include/haproxy/event_hdl-t.h b/include/haproxy/event_hdl-t.h
index e1966fd07685018cfb94899c75722564b17f2be7..eb5d05737d0b29d247f9d7b3c20d4d544fce921b 100644
--- a/include/haproxy/event_hdl-t.h
+++ b/include/haproxy/event_hdl-t.h
@@ -79,8 +79,14 @@ struct event_hdl_sub_list_head {
 
 /* event_hdl_sub_list is an alias (please use this for portability) */
 typedef struct event_hdl_sub_list_head event_hdl_sub_list;
+
+struct event_hdl_async_equeue_head {
+       struct mt_list head;
+       uint32_t size; /* near realtime size, not fully synced with head (to be used as a hint) */
+};
+
 /* event_hdl_async_equeue is an alias to mt_list (please use this for portability) */
-typedef struct mt_list event_hdl_async_equeue;
+typedef struct event_hdl_async_equeue_head event_hdl_async_equeue;
 
 /* subscription mgmt from event */
 struct event_hdl_sub_mgmt
diff --git a/include/haproxy/event_hdl.h b/include/haproxy/event_hdl.h
index 6b396ada2d67a06a759b6a2d47f82baf36806a78..41dc44674fbf6e5c1dfc0cd3942d10599a8bbb3c 100644
--- a/include/haproxy/event_hdl.h
+++ b/include/haproxy/event_hdl.h
@@ -416,19 +416,31 @@ void event_hdl_async_free_event(struct event_hdl_async_event *e);
 /* use this for advanced async mode to initialize event queue */
 static inline void event_hdl_async_equeue_init(event_hdl_async_equeue *queue)
 {
-       MT_LIST_INIT(queue);
+       MT_LIST_INIT(&queue->head);
+       queue->size = 0;
 }
 
 /* use this for advanced async mode to pop an event from event queue */
 static inline struct event_hdl_async_event *event_hdl_async_equeue_pop(event_hdl_async_equeue *queue)
 {
-       return MT_LIST_POP(queue, struct event_hdl_async_event *, mt_list);
+       struct event_hdl_async_event *event;
+
+       event = MT_LIST_POP(&queue->head, struct event_hdl_async_event *, mt_list);
+       if (event)
+               HA_ATOMIC_DEC(&queue->size);
+       return event;
 }
 
 /* use this for advanced async mode to check if the event queue is empty */
 static inline int event_hdl_async_equeue_isempty(event_hdl_async_equeue *queue)
 {
-       return MT_LIST_ISEMPTY(queue);
+       return MT_LIST_ISEMPTY(&queue->head);
+}
+
+/* use this for advanced async mode to get the event queue size */
+static inline uint32_t event_hdl_async_equeue_size(event_hdl_async_equeue *queue)
+{
+       return HA_ATOMIC_LOAD(&queue->size);
 }
 
 /* use this to initialize <sub_list> event subscription list */
diff --git a/src/event_hdl.c b/src/event_hdl.c
index 37a4adcc2aca39f34c7802391086e9d6ed35392d..aecca8706727f4927e23d780ca97f5b6b296146c 100644
--- a/src/event_hdl.c
+++ b/src/event_hdl.c
@@ -317,7 +317,8 @@ static inline void _event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
                 * consumed the END event before the wakeup, and some tasks
                 * kill themselves (ie: normal async mode) when they receive such event
                 */
-               lock = MT_LIST_APPEND_LOCKED(del_sub->hdl.async_equeue, &del_sub->async_end->mt_list);
+               HA_ATOMIC_INC(&del_sub->hdl.async_equeue->size);
+               lock = MT_LIST_APPEND_LOCKED(&del_sub->hdl.async_equeue->head, &del_sub->async_end->mt_list);
 
                /* wake up the task */
                event_hdl_task_wakeup(del_sub->hdl.async_task);
@@ -462,7 +463,7 @@ struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
                                /* memory error */
                                goto memory_error;
                        }
-                       MT_LIST_INIT(&task_ctx->e_queue);
+                       event_hdl_async_equeue_init(&task_ctx->e_queue);
                        task_ctx->func = new_sub->hdl.async_ptr;
 
                        new_sub->hdl.async_equeue = &task_ctx->e_queue;
@@ -785,7 +786,8 @@ static int _event_hdl_publish(event_hdl_sub_list *sub_list, struct event_hdl_sub
 
                                /* appending new event to event hdl queue */
                                MT_LIST_INIT(&new_event->mt_list);
-                               MT_LIST_APPEND(cur_sub->hdl.async_equeue, &new_event->mt_list);
+                               HA_ATOMIC_INC(&cur_sub->hdl.async_equeue->size);
+                               MT_LIST_APPEND(&cur_sub->hdl.async_equeue->head, &new_event->mt_list);
 
                                /* wake up the task */
                                event_hdl_task_wakeup(cur_sub->hdl.async_task);
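
A note on the ordering visible in these hunks: the publish and unsubscribe
paths increment the counter just before MT_LIST_APPEND, while the pop helper
decrements it just after MT_LIST_POP, which is what "shortly before insertion
and shortly after removal" refers to. The standalone sketch below (plain C11
with a mutex standing in for the mt_list, not haproxy code) illustrates one
consequence of that ordering: the hint can momentarily over-report, but it
does not under-report elements that are actually in the queue.

#include <pthread.h>
#include <stdatomic.h>

struct node {
	struct node *next;
};

struct hint_queue {
	pthread_mutex_t lock;   /* stands in for the mt_list locking */
	struct node *head;
	atomic_uint size;       /* near real-time size, hint only */
};

static void hint_push(struct hint_queue *q, struct node *n)
{
	atomic_fetch_add(&q->size, 1);    /* shortly before insertion */
	pthread_mutex_lock(&q->lock);
	n->next = q->head;
	q->head = n;
	pthread_mutex_unlock(&q->lock);
}

static struct node *hint_pop(struct hint_queue *q)
{
	struct node *n;

	pthread_mutex_lock(&q->lock);
	n = q->head;
	if (n)
		q->head = n->next;
	pthread_mutex_unlock(&q->lock);
	if (n)
		atomic_fetch_sub(&q->size, 1);  /* shortly after removal */
	return n;
}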