MEDIUM: task: replace the global rq_lock with a per-rq one
author    Willy Tarreau <w@1wt.eu>
          Thu, 16 Jun 2022 14:58:17 +0000 (16:58 +0200)
committer Willy Tarreau <w@1wt.eu>
          Fri, 1 Jul 2022 17:15:14 +0000 (19:15 +0200)
There's no point having a global rq_lock now that we have one shared RQ
per thread; let's have one lock per runqueue instead.
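
The core of the change below is that the lock moves from a file-scope global
into struct thread_ctx, so a wakeup targeting one thread's run queue no longer
contends with wakeups targeting any other. As a rough, standalone illustration
(not HAProxy code), the following sketch uses pthread mutexes in place of
HA_SPIN_LOCK()/HA_SPIN_UNLOCK() and hypothetical "sketch_"-prefixed names to
contrast the old single-lock pattern with the new per-run-queue one:

/* Standalone sketch, not HAProxy code: it only illustrates the locking
 * pattern of this commit -- one lock embedded in each per-thread context
 * instead of a single global run-queue lock.  pthread mutexes stand in for
 * HAProxy's HA_SPIN_LOCK()/HA_SPIN_UNLOCK(), and every "sketch_"-prefixed
 * name is hypothetical.
 */
#include <pthread.h>

#define SKETCH_MAX_THREADS 64

struct sketch_thread_ctx {
	void *rqueue_shared;          /* stands in for the shared eb_root run queue */
	pthread_mutex_t rqsh_lock;    /* per-run-queue lock, one per thread context */
};

static struct sketch_thread_ctx sketch_ctx[SKETCH_MAX_THREADS];

/* Old scheme: every cross-thread wakeup serializes on one global lock. */
static pthread_mutex_t sketch_global_rq_lock = PTHREAD_MUTEX_INITIALIZER;

static void sketch_wakeup_global(int thr, void *task)
{
	(void)task;
	pthread_mutex_lock(&sketch_global_rq_lock);   /* all threads contend here */
	/* ... insert task into sketch_ctx[thr].rqueue_shared ... */
	pthread_mutex_unlock(&sketch_global_rq_lock);
}

/* New scheme: only wakeups targeting the same run queue contend. */
static void sketch_wakeup_per_rq(int thr, void *task)
{
	(void)task;
	pthread_mutex_lock(&sketch_ctx[thr].rqsh_lock);
	/* ... insert task into sketch_ctx[thr].rqueue_shared ... */
	pthread_mutex_unlock(&sketch_ctx[thr].rqsh_lock);
}

/* Each per-context lock must be initialized once at startup. */
static void sketch_init_ctx(void)
{
	for (int thr = 0; thr < SKETCH_MAX_THREADS; thr++)
		pthread_mutex_init(&sketch_ctx[thr].rqsh_lock, NULL);
}

Under this scheme the only remaining contention is between threads enqueueing
to the same victim queue (and that queue's owner when it dequeues), which is
the point of the commit: with one shared RQ per thread, a single global lock
buys nothing.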

include/haproxy/task.h
include/haproxy/tinfo-t.h
src/task.c

diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 6c4a5f7fb8502e0259c5843f2fd3353c801de175..de54b1ce6f5d0a84ff414c5c77c2c702749cb298 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -101,7 +101,6 @@ extern struct pool_head *pool_head_notification;
 extern struct eb_root timers;      /* sorted timers tree, global */
 #endif
 
-__decl_thread(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
 __decl_thread(extern HA_RWLOCK_T wq_lock);    /* RW lock related to the wait queue */
 
 void __tasklet_wakeup_on(struct tasklet *tl, int thr);
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index 94d08d4f772828a2e6ab4ebb940d5dd40a67f9b1..f9534d1111d01c829efcc628b521c61cadd50d5b 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -25,6 +25,7 @@
 #include <import/ebtree-t.h>
 
 #include <haproxy/api-t.h>
+#include <haproxy/thread-t.h>
 
 /* tasklet classes */
 enum {
@@ -104,6 +105,7 @@ struct thread_ctx {
        uint64_t prev_mono_time;            /* previous system wide monotonic time  */
 
        struct eb_root rqueue_shared;       /* run queue fed by other threads */
+       __decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
 
        ALWAYS_ALIGN(128);
 };
diff --git a/src/task.c b/src/task.c
index f972142243fed5adf7cbce5cf68eff3e42ca4760..b7678c15a47dba36f97d91a1a728009a918a5f34 100644
--- a/src/task.c
+++ b/src/task.c
@@ -38,7 +38,6 @@ DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification)
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
-__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
 __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
 
 #ifdef USE_THREAD
@@ -235,7 +234,7 @@ void __task_wakeup(struct task *t)
                root = &ha_thread_ctx[thr].rqueue_shared;
 
                _HA_ATOMIC_INC(&grq_total);
-               HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+               HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
 
                if (t->tid < 0)
                        global_tasks_mask = all_threads_mask;
@@ -265,7 +264,7 @@ void __task_wakeup(struct task *t)
 
 #ifdef USE_THREAD
        if (thr != tid) {
-               HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+               HA_SPIN_UNLOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
 
                /* If all threads that are supposed to handle this task are sleeping,
                 * wake one.
@@ -835,13 +834,13 @@ void process_runnable_tasks()
        while (lpicked + gpicked < budget) {
                if ((global_tasks_mask & tid_bit) && !grq) {
 #ifdef USE_THREAD
-                       HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+                       HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
                        grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
-                                       HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+                                       HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
                                }
                        }
 #endif
@@ -876,7 +875,7 @@ void process_runnable_tasks()
                                grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
                                if (!grq) {
                                        global_tasks_mask &= ~tid_bit;
-                                       HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+                                       HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
                                }
                        }
                        gpicked++;
@@ -891,7 +890,7 @@ void process_runnable_tasks()
 
        /* release the rqueue lock */
        if (grq) {
-               HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+               HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
                grq = NULL;
        }