From: Willy Tarreau
Date: Thu, 16 Jun 2022 14:58:17 +0000 (+0200)
Subject: MEDIUM: task: replace the global rq_lock with a per-rq one
X-Git-Tag: v2.7-dev2~143
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b17dd6cc194163fe60ce7387d407d976d8588209;p=thirdparty%2Fhaproxy.git

MEDIUM: task: replace the global rq_lock with a per-rq one

There's no point having a global rq_lock now that we have one shared
RQ per thread; let's have one lock per runqueue instead.
---

diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 6c4a5f7fb8..de54b1ce6f 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -101,7 +101,6 @@ extern struct pool_head *pool_head_notification;
 extern struct eb_root timers;  /* sorted timers tree, global */
 #endif
 
-__decl_thread(extern HA_SPINLOCK_T rq_lock);  /* spin lock related to run queue */
 __decl_thread(extern HA_RWLOCK_T wq_lock);    /* RW lock related to the wait queue */
 
 void __tasklet_wakeup_on(struct tasklet *tl, int thr);
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index 94d08d4f77..f9534d1111 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -25,6 +25,7 @@
 #include
 
 #include
+#include <haproxy/thread-t.h>
 
 /* tasklet classes */
 enum {
@@ -104,6 +105,7 @@ struct thread_ctx {
 	uint64_t prev_mono_time;        /* previous system wide monotonic time */
 
 	struct eb_root rqueue_shared;   /* run queue fed by other threads */
+	__decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
 
 	ALWAYS_ALIGN(128);
 };
diff --git a/src/task.c b/src/task.c
index f972142243..b7678c15a4 100644
--- a/src/task.c
+++ b/src/task.c
@@ -38,7 +38,6 @@ DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification)
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
-__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
 __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
 
 #ifdef USE_THREAD
@@ -235,7 +234,7 @@ void __task_wakeup(struct task *t)
 		root = &ha_thread_ctx[thr].rqueue_shared;
 
 		_HA_ATOMIC_INC(&grq_total);
-		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
 
 		if (t->tid < 0)
 			global_tasks_mask = all_threads_mask;
@@ -265,7 +264,7 @@
 
 #ifdef USE_THREAD
 	if (thr != tid) {
-		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
 
 		/* If all threads that are supposed to handle this task are sleeping,
 		 * wake one.
@@ -835,13 +834,13 @@ void process_runnable_tasks()
 	while (lpicked + gpicked < budget) {
 		if ((global_tasks_mask & tid_bit) && !grq) {
 #ifdef USE_THREAD
-			HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+			HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 			grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
 			if (unlikely(!grq)) {
 				grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
 				if (!grq) {
 					global_tasks_mask &= ~tid_bit;
-					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 				}
 			}
 #endif
@@ -876,7 +875,7 @@
 				grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
 				if (!grq) {
 					global_tasks_mask &= ~tid_bit;
-					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 				}
 			}
 			gpicked++;
@@ -891,7 +890,7 @@
 
 	/* release the rqueue lock */
 	if (grq) {
-		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 		grq = NULL;
 	}
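Below is a minimal standalone sketch of the pattern this patch adopts: one
spinlock embedded in each per-thread context instead of a single global
run-queue lock. This is not HAProxy code; the pthread spinlock, the toy FIFO
and all names (thread_ctx, rq_push, rq_pick, MAX_THREADS, QSZ) are
illustrative stand-ins for HA_SPINLOCK_T, the eb32sc run-queue trees and the
real scheduler functions.

#include <pthread.h>
#include <stdio.h>

#define MAX_THREADS 4
#define QSZ 64

/* One context per thread: the lock now lives next to the queue it
 * protects, instead of one global lock shared by all queues.
 */
struct thread_ctx {
	pthread_spinlock_t rqsh_lock;  /* protects this thread's shared queue */
	unsigned int head, tail;       /* toy FIFO standing in for the ebtree */
	int queue[QSZ];
} __attribute__((aligned(128)));       /* keep contexts on separate cache lines */

static struct thread_ctx ha_thread_ctx[MAX_THREADS];

/* Wake path: only the target thread's lock is taken, so wakeups aimed
 * at different threads proceed in parallel.
 */
static void rq_push(int thr, int task_id)
{
	struct thread_ctx *ctx = &ha_thread_ctx[thr];

	pthread_spin_lock(&ctx->rqsh_lock);
	ctx->queue[ctx->tail++ % QSZ] = task_id;
	pthread_spin_unlock(&ctx->rqsh_lock);
}

/* Scheduler path: each thread drains its own queue under its own lock. */
static int rq_pick(int thr)
{
	struct thread_ctx *ctx = &ha_thread_ctx[thr];
	int task_id = -1;

	pthread_spin_lock(&ctx->rqsh_lock);
	if (ctx->head != ctx->tail)
		task_id = ctx->queue[ctx->head++ % QSZ];
	pthread_spin_unlock(&ctx->rqsh_lock);
	return task_id;
}

int main(void)
{
	for (int i = 0; i < MAX_THREADS; i++)
		pthread_spin_init(&ha_thread_ctx[i].rqsh_lock, PTHREAD_PROCESS_PRIVATE);

	rq_push(1, 42);          /* e.g. thread 0 waking a task owned by thread 1 */
	printf("picked task %d\n", rq_pick(1));
	return 0;
}

The aligned(128) above mirrors the ALWAYS_ALIGN(128) already present at the
end of the real struct thread_ctx: with one lock per queue, keeping each
context on its own cache lines prevents the new per-queue locks from
contending again through false sharing. Note also the asymmetry visible in
the diff: __task_wakeup() takes the target thread's lock
(ha_thread_ctx[thr].rqsh_lock) while process_runnable_tasks() takes only the
local one (th_ctx->rqsh_lock), so threads operating on different queues no
longer serialize against each other.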