From: Willy Tarreau
Date: Thu, 16 Jun 2022 13:59:36 +0000 (+0200)
Subject: MINOR: task: replace global_tasks_mask with a check for tree's emptiness
X-Git-Tag: v2.7-dev2~141
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c958c70ec8bca6f816cf93f58e9089b01cc94d33;p=thirdparty%2Fhaproxy.git

MINOR: task: replace global_tasks_mask with a check for tree's emptiness

This bit field used to be a per-thread cache of the result of the last
lookup of the presence of a task for each thread in the shared cache.
Since we now know that each thread has its own shared cache, a test of
emptiness is now sufficient to decide whether or not the shared tree
has a task for the current thread. Let's just remove this mask.
---

diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 4fb1fe31cf..03ade6fc66 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -89,7 +89,6 @@
 
 /* a few exported variables */
 
-extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 
 extern struct pool_head *pool_head_task;
@@ -182,8 +181,8 @@ static inline int task_in_wq(struct task *t)
 /* returns true if the current thread has some work to do */
 static inline int thread_has_tasks(void)
 {
-	return ((int)!!(global_tasks_mask & tid_bit) |
-	        (int)!eb_is_empty(&th_ctx->rqueue)   |
+	return ((int)!eb_is_empty(&th_ctx->rqueue)   |
+	        (int)!eb_is_empty(&th_ctx->rqueue_shared) |
 	        (int)!!th_ctx->tl_class_mask |
 	        (int)!MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list));
 }
diff --git a/src/debug.c b/src/debug.c
index 1e9a5ca4d7..a55d087b05 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -168,7 +168,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 	              (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ',
 	              thr + 1, ha_get_pthread_id(thr),
 	              thread_has_tasks(),
-	              !!(global_tasks_mask & thr_bit),
+	              !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared),
 	              !eb_is_empty(&ha_thread_ctx[thr].timers),
 	              !eb_is_empty(&ha_thread_ctx[thr].rqueue),
 	              !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) &&
diff --git a/src/task.c b/src/task.c
index f0ca126ceb..29505b9bf2 100644
--- a/src/task.c
+++ b/src/task.c
@@ -35,7 +35,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
  */
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
-volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
 __decl_aligned_rwlock(wq_lock);    /* RW lock related to the wait queue */
@@ -235,10 +234,6 @@ void __task_wakeup(struct task *t)
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
 
 		HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
-		if (t->tid < 0)
-			global_tasks_mask = all_threads_mask;
-		else
-			global_tasks_mask |= 1UL << thr;
 		t->rq.key = _HA_ATOMIC_ADD_FETCH(&ha_thread_ctx[thr].rqueue_ticks, 1);
 		__ha_barrier_store();
 	} else
@@ -562,8 +557,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 
 		if (unlikely(queue > TL_NORMAL &&
 			     budget_mask & (1 << TL_NORMAL) &&
-			     (!eb_is_empty(&th_ctx->rqueue) ||
-			      (global_tasks_mask & tid_bit)))) {
+			     (!eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared)))) {
 			/* a task was woken up by a bulk tasklet or another thread */
 			break;
 		}
@@ -784,7 +778,7 @@ void process_runnable_tasks()
 
 	/* normal tasklets list gets a default weight of ~37% */
 	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
-	    !eb_is_empty(&th_ctx->rqueue) || (global_tasks_mask & tid_bit))
+	    !eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared))
 		max[TL_NORMAL] = default_weights[TL_NORMAL];
 
 	/* bulk tasklets list gets a default weight of ~13% */
@@ -831,16 +825,14 @@ void process_runnable_tasks()
 	lpicked = gpicked = 0;
 	budget = max[TL_NORMAL] - tt->tasks_in_list;
 	while (lpicked + gpicked < budget) {
-		if ((global_tasks_mask & tid_bit) && !grq) {
+		if (!eb_is_empty(&th_ctx->rqueue_shared) && !grq) {
 #ifdef USE_THREAD
 			HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 			grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
 			if (unlikely(!grq)) {
 				grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-				if (!grq) {
-					global_tasks_mask &= ~tid_bit;
+				if (!grq)
 					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-				}
 			}
 #endif
 		}
@@ -872,10 +864,8 @@ void process_runnable_tasks()
 
 			if (unlikely(!grq)) {
 				grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-				if (!grq) {
-					global_tasks_mask &= ~tid_bit;
+				if (!grq)
 					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-				}
 			}
 			gpicked++;
 		}
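
For illustration only, and not part of the patch above: a minimal standalone C
sketch of the idea behind the change. A bit-mask cache of "thread X has shared
work" must be updated on every enqueue and dequeue, whereas once the shared run
queue is per-thread, simply testing that thread's own tree for emptiness answers
the same question with no extra bookkeeping. All names prefixed with sketch_ are
hypothetical and do not exist in HAProxy; the real code uses eb_is_empty() on
th_ctx->rqueue_shared as shown in the diff.

    /* sketch.c - simplified model, not HAProxy code */
    #include <stdio.h>
    #include <stddef.h>

    #define SKETCH_MAX_THREADS 4

    /* Simplified stand-in for the root of th_ctx->rqueue_shared:
     * a NULL root means the tree is empty.
     */
    struct sketch_rq {
            void *root;   /* first node of the (simplified) tree, NULL if empty */
    };

    struct sketch_thread_ctx {
            struct sketch_rq rqueue;         /* thread-local run queue */
            struct sketch_rq rqueue_shared;  /* shared run queue owned by this thread */
    };

    static struct sketch_thread_ctx sketch_ctx[SKETCH_MAX_THREADS];

    /* Old approach: a global bit field caches "thread X has a task in the
     * shared queue" and must be kept in sync on every enqueue/dequeue.
     */
    static volatile unsigned long sketch_global_tasks_mask;

    static int sketch_has_shared_work_old(int thr)
    {
            return !!(sketch_global_tasks_mask & (1UL << thr));
    }

    /* New approach: the shared queue belongs to one thread, so an emptiness
     * test on that thread's own tree is sufficient and cannot go stale.
     */
    static int sketch_has_shared_work_new(int thr)
    {
            return sketch_ctx[thr].rqueue_shared.root != NULL;
    }

    int main(void)
    {
            int dummy;

            /* enqueue "a task" on thread 2's shared queue: only the tree
             * changes, no mask update is needed anymore.
             */
            sketch_ctx[2].rqueue_shared.root = &dummy;

            printf("old check: %d, new check: %d\n",
                   sketch_has_shared_work_old(2),   /* 0: mask was never updated */
                   sketch_has_shared_work_new(2));  /* 1: tree is non-empty */
            return 0;
    }

The sketch also shows why the mask was removable rather than merely redundant:
it duplicated state that the tree already carries, so every update site (as seen
in __task_wakeup() and process_runnable_tasks() above) was pure overhead.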