git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: task: replace global_tasks_mask with a check for tree's emptiness
author Willy Tarreau <w@1wt.eu>
Thu, 16 Jun 2022 13:59:36 +0000 (15:59 +0200)
committer Willy Tarreau <w@1wt.eu>
Fri, 1 Jul 2022 17:15:14 +0000 (19:15 +0200)
This bit field used to be a per-thread cache of the result of the last
lookup of the presence of a task for each thread in the shared cache.
Since we now know that each thread has its own shared cache, a test of
emptiness is now sufficient to decide whether or not the shared tree
has a task for the current thread. Let's just remove this mask.

include/haproxy/task.h
src/debug.c
src/task.c

index 4fb1fe31cf0b463f73bb0f9a87015177776447a0..03ade6fc6690f61e0355d45cf73338fc85d1ec0a 100644 (file)
@@ -89,7 +89,6 @@
 
 
 /* a few exported variables */
-extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 
 extern struct pool_head *pool_head_task;
@@ -182,8 +181,8 @@ static inline int task_in_wq(struct task *t)
 /* returns true if the current thread has some work to do */
 static inline int thread_has_tasks(void)
 {
-       return ((int)!!(global_tasks_mask & tid_bit) |
-               (int)!eb_is_empty(&th_ctx->rqueue) |
+       return ((int)!eb_is_empty(&th_ctx->rqueue) |
+               (int)!eb_is_empty(&th_ctx->rqueue_shared) |
                (int)!!th_ctx->tl_class_mask |
                (int)!MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list));
 }
index 1e9a5ca4d789618494262dc45ab245e231b1d20b..a55d087b058bdfd11aa6ccf5c81e8e1d6ba4624b 100644 (file)
@@ -168,7 +168,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
                      (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
                      ha_get_pthread_id(thr),
                      thread_has_tasks(),
-                     !!(global_tasks_mask & thr_bit),
+                     !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared),
                      !eb_is_empty(&ha_thread_ctx[thr].timers),
                      !eb_is_empty(&ha_thread_ctx[thr].rqueue),
                      !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) &&
index f0ca126ceba4382ee35ec9350afb5dfe277ca5a4..29505b9bf258537d1fa151faff2fc9abf63e67b6 100644 (file)
@@ -35,7 +35,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
  */
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
-volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 
 __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
@@ -235,10 +234,6 @@ void __task_wakeup(struct task *t)
                _HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
                HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
 
-               if (t->tid < 0)
-                       global_tasks_mask = all_threads_mask;
-               else
-                       global_tasks_mask |= 1UL << thr;
                t->rq.key = _HA_ATOMIC_ADD_FETCH(&ha_thread_ctx[thr].rqueue_ticks, 1);
                __ha_barrier_store();
        } else
@@ -562,8 +557,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 
                        if (unlikely(queue > TL_NORMAL &&
                                     budget_mask & (1 << TL_NORMAL) &&
-                                    (!eb_is_empty(&th_ctx->rqueue) ||
-                                     (global_tasks_mask & tid_bit)))) {
+                                    (!eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared)))) {
                                /* a task was woken up by a bulk tasklet or another thread */
                                break;
                        }
@@ -784,7 +778,7 @@ void process_runnable_tasks()
 
        /* normal tasklets list gets a default weight of ~37% */
        if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
-           !eb_is_empty(&th_ctx->rqueue) || (global_tasks_mask & tid_bit))
+           !eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared))
                max[TL_NORMAL] = default_weights[TL_NORMAL];
 
        /* bulk tasklets list gets a default weight of ~13% */
@@ -831,16 +825,14 @@ void process_runnable_tasks()
        lpicked = gpicked = 0;
        budget = max[TL_NORMAL] - tt->tasks_in_list;
        while (lpicked + gpicked < budget) {
-               if ((global_tasks_mask & tid_bit) && !grq) {
+               if (!eb_is_empty(&th_ctx->rqueue_shared) && !grq) {
 #ifdef USE_THREAD
                        HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
                        grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-                               if (!grq) {
-                                       global_tasks_mask &= ~tid_bit;
+                               if (!grq)
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-                               }
                        }
 #endif
                }
@@ -872,10 +864,8 @@ void process_runnable_tasks()
 
                        if (unlikely(!grq)) {
                                grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-                               if (!grq) {
-                                       global_tasks_mask &= ~tid_bit;
+                               if (!grq)
                                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-                               }
                        }
                        gpicked++;
                }