From: Willy Tarreau
Date: Wed, 24 Feb 2021 15:13:03 +0000 (+0100)
Subject: MINOR: tasks: do not maintain the rqueue_size counter anymore
X-Git-Tag: v2.4-dev10~62
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2c41d77ebc8c47374784d00e7026b484fbb56389;p=thirdparty%2Fhaproxy.git

MINOR: tasks: do not maintain the rqueue_size counter anymore

This one is exclusively used as a boolean nowadays and is non-zero only
when the thread-local run queue is not empty. Better check the root tree's
pointer and avoid updating this counter all the time.
---
diff --git a/include/haproxy/task-t.h b/include/haproxy/task-t.h
index 34727fa4e2..1a103a265a 100644
--- a/include/haproxy/task-t.h
+++ b/include/haproxy/task-t.h
@@ -78,10 +78,9 @@ struct task_per_thread {
 	struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
 	unsigned int rqueue_ticks;        /* Insertion counter for the run queue */
 	int task_list_size;               /* Number of tasks among the tasklets */
-	int rqueue_size;                  /* Number of elements in the per-thread run queue */
 	int current_queue;                /* points to current tasklet list being run, -1 if none */
-	struct task *current;             /* current task (not tasklet) */
 	unsigned int rq_total;            /* total size of the run queue, prio_tree + tasklets */
+	struct task *current;             /* current task (not tasklet) */
 	uint8_t tl_class_mask;            /* bit mask of non-empty tasklets classes */
 	__attribute__((aligned(64))) char end[0];
 };
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 58418189eb..61ac55efa0 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -179,7 +179,7 @@ static inline int task_in_wq(struct task *t)
 static inline int thread_has_tasks(void)
 {
 	return (!!(global_tasks_mask & tid_bit) |
-	        (sched->rqueue_size > 0) |
+	        !eb_is_empty(&sched->rqueue) |
 	        !!sched->tl_class_mask |
 	        !MT_LIST_ISEMPTY(&sched->shared_tasklet_list));
 }
@@ -325,7 +325,6 @@ static inline struct task *__task_unlink_rq(struct task *t)
 	else
 #endif
 	{
-		sched->rqueue_size--;
 		_HA_ATOMIC_SUB(&sched->rq_total, 1);
 	}
 	eb32sc_delete(&t->rq);
diff --git a/src/task.c b/src/task.c
index 153f7d6387..6dffbdec1d 100644
--- a/src/task.c
+++ b/src/task.c
@@ -150,13 +150,8 @@ void __task_wakeup(struct task *t, struct eb_root *root)
 	if (root == &rqueue) {
 		_HA_ATOMIC_OR(&t->state, TASK_GLOBAL);
 		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
-	} else
-#endif
-	{
-		int nb = ((void *)root - (void *)&task_per_thread[0].rqueue) / sizeof(task_per_thread[0]);
-		task_per_thread[nb].rqueue_size++;
 	}
-#ifdef USE_THREAD
+
 	/* If all threads that are supposed to handle this task are sleeping,
 	 * wake one.
 	 */
@@ -428,7 +423,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 
 		if (unlikely(queue > TL_NORMAL &&
 			     budget_mask & (1 << TL_NORMAL) &&
-			     ((sched->rqueue_size > 0) ||
+			     (!eb_is_empty(&sched->rqueue) ||
 			      (global_tasks_mask & tid_bit)))) {
 			/* a task was woken up by a bulk tasklet or another thread */
 			break;
@@ -609,7 +604,7 @@ void process_runnable_tasks()
 
 	/* normal tasklets list gets a default weight of ~37% */
 	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
-	    (sched->rqueue_size > 0) || (global_tasks_mask & tid_bit))
+	    !eb_is_empty(&sched->rqueue) || (global_tasks_mask & tid_bit))
 		max[TL_NORMAL] = default_weights[TL_NORMAL];
 
 	/* bulk tasklets list gets a default weight of ~13% */
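
The idea behind the change can be reduced to a small standalone sketch (this is
not HAProxy code: the runqueue/rq_node types and the rq_* helpers below are made
up for illustration). An "is anything queued?" question can be answered from the
root pointer of the structure itself, the way eb_is_empty() answers it for an
ebtree root, so there is no need to keep a separate element counter in sync on
every insert and removal:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical per-thread run queue: the head pointer alone tells us
 * whether any work is pending, so no separate size counter is kept. */
struct rq_node {
	struct rq_node *next;
};

struct runqueue {
	struct rq_node *head;            /* NULL <=> queue is empty */
};

/* Emptiness is derived from the root pointer (the same idea as checking
 * the ebtree root with eb_is_empty()) instead of from a counter that
 * would have to be updated on every insert and delete. */
static inline int rq_is_empty(const struct runqueue *rq)
{
	return rq->head == NULL;
}

static void rq_insert(struct runqueue *rq, struct rq_node *n)
{
	n->next = rq->head;
	rq->head = n;
}

static struct rq_node *rq_pop(struct runqueue *rq)
{
	struct rq_node *n = rq->head;

	if (n)
		rq->head = n->next;
	return n;
}

int main(void)
{
	struct runqueue rq = { .head = NULL };
	struct rq_node a, b;

	printf("empty at start:       %d\n", rq_is_empty(&rq)); /* prints 1 */
	rq_insert(&rq, &a);
	rq_insert(&rq, &b);
	printf("empty after inserts:  %d\n", rq_is_empty(&rq)); /* prints 0 */
	rq_pop(&rq);
	rq_pop(&rq);
	printf("empty after draining: %d\n", rq_is_empty(&rq)); /* prints 1 */
	return 0;
}

This mirrors what the patch does for the per-thread run queue: since rqueue_size
was only ever compared against zero, checking the tree root directly gives the
same boolean and removes the counter updates from the enqueue and dequeue paths.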