From: Willy Tarreau
Date: Thu, 25 Feb 2021 06:51:18 +0000 (+0100)
Subject: MINOR: task: make grq_total atomic to move it outside of the grq_lock
X-Git-Tag: v2.4-dev10~41
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=45499c56d30a00ada0dda5d96fac3bde293d0d5b;p=thirdparty%2Fhaproxy.git

MINOR: task: make grq_total atomic to move it outside of the grq_lock

Instead of decrementing grq_total once per task picked from the global
run queue, let's do it at once after the loop like we do for other
counters. This simplifies the code everywhere. It is not expected to
bring noticeable improvements however, since global tasks tend to be
less common nowadays.
---

diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index 96a929fc9b..ce6418260c 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -88,7 +88,7 @@
 
 /* a few exported variables */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
-extern unsigned int grq_total;    /* total number of entries in the global run queue */
+extern unsigned int grq_total;    /* total number of entries in the global run queue, atomic */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
@@ -324,8 +324,6 @@ static inline struct task *task_unlink_rq(struct task *t)
 
 	if (likely(task_in_rq(t))) {
 		eb32sc_delete(&t->rq);
-		if (is_global)
-			grq_total--;
 		done = 1;
 	}
 
@@ -333,8 +331,10 @@ static inline struct task *task_unlink_rq(struct task *t)
 		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
 	if (done) {
-		if (is_global)
+		if (is_global) {
 			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
+			_HA_ATOMIC_SUB(&grq_total, 1);
+		}
 		else
 			_HA_ATOMIC_SUB(&sched->rq_total, 1);
 		if (t->nice)
diff --git a/src/task.c b/src/task.c
index b2054320e9..9249068eca 100644
--- a/src/task.c
+++ b/src/task.c
@@ -46,7 +46,7 @@ __decl_aligned_rwlock(wq_lock); /* RW lock related to the wait queue */
 #ifdef USE_THREAD
 struct eb_root timers;      /* sorted timers tree, global, accessed under wq_lock */
 struct eb_root rqueue;      /* tree constituting the global run queue, accessed under rq_lock */
-unsigned int grq_total;     /* total number of entries in the global run queue, use grq_lock */
+unsigned int grq_total;     /* total number of entries in the global run queue, atomic */
 static unsigned int global_rqueue_ticks;  /* insertion count in the grq, use rq_lock */
 #endif
 
@@ -159,10 +159,10 @@ void __task_wakeup(struct task *t)
 #ifdef USE_THREAD
 	if (t->thread_mask != tid_bit && global.nbthread != 1) {
 		root = &rqueue;
+		_HA_ATOMIC_ADD(&grq_total, 1);
 
 		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 		global_tasks_mask |= t->thread_mask;
-		grq_total++;
 		t->rq.key = ++global_rqueue_ticks;
 		__ha_barrier_store();
 	} else
@@ -708,7 +708,6 @@ void process_runnable_tasks()
 		else {
 			t = eb32sc_entry(grq, struct task, rq);
 			grq = eb32sc_next(grq, tid_bit);
-			grq_total--;
 			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
 			eb32sc_delete(&t->rq);
 
@@ -738,8 +737,10 @@ void process_runnable_tasks()
 	if (lpicked + gpicked) {
 		tt->tl_class_mask |= 1 << TL_NORMAL;
 		_HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
-		if (gpicked)
+		if (gpicked) {
+			_HA_ATOMIC_SUB(&grq_total, gpicked);
 			_HA_ATOMIC_ADD(&tt->rq_total, gpicked);
+		}
 		activity[tid].tasksw += lpicked + gpicked;
 	}
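
For readers who want to try the pattern outside of HAProxy, below is a
minimal standalone sketch (not part of the commit) of the same idea: the
counter is maintained with atomic operations so it no longer needs to live
under the queue lock, and the consumer subtracts a whole batch once after
its picking loop instead of decrementing per item. C11 atomics and a
pthread mutex stand in for HAProxy's _HA_ATOMIC_ADD/_HA_ATOMIC_SUB macros
and the rq_lock spinlock; the run queue tree is reduced to a plain counter,
and the names enqueue_one/dequeue_batch are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint grq_total;        /* entries in the "global run queue", atomic */
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int queued;          /* stand-in for the queue itself, under rq_lock */

/* producer: bump the counter outside the lock, then insert under the lock,
 * mirroring how the commit moves _HA_ATOMIC_ADD before HA_SPIN_LOCK.
 */
static void enqueue_one(void)
{
	atomic_fetch_add(&grq_total, 1);
	pthread_mutex_lock(&rq_lock);
	queued++;
	pthread_mutex_unlock(&rq_lock);
}

/* consumer: pick up to <budget> entries under the lock, but update
 * grq_total only once after the loop, like the gpicked accounting in
 * process_runnable_tasks() above.
 */
static unsigned int dequeue_batch(unsigned int budget)
{
	unsigned int gpicked = 0;

	pthread_mutex_lock(&rq_lock);
	while (queued && gpicked < budget) {
		queued--;
		gpicked++;
	}
	pthread_mutex_unlock(&rq_lock);

	if (gpicked)
		atomic_fetch_sub(&grq_total, gpicked);
	return gpicked;
}

int main(void)
{
	unsigned int picked;

	for (int i = 0; i < 10; i++)
		enqueue_one();

	picked = dequeue_batch(4);
	printf("picked %u, %u left\n", picked, atomic_load(&grq_total));
	return 0;
}

The trade-off is the same as in the commit: the counter update no longer
extends the critical section, at the cost of one atomic per batch rather
than one per picked item. And as in the patched __task_wakeup(), the
counter is incremented before the element is actually inserted, so it may
transiently over-count while an insertion is in flight, but it never
under-counts.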