git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: task: make grq_total atomic to move it outside of the grq_lock
author: Willy Tarreau <w@1wt.eu>
Thu, 25 Feb 2021 06:51:18 +0000 (07:51 +0100)
committer: Willy Tarreau <w@1wt.eu>
Thu, 25 Feb 2021 08:44:16 +0000 (09:44 +0100)
Instead of decrementing grq_total once per task picked from the global
run queue, let's do it at once after the loop like we do for other
counters. This simplifies the code everywhere. It is not expected to
bring noticeable improvements however, since global tasks tend to be
less common nowadays.

include/haproxy/task.h
src/task.c

index 96a929fc9bdd2b521e81cb5a4c561147641145e0..ce6418260cdb0864f55cfe37675a8ecebec94ac1 100644 (file)
@@ -88,7 +88,7 @@
 
 /* a few exported variables */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
-extern unsigned int grq_total;    /* total number of entries in the global run queue */
+extern unsigned int grq_total;    /* total number of entries in the global run queue, atomic */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
@@ -324,8 +324,6 @@ static inline struct task *task_unlink_rq(struct task *t)
 
        if (likely(task_in_rq(t))) {
                eb32sc_delete(&t->rq);
-               if (is_global) {
-                       grq_total--;
                done = 1;
        }
 
@@ -333,8 +331,10 @@ static inline struct task *task_unlink_rq(struct task *t)
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
        if (done) {
-               if (is_global)
+               if (is_global) {
                        _HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
+                       _HA_ATOMIC_SUB(&grq_total, 1);
+               }
                else
                        _HA_ATOMIC_SUB(&sched->rq_total, 1);
                if (t->nice)
index b2054320e97dbcea8fd62edc2d43877392009d01..9249068eca56b1c06d99a38d0fba9a77b22bdbe9 100644 (file)
@@ -46,7 +46,7 @@ __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
 #ifdef USE_THREAD
 struct eb_root timers;      /* sorted timers tree, global, accessed under wq_lock */
 struct eb_root rqueue;      /* tree constituting the global run queue, accessed under rq_lock */
-unsigned int grq_total;     /* total number of entries in the global run queue, use grq_lock */
+unsigned int grq_total;     /* total number of entries in the global run queue, atomic */
 static unsigned int global_rqueue_ticks;  /* insertion count in the grq, use rq_lock */
 #endif
 
@@ -159,10 +159,10 @@ void __task_wakeup(struct task *t)
        if (t->thread_mask != tid_bit && global.nbthread != 1) {
                root = &rqueue;
 
+               _HA_ATOMIC_ADD(&grq_total, 1);
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 
                global_tasks_mask |= t->thread_mask;
-               grq_total++;
                t->rq.key = ++global_rqueue_ticks;
                __ha_barrier_store();
        } else
@@ -708,7 +708,6 @@ void process_runnable_tasks()
                else {
                        t = eb32sc_entry(grq, struct task, rq);
                        grq = eb32sc_next(grq, tid_bit);
-                       grq_total--;
                        _HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
                        eb32sc_delete(&t->rq);
 
@@ -738,8 +737,10 @@ void process_runnable_tasks()
        if (lpicked + gpicked) {
                tt->tl_class_mask |= 1 << TL_NORMAL;
                _HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
-               if (gpicked)
+               if (gpicked) {
+                       _HA_ATOMIC_SUB(&grq_total, gpicked);
                        _HA_ATOMIC_ADD(&tt->rq_total, gpicked);
+               }
                activity[tid].tasksw += lpicked + gpicked;
        }