git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: tasks: Get rid of active_tasks_mask.
author: Olivier Houchard <ohouchard@haproxy.com>
Wed, 29 May 2019 17:22:43 +0000 (19:22 +0200)
committer: Willy Tarreau <w@1wt.eu>
Wed, 29 May 2019 19:53:37 +0000 (21:53 +0200)
Remove the active_tasks_mask variable; we can deduce whether we have work
to do by other means, and it is costly to maintain. Instead, introduce a new
function, thread_has_tasks(), that returns non-zero if there are tasks
scheduled for the thread, zero otherwise.

include/proto/task.h
src/debug.c
src/haproxy.c
src/task.c

index 585d8900abfb7b9ee3734e3e61a7ed57533fbf24..d39d0f4d4f23f949587cd316982ca9e446fc7388 100644 (file)
@@ -83,7 +83,6 @@
 
 /* a few exported variables */
 extern unsigned int nb_tasks;     /* total number of tasks */
-extern volatile unsigned long active_tasks_mask; /* Mask of threads with active tasks */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int tasks_run_queue;    /* run queue size */
 extern unsigned int tasks_run_queue_cur;
@@ -233,7 +232,6 @@ static inline void tasklet_wakeup(struct tasklet *tl)
                return;
        LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list);
        task_per_thread[tid].task_list_size++;
-       _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
 
 }
@@ -541,6 +539,13 @@ static inline int notification_registered(struct list *wake)
        return !LIST_ISEMPTY(wake);
 }
 
+static inline int thread_has_tasks(void)
+{
+       return (!!(global_tasks_mask & tid_bit) |
+               (task_per_thread[tid].rqueue_size > 0) |
+               !LIST_ISEMPTY(&task_per_thread[tid].task_list));
+}
+
 /*
  * This does 3 things :
  *   - wake up all expired tasks
index b34df7201801536225c5d25a65581fa9f2a8fc74..fd760faacea68b455fc02020eeced47ff004cba4 100644 (file)
@@ -47,7 +47,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
                      "%c%cThread %-2u: act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
                      "             stuck=%d fdcache=%d prof=%d",
                      (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
-                     !!(active_tasks_mask & thr_bit),
+                     thread_has_tasks(),
                      !!(global_tasks_mask & thr_bit),
                      !eb_is_empty(&task_per_thread[thr].timers),
                      !eb_is_empty(&task_per_thread[thr].rqueue),
index 35dd514a65dc313da4deabf3c44d48ccee9e888f..df9a686001cb8434364e3169ce9fe50580a376df 100644 (file)
@@ -2528,14 +2528,14 @@ static void run_poll_loop()
                wake = 1;
                if (fd_cache_mask & tid_bit)
                        activity[tid].wake_cache++;
-               else if (active_tasks_mask & tid_bit)
+               else if (thread_has_tasks())
                        activity[tid].wake_tasks++;
                else if (signal_queue_len && tid == 0)
                        activity[tid].wake_signal++;
                else {
                        _HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
                        __ha_barrier_atomic_store();
-                       if (active_tasks_mask & tid_bit) {
+                       if (global_tasks_mask & tid_bit) {
                                activity[tid].wake_tasks++;
                                _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                        } else
index 0799d0089fb0d3b5b6de2cc3a58f0006e7160275..0ccc7af1710e67b1a95f424dfbd0f5cb80a4ca05 100644 (file)
@@ -35,7 +35,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
 unsigned int nb_tasks = 0;
-volatile unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int tasks_run_queue = 0;
 unsigned int tasks_run_queue_cur = 0;    /* copy of the run queue size */
@@ -82,7 +81,6 @@ void __task_wakeup(struct task *t, struct eb_root *root)
                __ha_barrier_store();
        }
 #endif
-       _HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
        t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);
 
        if (likely(t->nice)) {
@@ -308,7 +306,7 @@ void process_runnable_tasks()
 
        ti->flags &= ~TI_FL_STUCK; // this thread is still running
 
-       if (!(active_tasks_mask & tid_bit)) {
+       if (!thread_has_tasks()) {
                activity[tid].empty_rq++;
                return;
        }
@@ -381,13 +379,6 @@ void process_runnable_tasks()
                grq = NULL;
        }
 
-       if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
-               _HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-               __ha_barrier_atomic_load();
-               if (global_tasks_mask & tid_bit)
-                       _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
-       }
-
        while (max_processed > 0 && !LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
                struct task *t;
                unsigned short state;
@@ -449,10 +440,8 @@ void process_runnable_tasks()
                max_processed--;
        }
 
-       if (!LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
-               _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
+       if (!LIST_ISEMPTY(&task_per_thread[tid].task_list))
                activity[tid].long_rq++;
-       }
 }
 
 /*