git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: task: permanently flag tasklets waking themselves up
author:    Willy Tarreau <w@1wt.eu>
           Fri, 31 Jan 2020 09:48:10 +0000 (10:48 +0100)
committer: Willy Tarreau <w@1wt.eu>
           Fri, 31 Jan 2020 16:45:10 +0000 (17:45 +0100)
Commit a17664d829 ("MEDIUM: tasks: automatically requeue into the bulk
queue an already running tasklet") tried to inflict a penalty to
self-requeuing tasks/tasklets which correspond to those involved in
large, high-latency data transfers, for the benefit of all other
processing which requires a low latency. However, it turns out that
while it ought to do this on a case-by-case basis, basing itself on
the RUNNING flag isn't accurate because this flag doesn't live for
tasklets, so we'd rather need a distinct flag to tag such tasklets.

This commit introduces TASK_SELF_WAKING to mark tasklets acting like
this. For now it's still set when TASK_RUNNING is present but this
will have to change. The flag is kept across wakeups.

include/proto/task.h
include/types/task.h
src/task.c

index ca963a1d36991e771911d4fb78a68ac68652e94b..53119b9d0dba9e38d0208a73c4c8b9201531e75a 100644 (file)
@@ -245,10 +245,16 @@ static inline void tasklet_wakeup(struct tasklet *tl)
        if (likely(tl->tid < 0)) {
                /* this tasklet runs on the caller thread */
                if (LIST_ISEMPTY(&tl->list)) {
-                       if (tl->state & TASK_RUNNING)
+                       if (tl->state & TASK_SELF_WAKING) {
                                LIST_ADDQ(&task_per_thread[tid].tasklets[TL_BULK], &tl->list);
-                       else
+                       }
+                       else if (tl->state & TASK_RUNNING) {
+                               _HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
+                               LIST_ADDQ(&task_per_thread[tid].tasklets[TL_BULK], &tl->list);
+                       }
+                       else {
                                LIST_ADDQ(&task_per_thread[tid].tasklets[TL_URGENT], &tl->list);
+                       }
                        _HA_ATOMIC_ADD(&tasks_run_queue, 1);
                }
        } else {
index 5f7ae61e86bf86b24a2dce24da0a32b59c051dfc..6ca97671f01bc31de34b984b50eb037acf7e676d 100644 (file)
@@ -36,6 +36,7 @@
 #define TASK_QUEUED       0x0004  /* The task has been (re-)added to the run queue */
 #define TASK_SHARED_WQ    0x0008  /* The task's expiration may be updated by other
                                    * threads, must be set before first queue/wakeup */
+#define TASK_SELF_WAKING  0x0010  /* task/tasklet found waking itself */
 
 #define TASK_WOKEN_INIT   0x0100  /* woken up for initialisation purposes */
 #define TASK_WOKEN_TIMER  0x0200  /* woken up because of expired timer */
index 3eaa9b4f6aaeaf0d46828b42ef7120c50272c47a..f1f36a914f70973de6ab4107fa8e760320f6104a 100644 (file)
@@ -329,7 +329,7 @@ static int run_tasks_from_list(struct list *list, int max)
 
        while (done < max && !LIST_ISEMPTY(list)) {
                t = (struct task *)LIST_ELEM(list->n, struct tasklet *, list);
-               state = (t->state & TASK_SHARED_WQ) | TASK_RUNNING;
+               state = (t->state & (TASK_SHARED_WQ|TASK_SELF_WAKING)) | TASK_RUNNING;
                state = _HA_ATOMIC_XCHG(&t->state, state);
                __ha_barrier_atomic_store();
                __tasklet_remove_from_tasklet_list((struct tasklet *)t);