git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
BUG/MAJOR: sched: make sure task_kill() always queues the task
author: Willy Tarreau <w@1wt.eu>
Thu, 2 Jul 2020 12:14:00 +0000 (14:14 +0200)
committer: Willy Tarreau <w@1wt.eu>
Thu, 2 Jul 2020 12:14:00 +0000 (14:14 +0200)
task_kill() may fail to queue a task if this task has never ever run,
because its equivalent (tasklet->list) member has never been "emptied"
since it didn't pass through the LIST_DEL_INIT() that's performed by
run_tasks_from_lists(). This results in these tasks never being freed.

It happens during the mux takeover since the target task usually is
the timeout task which, by definition, has never run yet.

This fixes commit eb8c2c69f ("MEDIUM: sched: implement task_kill() to
kill a task") which was introduced after 2.2-dev11 and doesn't need to
be backported.

src/task.c

index 761fc29a3fa1aa534313d148dafa3dfd85fcaad4..7ce1cd2a37f88fd0b36b85be361fbd33bfa03949 100644 (file)
@@ -94,16 +94,18 @@ void task_kill(struct task *t)
                         * the task's first thread for the job.
                         */
                        thr = my_ffsl(t->thread_mask) - 1;
-                       if (MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
-                                        (struct mt_list *)&((struct tasklet *)t)->list)) {
-                               _HA_ATOMIC_ADD(&tasks_run_queue, 1);
-                               _HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
-                               if (sleeping_thread_mask & (1UL << thr)) {
-                                       _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
-                                       wake_thread(thr);
-                               }
-                               return;
+
+                       /* Beware: tasks that have never run don't have their ->list empty yet! */
+                       LIST_INIT(&((struct tasklet *)t)->list);
+                       BUG_ON(!MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
+                                            (struct mt_list *)&((struct tasklet *)t)->list));
+                       _HA_ATOMIC_ADD(&tasks_run_queue, 1);
+                       _HA_ATOMIC_ADD(&task_per_thread[thr].task_list_size, 1);
+                       if (sleeping_thread_mask & (1UL << thr)) {
+                               _HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
+                               wake_thread(thr);
                        }
+                       return;
                }
        }
 }