TASK_SHARED_WQ was set upon task creation and never changed afterwards.
Thus if a task was created to run anywhere (e.g. a check or a Lua task),
all its timers would always pass through the shared timers queue with a
lock. Now we know that tid<0 indicates a shared task, so we can use that
to decide whether or not to use the shared queue. The task might be
migrated using task_set_affinity() but it's always dequeued first so
the check will still be valid.
Not only does this remove a flag that's difficult to keep synchronized with
the thread ID, but it should also significantly lower the load on systems with
many checks. A quick test with 5000 servers and fast checks that were
saturating the CPU shows that the check rate increased by 20% (hence the
CPU usage dropped by 17%). It's worth noting that run_task_lists() almost
no longer appears in perf top now.
SHOW_FLAG(f, TASK_IN_LIST);
SHOW_FLAG(f, TASK_KILLED);
SHOW_FLAG(f, TASK_SELF_WAKING);
- SHOW_FLAG(f, TASK_SHARED_WQ);
SHOW_FLAG(f, TASK_QUEUED);
SHOW_FLAG(f, TASK_GLOBAL);
SHOW_FLAG(f, TASK_RUNNING);
#define TASK_RUNNING 0x00000001 /* the task is currently running */
#define TASK_GLOBAL 0x00000002 /* The task is currently in the global runqueue */
#define TASK_QUEUED 0x00000004 /* The task has been (re-)added to the run queue */
-#define TASK_SHARED_WQ 0x00000008 /* The task's expiration may be updated by other
- * threads, must be set before first queue/wakeup */
+/* unused 0x00000008 */
#define TASK_SELF_WAKING 0x00000010 /* task/tasklet found waking itself */
#define TASK_KILLED 0x00000020 /* task/tasklet killed, may now be freed */
#define TASK_IN_LIST 0x00000040 /* tasklet is in a tasklet list */
/* unused: 0x20000..0x80000000 */
/* These flags are persistent across scheduler calls */
-#define TASK_PERSISTENT (TASK_SHARED_WQ | TASK_SELF_WAKING | TASK_KILLED | \
+#define TASK_PERSISTENT (TASK_SELF_WAKING | TASK_KILLED | \
TASK_HEAVY | TASK_F_TASKLET | TASK_F_USR1)
struct notification {
unsigned long locked;
if (likely(task_in_wq(t))) {
- locked = t->state & TASK_SHARED_WQ;
- BUG_ON(!locked && t->tid != tid);
+ locked = t->tid < 0;
+ BUG_ON(t->tid >= 0 && t->tid != tid);
if (locked)
HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
__task_unlink_wq(t);
return;
#ifdef USE_THREAD
- if (task->state & TASK_SHARED_WQ) {
+ if (task->tid < 0) {
HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
__task_queue(task, &timers);
} else
#endif
{
- BUG_ON(task->tid != tid); // should have TASK_SHARED_WQ
+ BUG_ON(task->tid != tid);
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
__task_queue(task, &th_ctx->timers);
}
tid = 0;
#endif
t->tid = tid;
- if (tid < 0)
- t->state |= TASK_SHARED_WQ;
t->nice = 0;
t->calls = 0;
t->call_date = 0;
return;
#ifdef USE_THREAD
- if (task->state & TASK_SHARED_WQ) {
+ if (task->tid < 0) {
/* FIXME: is it really needed to lock the WQ during the check ? */
HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
if (task_in_wq(task))
void __task_queue(struct task *task, struct eb_root *wq)
{
#ifdef USE_THREAD
- BUG_ON((wq == &timers && !(task->state & TASK_SHARED_WQ)) ||
- (wq == &th_ctx->timers && (task->state & TASK_SHARED_WQ)) ||
+ BUG_ON((wq == &timers && task->tid >= 0) ||
+ (wq == &th_ctx->timers && task->tid < 0) ||
(wq != &timers && wq != &th_ctx->timers));
#endif
/* if this happens the process is doomed anyway, so better catch it now