 * protected by the global wq_lock, otherwise it necessarily belongs to the
 * current thread's queue and is queued without locking.
*/
-static inline void task_queue(struct task *task)
+#define task_queue(t) \
+ _task_queue(t, MK_CALLER(WAKEUP_TYPE_TASK_QUEUE, 0, 0))
+
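+/* Note: task_queue() is now a macro so that each call site expands its own
+ * MK_CALLER(), a per-site static const struct ha_caller recording (assuming
+ * its usual definition) the calling function, source location and wakeup
+ * type; _task_queue() attaches it to the task so debug dumps can show who
+ * last queued it.
+ */
+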
+static inline void _task_queue(struct task *task, const struct ha_caller *caller)
{
/* If we already have a place in the wait queue no later than the
 * timeout we're trying to set, we'll stay there, because it is very
 * unlikely that we will reach the timeout anyway. If the timeout
 * has been disabled, it's useless to leave the queue as well. We'll
 * rely on wake_expired_tasks() to catch the node and move it to the
 * proper place should it ever happen. Finally we only add the task
 * to the queue if it was not there or if it was further than what
 * we want.
 */
if (!tick_isset(task->expire))
	return;

#ifdef USE_THREAD
if (task->tid < 0) {
HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
- if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
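+ /* Swap the recorded caller atomically and keep the previous one;
+ * a struct pointer is always even, so an odd value can never be a
+ * valid ha_caller, hence the BUG_ON() below.
+ */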
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
__task_queue(task, &tg_ctx->timers);
+ }
HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
} else
#endif
{
BUG_ON(task->tid != tid);
- if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
__task_queue(task, &th_ctx->timers);
+ }
}
}
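
/* Usage sketch (illustrative only): call sites keep using the macro exactly
 * as before, and the caller tracking is transparent:
 *
 *     t->expire = tick_add(now_ms, MS_TO_TICKS(100));
 *     task_queue(t);    // expands to _task_queue(t, MK_CALLER(...))
 *
 * Code that does not want to record a call site may pass a NULL caller to
 * _task_queue() directly, which the likely(caller) test above permits.
 */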
/* Ensure <task> will be woken up at most at <when>. If the task is already in
 * the run queue (but not running), nothing is done. It may be used that way
 * with a delay:  task_schedule(task, tick_add(now_ms, delay));
 * It MUST NOT be used with a timer in the past, and even less with
 * TICK_ETERNITY (which would block all timers). Note that passing it directly
 * now_ms without using tick_add() will definitely make this happen once every
 * 49.7 days.
 */
-static inline void task_schedule(struct task *task, int when)
+#define task_schedule(t, w) \
+ _task_schedule(t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
+
+static inline void _task_schedule(struct task *task, int when, const struct ha_caller *caller)
{
/* TODO: mthread, check if there is no risk with this test */
if (task_in_rq(task))
	return;

#ifdef USE_THREAD
if (task->tid < 0) {
	HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
	if (task_in_wq(task))
		when = tick_first(when, task->expire);

	task->expire = when;
- if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
__task_queue(task, &tg_ctx->timers);
+ }
HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
} else
#endif
{
	BUG_ON(task->tid != tid);
	if (task_in_wq(task))
		when = tick_first(when, task->expire);

	task->expire = when;
- if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
__task_queue(task, &th_ctx->timers);
+ }
}
}
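
/* Usage sketch (illustrative only): unlike task_queue(), which queues at a
 * previously assigned task->expire, task_schedule() takes the deadline as an
 * argument:
 *
 *     task_schedule(t, tick_add(now_ms, delay));
 *
 * and, when the task is already queued, only ever moves the wakeup earlier
 * (tick_first() keeps the smaller of the two deadlines), never later.
 */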
case WAKEUP_TYPE_TASKLET_WAKEUP : return "tasklet_wakeup";
case WAKEUP_TYPE_TASKLET_WAKEUP_AFTER : return "tasklet_wakeup_after";
case WAKEUP_TYPE_TASK_DROP_RUNNING : return "task_drop_running";
+ case WAKEUP_TYPE_TASK_QUEUE : return "task_queue";
+ case WAKEUP_TYPE_TASK_SCHEDULE : return "task_schedule";
case WAKEUP_TYPE_APPCTX_WAKEUP : return "appctx_wakeup";
default : return "?";
}
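
/* Illustrative note: the two new strings are what debugging output (e.g.
 * panic dumps or "show tasks"-style reports, assuming such consumers) prints
 * as the wakeup origin, so a task last requeued through task_queue() is
 * reported as "task_queue".
 */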