extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
-extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
+extern THREAD_LOCAL struct task_per_thread *sched; /* the current thread's scheduler context */
#ifdef USE_THREAD
extern struct eb_root timers; /* sorted timers tree, global */
extern struct eb_root rqueue; /* tree constituting the run queue */
struct eb_root *root;
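/* A task strictly bound to the calling thread (or a single-threaded process)
 * can use the thread-local run queue; a task that may run on another thread
 * has to go through the shared global run queue instead.
 */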
if (t->thread_mask == tid_bit || global.nbthread == 1)
- root = &task_per_thread[tid].rqueue;
+ root = &sched->rqueue;
else
root = &rqueue;
#else
- struct eb_root *root = &task_per_thread[tid].rqueue;
+ struct eb_root *root = &sched->rqueue;
#endif
state = _HA_ATOMIC_OR(&t->state, f);
global_rqueue_size--;
} else
#endif
- task_per_thread[tid].rqueue_size--;
+ sched->rqueue_size--;
eb32sc_delete(&t->rq);
if (likely(t->nice))
_HA_ATOMIC_SUB(&niced_tasks, 1);
*/
static inline void tasklet_insert_into_tasklet_list(struct tasklet *tl)
{
- if (MT_LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list) == 1)
+ if (MT_LIST_ADDQ(&sched->task_list, &tl->list) == 1)
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
}
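/* Hedged usage sketch (the handler and caller names below are made up; it
 * assumes the usual tasklet_new()/tasklet_wakeup() helpers): callers do not
 * use the insertion helper above directly, they wake the tasklet, which ends
 * up queued on the calling thread's task_list.
 */
static struct task *example_io_cb(struct task *t, void *ctx, unsigned short state)
{
    /* tasklets run on the thread that queued them: keep this short and
     * non-blocking
     */
    return NULL;
}

static void example_schedule_io(void *conn_ctx)
{
    struct tasklet *tl = tasklet_new();

    if (!tl)
        return;
    tl->process = example_io_cb;
    tl->context = conn_ctx;
    tasklet_wakeup(tl);
}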
*/
static inline void __task_free(struct task *t)
{
- if (t == curr_task) {
- curr_task = NULL;
+ if (t == sched->current) {
+ sched->current = NULL;
__ha_barrier_store();
}
pool_free(pool_head_task, t);
/* There's no need to protect t->state with a lock, as the task
* has to run on the current thread.
*/
- if (t == curr_task || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
+ if (t == sched->current || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
__task_free(t);
else
t->process = NULL;
#endif
{
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
- __task_queue(task, &task_per_thread[tid].timers);
+ __task_queue(task, &sched->timers);
}
}
task->expire = when;
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
- __task_queue(task, &task_per_thread[tid].timers);
+ __task_queue(task, &sched->timers);
}
}
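/* Hedged usage sketch (the <t> pointer is assumed to be a valid task bound to
 * the calling thread): re-arm a task so it expires one second from now, using
 * the usual tick helpers.
 */
static void example_rearm(struct task *t)
{
    task_schedule(t, tick_add(now_ms, MS_TO_TICKS(1000)));
}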
static inline int thread_has_tasks(void)
{
return (!!(global_tasks_mask & tid_bit) |
- (task_per_thread[tid].rqueue_size > 0) |
- !MT_LIST_ISEMPTY(&task_per_thread[tid].task_list));
+ (sched->rqueue_size > 0) |
+ !MT_LIST_ISEMPTY(&sched->task_list));
}
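/* Sketch of a possible caller (the function name is hypothetical, not the
 * actual haproxy polling loop): if work is already pending for this thread,
 * the poller must be asked to return immediately rather than sleep until the
 * next timer.
 */
static inline int example_poll_timeout(int next_timer)
{
    return thread_has_tasks() ? 0 : next_timer;
}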
/* adds list item <item> to work list <work> and wakes up the associated task */
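/* The body is not part of this excerpt; a hedged sketch of what such a helper
 * presumably does (the <head> and <task> field names are assumptions here):
 * append the item to the shared list, then wake the task that owns and drains
 * it.
 */
static void example_work_list_add(struct work_list *work, struct mt_list *item)
{
    MT_LIST_ADDQ(&work->head, item);
    task_wakeup(work->task, TASK_WOKEN_OTHER);
}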
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
-THREAD_LOCAL struct task *curr_task = NULL; /* task (not tasklet) currently running or NULL */
+THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */
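/* The pointer starts out on thread 0's slot so that code running before the
 * worker threads exist can already dereference it; each thread is then
 * expected to re-point it at its own entry once <tid> is known. A minimal
 * sketch of that binding (the function name is hypothetical, not the exact
 * place haproxy performs it):
 */
static void example_bind_sched(void)
{
    sched = &task_per_thread[tid];
}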
__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock); /* RW lock related to the wait queue */
*/
int wake_expired_tasks()
{
- struct task_per_thread * const tt = &task_per_thread[tid]; // thread's tasks
+ struct task_per_thread * const tt = sched; // thread's tasks
struct task *task;
struct eb32_node *eb;
int ret = TICK_ETERNITY;
*/
void process_runnable_tasks()
{
- struct task_per_thread * const tt = &task_per_thread[tid]; // thread's tasks
+ struct task_per_thread * const tt = sched;
struct eb32sc_node *lrq = NULL; // next local run queue entry
struct eb32sc_node *grq = NULL; // next global run queue entry
struct task *t;
t->call_date = now_ns;
}
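/* Expose the task being run before calling it: __task_free() and the
 * destruction path shown in the header above check sched->current to decide
 * whether a task can be freed immediately or must be deferred.
 */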
- curr_task = t;
+ sched->current = t;
__ha_barrier_store();
if (likely(process == process_stream))
t = process_stream(t, ctx, state);
else if (process != NULL)
t = process(t, ctx, state);
else {
__task_free(t);
- curr_task = NULL;
+ sched->current = NULL;
__ha_barrier_store();
/* We don't want max_processed to be decremented if
* we're just freeing a destroyed task, we should only
*/
continue;
}
- curr_task = NULL;
+ sched->current = NULL;
__ha_barrier_store();
/* If there is a pending state we have to wake up the task
* immediately, else we defer it into wait queue