{
return (!!(global_tasks_mask & tid_bit) |
(sched->rqueue_size > 0) |
- !LIST_ISEMPTY(&sched->tasklets[TL_URGENT]) |
- !LIST_ISEMPTY(&sched->tasklets[TL_NORMAL]) |
- !LIST_ISEMPTY(&sched->tasklets[TL_BULK]) |
+ !!sched->tl_class_mask |
!MT_LIST_ISEMPTY(&sched->shared_tasklet_list));
}
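
The point of this hunk is to replace three per-class list walks with a single test of a bitmask that mirrors which classes currently hold tasklets. Below is a minimal standalone sketch of that idea, assuming one bit per TL_* class kept in a tl_class_mask field; the struct and helper names are illustrative only, not HAProxy's definitions.

/* Standalone illustration of the class-mask idea (not HAProxy code):
 * one bit per tasklet class, kept in sync with the per-class list heads,
 * so an emptiness check becomes a single integer test.
 */
#include <stdio.h>

enum { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES };

struct fake_sched {
	unsigned int tl_class_mask;   /* bit N set => class N has queued tasklets */
};

static int has_local_tasklets(const struct fake_sched *s)
{
	return !!s->tl_class_mask;    /* replaces three LIST_ISEMPTY() walks */
}

int main(void)
{
	struct fake_sched s = { .tl_class_mask = 0 };

	s.tl_class_mask |= 1 << TL_BULK;       /* a tasklet was queued into the bulk class */
	printf("has work: %d\n", has_local_tasklets(&s));   /* 1 */
	s.tl_class_mask &= ~(1 << TL_BULK);    /* class drained again */
	printf("has work: %d\n", has_local_tasklets(&s));   /* 0 */
	return 0;
}
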
if (LIST_ISEMPTY(&tl->list)) {
if (tl->state & TASK_SELF_WAKING) {
LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+ sched->tl_class_mask |= 1 << TL_BULK;
}
else if ((struct task *)tl == sched->current) {
_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
LIST_ADDQ(&sched->tasklets[TL_BULK], &tl->list);
+ sched->tl_class_mask |= 1 << TL_BULK;
}
else if (sched->current_queue < 0) {
LIST_ADDQ(&sched->tasklets[TL_URGENT], &tl->list);
+ sched->tl_class_mask |= 1 << TL_URGENT;
}
else {
LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
+ sched->tl_class_mask |= 1 << sched->current_queue;
}
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
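
For reference, the routing logic this hunk decorates with mask updates picks the target class as follows: known self-waking tasklets are demoted to the bulk class, wakeups coming from outside the scheduler go to the urgent class, and tasklets woken while a queue is being scanned stay in that queue. The sketch below restates that rule with illustrative names; it is not the patch's code.

/* Sketch of the class-selection rule applied above (illustrative names). */
#include <stdio.h>

enum { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES };

static int pick_class(int self_waking, int woke_itself_now, int current_queue)
{
	if (self_waking || woke_itself_now)
		return TL_BULK;              /* self-waking tasklet: lowest share */
	if (current_queue < 0)
		return TL_URGENT;            /* wakeup from outside the scheduler */
	return current_queue;            /* keep it in the queue being scanned */
}

int main(void)
{
	printf("%d\n", pick_class(0, 0, -1));        /* 0: TL_URGENT */
	printf("%d\n", pick_class(1, 0, -1));        /* 2: TL_BULK   */
	printf("%d\n", pick_class(0, 0, TL_NORMAL)); /* 1: stays in TL_NORMAL */
	return 0;
}
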
max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;
/* urgent tasklets list gets a default weight of ~50% */
- if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]) ||
+ if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
!MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
max[TL_URGENT] = default_weights[TL_URGENT];
/* normal tasklets list gets a default weight of ~37% */
- if (!LIST_ISEMPTY(&tt->tasklets[TL_NORMAL]) ||
+ if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
(sched->rqueue_size > 0) || (global_tasks_mask & tid_bit))
max[TL_NORMAL] = default_weights[TL_NORMAL];
/* bulk tasklets list gets a default weight of ~13% */
- if (!LIST_ISEMPTY(&tt->tasklets[TL_BULK]))
+ if ((tt->tl_class_mask & (1 << TL_BULK)))
max[TL_BULK] = default_weights[TL_BULK];
/* Now compute a fair share of the weights. Total may slightly exceed
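
A worked example of the budget split described by the comments above, assuming the 50/37/13 default weights and a proportional scale-up to the per-call budget with rounding up; the exact scaling code is outside this excerpt, and the extra run-queue/shared-list conditions on the urgent and normal classes are omitted here.

/* Toy budget split: only active classes get their default weight, then the
 * weights are scaled to the per-call budget. Values are assumptions for the
 * example, not taken from this hunk.
 */
#include <stdio.h>

enum { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES };

int main(void)
{
	const unsigned int default_weights[TL_CLASSES] = { 50, 37, 13 };
	unsigned int max[TL_CLASSES] = { 0, 0, 0 };
	unsigned int tl_class_mask = (1 << TL_URGENT) | (1 << TL_BULK); /* no normal work */
	unsigned int max_processed = 200;  /* per-call budget, chosen for the example */
	unsigned int max_total = 0;
	int q;

	for (q = 0; q < TL_CLASSES; q++)
		if (tl_class_mask & (1 << q))
			max[q] = default_weights[q];

	for (q = 0; q < TL_CLASSES; q++)
		max_total += max[q];

	/* scale each active class to the budget, rounding up, so the sum may
	 * slightly exceed max_processed as the comment above warns */
	for (q = 0; q < TL_CLASSES; q++)
		max[q] = max_total ? (max_processed * max[q] + max_total - 1) / max_total : 0;

	printf("urgent=%u normal=%u bulk=%u\n", max[TL_URGENT], max[TL_NORMAL], max[TL_BULK]);
	/* prints: urgent=159 normal=0 bulk=42 with the values above (sum 201 > 200) */
	return 0;
}
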
LIST_INIT(&((struct tasklet *)t)->list);
/* And add it to the local task list */
tasklet_insert_into_tasklet_list(&tt->tasklets[TL_NORMAL], (struct tasklet *)t);
+ tt->tl_class_mask |= 1 << TL_NORMAL;
tt->task_list_size++;
activity[tid].tasksw++;
}
* main list.
*/
tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
- if (tmp_list)
+ if (tmp_list) {
LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
+ if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
+ tt->tl_class_mask |= 1 << TL_URGENT;
+ }
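
The splice step can be modelled as: grab everything other threads queued in one shot, fold it into the urgent class, and raise the urgent bit only if something actually arrived. A toy version of that step, with counters standing in for the lists and a plain swap standing in for the atomic behead:

/* Toy model of the splice above (illustrative names, not HAProxy code). */
#include <stdio.h>

enum { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES };

struct toy_sched {
	unsigned int queued[TL_CLASSES]; /* stands in for the per-class lists */
	unsigned int tl_class_mask;
};

/* stands in for MT_LIST_BEHEAD(): detach and return everything queued by
 * other threads; reduced to a counter swap here */
static unsigned int behead_shared(unsigned int *shared)
{
	unsigned int grabbed = *shared;
	*shared = 0;
	return grabbed;
}

int main(void)
{
	struct toy_sched s = { { 0, 0, 0 }, 0 };
	unsigned int shared = 3;  /* three tasklets queued by other threads */
	unsigned int grabbed = behead_shared(&shared);

	if (grabbed) {
		s.queued[TL_URGENT] += grabbed;            /* LIST_SPLICE_END_DETACHED() */
		if (s.queued[TL_URGENT])
			s.tl_class_mask |= 1 << TL_URGENT;     /* the bit set by the patch */
	}
	printf("urgent=%u mask=0x%x\n", s.queued[TL_URGENT], s.tl_class_mask);
	return 0;
}
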
/* execute tasklets in each queue */
for (queue = 0; queue < TL_CLASSES; queue++) {
tt->current_queue = queue;
max_processed -= run_tasks_from_list(&tt->tasklets[queue], max[queue]);
tt->current_queue = -1;
+ if (LIST_ISEMPTY(&tt->tasklets[queue]))
+ tt->tl_class_mask &= ~(1 << queue);
}
}
if (max_processed > 0 && thread_has_tasks())
goto not_done_yet;
- if (!LIST_ISEMPTY(&sched->tasklets[TL_URGENT]) |
- !LIST_ISEMPTY(&sched->tasklets[TL_NORMAL]) |
- !LIST_ISEMPTY(&sched->tasklets[TL_BULK]))
+ if (tt->tl_class_mask)
activity[tid].long_rq++;
}
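
Putting the pieces together, the mask lifecycle over one scheduling pass looks roughly like the toy model below: bits are set on wakeup, cleared for every class that gets fully drained, and a non-zero mask at the end of the budget is what feeds the long_rq counter. This is a simplified model with a per-class budget, not HAProxy code.

/* End-to-end toy of the mask lifecycle over one pass (illustrative only). */
#include <stdio.h>

enum { TL_URGENT = 0, TL_NORMAL = 1, TL_BULK = 2, TL_CLASSES };

struct toy_sched {
	unsigned int queued[TL_CLASSES];
	unsigned int tl_class_mask;
	unsigned int long_rq;
};

static void wake(struct toy_sched *s, int class, unsigned int n)
{
	s->queued[class] += n;
	s->tl_class_mask |= 1u << class;       /* set on wakeup */
}

static void one_pass(struct toy_sched *s, unsigned int budget_per_class)
{
	int q;

	for (q = 0; q < TL_CLASSES; q++) {
		unsigned int done = s->queued[q] < budget_per_class ? s->queued[q] : budget_per_class;

		s->queued[q] -= done;              /* run_tasks_from_list() */
		if (!s->queued[q])
			s->tl_class_mask &= ~(1u << q); /* class fully drained */
	}
	if (s->tl_class_mask)                  /* work left over at end of budget */
		s->long_rq++;
}

int main(void)
{
	struct toy_sched s = { { 0, 0, 0 }, 0, 0 };

	wake(&s, TL_NORMAL, 5);
	wake(&s, TL_BULK, 40);
	one_pass(&s, 10);                      /* bulk cannot be drained in one pass */
	printf("mask=0x%x long_rq=%u\n", s.tl_class_mask, s.long_rq); /* mask=0x4 long_rq=1 */
	return 0;
}
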