/* a few exported variables */
extern unsigned int nb_tasks; /* total number of tasks */
-extern volatile unsigned long active_tasks_mask; /* Mask of threads with active tasks */
extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
extern unsigned int tasks_run_queue; /* run queue size */
extern unsigned int tasks_run_queue_cur; /* copy of the run queue size */
return;
LIST_ADDQ(&task_per_thread[tid].task_list, &tl->list);
task_per_thread[tid].task_list_size++;
- _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
_HA_ATOMIC_ADD(&tasks_run_queue, 1);
}
return !LIST_ISEMPTY(wake);
}
+/* returns non-zero if the current thread has some work to do */
+static inline int thread_has_tasks(void)
+{
+ return (!!(global_tasks_mask & tid_bit) |
+ (task_per_thread[tid].rqueue_size > 0) |
+ !LIST_ISEMPTY(&task_per_thread[tid].task_list));
+}
+
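The helper above is the heart of the change: instead of keeping a shared active_tasks_mask in sync on every queue operation, work presence is recomputed on demand from the global queue mask and the thread's two private queues. A minimal standalone sketch of the same idea, with simplified types and hypothetical names (not HAProxy's API):

/* Standalone model: work presence is derived from the queues themselves
 * rather than from a separately maintained "active" bitmask.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_THREADS 64

struct thread_ctx {
	int rqueue_size;    /* entries in the thread's private run queue */
	int task_list_size; /* entries in the thread's private tasklet list */
};

static _Atomic unsigned long global_mask;  /* threads with global-queue work */
static struct thread_ctx per_thread[MAX_THREADS];

/* a thread has work if its bit is set in the global queue mask or if
 * either of its private queues is non-empty.
 */
static int has_work(int tid)
{
	unsigned long bit = 1UL << tid;

	return (atomic_load(&global_mask) & bit) ||
	       per_thread[tid].rqueue_size > 0 ||
	       per_thread[tid].task_list_size > 0;
}

int main(void)
{
	per_thread[3].task_list_size = 1;
	printf("thread 3 has work: %d\n", has_work(3)); /* 1 */
	printf("thread 0 has work: %d\n", has_work(0)); /* 0 */
	return 0;
}

Callers that used to test "active_tasks_mask & tid_bit" simply call this helper instead, as the hunks below show.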
/*
* This does 3 things :
* - wake up all expired tasks
"%c%cThread %-2u: act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
" stuck=%d fdcache=%d prof=%d",
(thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
- !!(active_tasks_mask & thr_bit),
+ thread_has_tasks(),
!!(global_tasks_mask & thr_bit),
!eb_is_empty(&task_per_thread[thr].timers),
!eb_is_empty(&task_per_thread[thr].rqueue),
wake = 1;
if (fd_cache_mask & tid_bit)
activity[tid].wake_cache++;
- else if (active_tasks_mask & tid_bit)
+ else if (thread_has_tasks())
activity[tid].wake_tasks++;
else if (signal_queue_len && tid == 0)
activity[tid].wake_signal++;
else {
_HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
__ha_barrier_atomic_store();
- if (active_tasks_mask & tid_bit) {
+ if (global_tasks_mask & tid_bit) {
activity[tid].wake_tasks++;
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
} else
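This hunk is the sleep decision in the polling loop: the thread advertises itself in sleeping_thread_mask, issues a barrier, then re-checks for work that may have been queued in the meantime. The re-check now uses global_tasks_mask rather than the removed active_tasks_mask, presumably because the private queues are only fed by the owning thread, so the only work that can appear behind its back is in the global run queue. A standalone sketch of that handshake with C11 atomics (simplified, hypothetical names, not HAProxy code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long sleeping_mask; /* threads about to sleep */
static _Atomic unsigned long work_mask;     /* threads with pending remote work */

/* waker side: publish the work first, then signal the target if it sleeps */
static int wake_remote(int tid)
{
	unsigned long bit = 1UL << tid;

	atomic_fetch_or(&work_mask, bit);       /* publish work           */
	if (atomic_load(&sleeping_mask) & bit)  /* target already asleep? */
		return 1;                       /* must wake it up        */
	return 0;                               /* it will see the work   */
}

/* sleeper side: advertise the sleep intent, then re-check for work that
 * raced in between; abort the sleep if some is found.
 */
static int try_to_sleep(int tid)
{
	unsigned long bit = 1UL << tid;

	atomic_fetch_or(&sleeping_mask, bit);   /* advertise sleep        */
	if (atomic_load(&work_mask) & bit) {
		atomic_fetch_and(&sleeping_mask, ~bit); /* work raced in  */
		return 0;                       /* do not sleep           */
	}
	return 1;                               /* safe to sleep          */
}

int main(void)
{
	printf("%d\n", try_to_sleep(0)); /* 1: no work, would sleep */
	printf("%d\n", wake_remote(0));  /* 1: must signal thread 0 */
	return 0;
}

Because each side publishes its own state before reading the other's, at least one of them always observes the conflict, which is what the __ha_barrier_atomic_store() above guarantees in the real code.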
DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
unsigned int nb_tasks = 0;
-volatile unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
__ha_barrier_store();
}
#endif
- _HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
t->rq.key = _HA_ATOMIC_ADD(&rqueue_ticks, 1);
if (likely(t->nice)) {
ti->flags &= ~TI_FL_STUCK; // this thread is still running
- if (!(active_tasks_mask & tid_bit)) {
+ if (!thread_has_tasks()) {
activity[tid].empty_rq++;
return;
}
grq = NULL;
}
- if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
- _HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
- __ha_barrier_atomic_load();
- if (global_tasks_mask & tid_bit)
- _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
- }
-
while (max_processed > 0 && !LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
struct task *t;
unsigned short state;
max_processed--;
}
- if (!LIST_ISEMPTY(&task_per_thread[tid].task_list)) {
- _HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
+ if (!LIST_ISEMPTY(&task_per_thread[tid].task_list))
activity[tid].long_rq++;
- }
}
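With active_tasks_mask gone, exhausting the processing budget while tasklets remain only needs the long_rq accounting: the leftovers stay in the per-thread list and the next pass rediscovers them through thread_has_tasks(). A standalone toy model of that behaviour (hypothetical names, not HAProxy code):

#include <stdio.h>

#define BUDGET 4

static int queue = 10; /* pending entries for one thread         */
static int long_rq;    /* times the budget ran out with leftovers */

/* process at most BUDGET entries, leaving the rest for the next call */
static void process_runnable(void)
{
	int done = 0;

	while (done < BUDGET && queue > 0) {
		queue--;
		done++;
	}
	if (queue > 0)
		long_rq++; /* only accounting, no mask to maintain */
}

int main(void)
{
	while (queue > 0)  /* the outer loop re-checks the queue itself */
		process_runnable();
	printf("budget exhausted %d times\n", long_rq); /* prints 2 */
	return 0;
}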
/*