/* a few exported variables */
extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
-extern unsigned int grq_total; /* total number of entries in the global run queue, atomic */
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
extern struct pool_head *pool_head_task;
/* NOTE(review): diff fragment — the function signature and closing brace
 * are outside this chunk; lines prefixed with '-' are removed by the patch.
 * Presumably this returns the total number of tasks currently queued across
 * the process — TODO confirm against the full function in the original file. */
{
int thr, ret = 0;
/* Removed: the global run queue counter no longer exists after this patch,
 * so the old seed value from grq_total is dropped. */
-#ifdef USE_THREAD
- ret = _HA_ATOMIC_LOAD(&grq_total);
-#endif
/* Sum the per-thread run-queue counters; after the patch these counters
 * also account for entries that used to live in the global run queue. */
for (thr = 0; thr < global.nbthread; thr++)
ret += _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].rq_total);
return ret;
#ifdef USE_THREAD
struct eb_root timers; /* sorted timers tree, global, accessed under wq_lock */
/* Removed by this patch: the process-wide atomic counter of global run queue
 * entries is replaced by the per-thread ha_thread_ctx[].rq_total counters. */
-unsigned int grq_total; /* total number of entries in the global run queue, atomic */
#endif
/* NOTE(review): diff fragment from an enqueue path (enclosing function not
 * visible in this chunk). When queueing a task onto another thread, the
 * accounting moves from the removed global counter to that target thread's
 * own rq_total. The increment is done atomically BEFORE taking the target
 * thread's shared-runqueue spinlock. */
if (thr != tid) {
root = &ha_thread_ctx[thr].rqueue_shared;
- _HA_ATOMIC_INC(&grq_total);
+ _HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
/* NOTE(review): condition continues past this chunk — body not visible;
 * presumably handles tasks not yet bound to a thread (t->tid < 0). */
if (t->tid < 0)
/* NOTE(review): diff fragment from the task-picking path (enclosing function
 * not visible). lpicked/gpicked appear to count tasks picked from the local
 * and (formerly global, now shared) run queues respectively. */
if (lpicked + gpicked) {
tt->tl_class_mask |= 1 << TL_NORMAL;
_HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
/* Removed by this patch: with grq_total gone, globally-picked entries are
 * already accounted in the per-thread rq_total, so no transfer between the
 * two counters is needed anymore. */
-#ifdef USE_THREAD
- if (gpicked) {
- _HA_ATOMIC_SUB(&grq_total, gpicked);
- _HA_ATOMIC_ADD(&tt->rq_total, gpicked);
- }
-#endif
activity[tid].tasksw += lpicked + gpicked;
}