/* 1. global run queue */
#ifdef USE_THREAD
- rqnode = eb32sc_first(&rqueue, ~0UL);
- while (rqnode) {
- t = eb32sc_entry(rqnode, struct task, rq);
- entry = sched_activity_entry(tmp_activity, t->process);
- if (t->call_date) {
- lat = now_ns - t->call_date;
- if ((int64_t)lat > 0)
- entry->lat_time += lat;
+ for (thr = 0; thr < global.nbthread; thr++) {
+ /* task run queue */
+ rqnode = eb32sc_first(&ha_thread_ctx[thr].rqueue_shared, ~0UL);
+ while (rqnode) {
+ t = eb32sc_entry(rqnode, struct task, rq);
+ entry = sched_activity_entry(tmp_activity, t->process);
+ if (t->call_date) {
+ lat = now_ns - t->call_date;
+ if ((int64_t)lat > 0)
+ entry->lat_time += lat;
+ }
+ entry->calls++;
+ rqnode = eb32sc_next(rqnode, ~0UL);
}
- entry->calls++;
- rqnode = eb32sc_next(rqnode, ~0UL);
}
#endif
/* 2. all threads' local run queues */
#ifdef USE_THREAD
struct eb_root timers; /* sorted timers tree, global, accessed under wq_lock */
-struct eb_root rqueue; /* tree constituting the global run queue, accessed under rq_lock */
unsigned int grq_total; /* total number of entries in the global run queue, atomic */
-static unsigned int global_rqueue_ticks; /* insertion count in the grq, use rq_lock */
#endif
#ifdef USE_THREAD
if (thr != tid) {
- root = &rqueue;
+ root = &ha_thread_ctx[thr].rqueue_shared;
_HA_ATOMIC_INC(&grq_total);
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
global_tasks_mask = all_threads_mask;
else
global_tasks_mask |= 1UL << thr;
- t->rq.key = ++global_rqueue_ticks;
+ t->rq.key = _HA_ATOMIC_ADD_FETCH(&ha_thread_ctx[thr].rqueue_ticks, 1);
__ha_barrier_store();
} else
#endif
if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
- grq = eb32sc_lookup_ge(&rqueue, global_rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
+ grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
if (unlikely(!grq)) {
- grq = eb32sc_first(&rqueue, tid_bit);
+ grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
if (!grq) {
global_tasks_mask &= ~tid_bit;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
eb32sc_delete(&t->rq);
if (unlikely(!grq)) {
- grq = eb32sc_first(&rqueue, tid_bit);
+ grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
if (!grq) {
global_tasks_mask &= ~tid_bit;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
#ifdef USE_THREAD
/* cleanup the global run queue */
- tmp_rq = eb32sc_first(&rqueue, ~0UL);
+ tmp_rq = eb32sc_first(&th_ctx->rqueue_shared, ~0UL);
while (tmp_rq) {
t = eb32sc_entry(tmp_rq, struct task, rq);
tmp_rq = eb32sc_next(tmp_rq, ~0UL);
#ifdef USE_THREAD
memset(&timers, 0, sizeof(timers));
- memset(&rqueue, 0, sizeof(rqueue));
#endif
for (i = 0; i < MAX_THREADS; i++) {
for (q = 0; q < TL_CLASSES; q++)