extern struct pool_head *pool_head_notification;
extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
+#ifdef USE_THREAD
extern struct eb_root rqueue; /* tree constituting the run queue */
+#endif
extern struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
extern struct list task_list[MAX_THREADS]; /* List of tasks to be run, mixing tasks and tasklets */
extern int task_list_size[MAX_THREADS]; /* Number of tasks in the task_list */
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
static struct eb_root timers; /* sorted timers tree */
+#ifdef USE_THREAD
struct eb_root rqueue; /* tree constituting the run queue */
-struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
static int global_rqueue_size; /* Number of elements in the global runqueue */
+#endif
+struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
static int rqueue_size[MAX_THREADS]; /* Number of elements in the per-thread run queue */
static unsigned int rqueue_ticks; /* insertion count */
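/* Illustrative sketch, not part of the patch: rqueue_ticks appears to be a
 * monotonically increasing counter taken at insertion time and used as the
 * ebtree key, so equal-priority tasks dequeue in wakeup order. A standalone
 * sketch of that idea; all names (fake_task, ticks) are made up, and a plain
 * key comparison stands in for the ebtree:
 */
#include <stdio.h>

struct fake_task {
	unsigned int key;    /* tick sampled at insertion time */
	const char *name;
};

int main(void)
{
	static unsigned int ticks;           /* stands in for rqueue_ticks */
	struct fake_task a = { ++ticks, "a" };
	struct fake_task b = { ++ticks, "b" };

	/* Lower key runs first: the tick preserves FIFO among equal tasks. */
	printf("%s runs before %s\n", a.key < b.key ? a.name : b.name,
	       a.key < b.key ? b.name : a.name);
	return 0;
}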
void *expected = NULL;
int *rq_size;
+#ifdef USE_THREAD
if (root == &rqueue) {
rq_size = &global_rqueue_size;
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
- } else {
+ } else
+#endif
+ {
int nb = root - &rqueue_local[0];
rq_size = &rqueue_size[nb];
}
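/* Illustrative sketch, not part of the patch: `root - &rqueue_local[0]`
 * relies on C pointer subtraction returning an element count, not a byte
 * offset, to recover the owning thread's index. Standalone sketch with
 * stand-in names (fake_root, local_roots, root_to_tid):
 */
#include <stdio.h>

#define MAX_THREADS 4

struct fake_root { void *b[2]; };                /* stand-in for eb_root */
static struct fake_root local_roots[MAX_THREADS];

static int root_to_tid(struct fake_root *root)
{
	return (int)(root - &local_roots[0]);    /* element index, not bytes */
}

int main(void)
{
	printf("%d\n", root_to_tid(&local_roots[2]));   /* prints 2 */
	return 0;
}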
/* Make sure that, if the task isn't in the run queue yet, nobody else
 * inserts it in the meantime.
 */
redo:
if (unlikely(!HA_ATOMIC_CAS(&t->rq.node.leaf_p, &expected, (void *)0x1))) {
+#ifdef USE_THREAD
if (root == &rqueue)
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
return;
}
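/* Illustrative sketch, not part of the patch: the HA_ATOMIC_CAS above claims
 * the task's tree link by swapping NULL for the 0x1 sentinel, so only one
 * waker proceeds to insert. A C11 sketch of the same claim pattern using
 * standard atomics instead of HAProxy's macros; fake_node and try_claim are
 * made-up names:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_node { _Atomic(void *) leaf_p; };    /* NULL means "not queued" */

static bool try_claim(struct fake_node *n)
{
	void *expected = NULL;
	/* Winner swaps NULL -> sentinel and may insert; losers back off. */
	return atomic_compare_exchange_strong(&n->leaf_p, &expected, (void *)0x1);
}

int main(void)
{
	struct fake_node n = { NULL };
	int first = try_claim(&n);
	int second = try_claim(&n);
	printf("%d %d\n", first, second);   /* prints: 1 0 */
	return 0;
}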
/* There's a small race condition: when running a task, the thread first
 * sets TASK_RUNNING and only then unlinks it from the tree, so a
 * concurrent task_wakeup() may claim the node against a stale state.
 * Re-check the state once the node is claimed, and retry the insertion
 * if new events arrived while the task was neither running nor queued.
 */
state = (volatile unsigned short)(t->state);
if (unlikely(state != 0 && !(state & TASK_RUNNING)))
goto redo;
+#ifdef USE_THREAD
if (root == &rqueue)
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
return;
}
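/* Illustrative sketch, not part of the patch: the re-check above resolves to
 * two possible waker actions. This sketch mirrors the condition on t->state;
 * recheck and waker_action are made-up names, and 0x0001 for TASK_RUNNING is
 * an assumed illustrative value:
 */
#include <stdio.h>

#define TASK_RUNNING 0x0001

enum waker_action { GIVE_UP, RETRY };

/* state == 0: nothing left to signal; state & TASK_RUNNING: the runner will
 * requeue the task itself if needed; anything else means new events arrived
 * while the task was idle, so the waker must retry ("goto redo" above).
 */
static enum waker_action recheck(unsigned short state)
{
	if (state != 0 && !(state & TASK_RUNNING))
		return RETRY;
	return GIVE_UP;
}

int main(void)
{
	printf("%d %d %d\n", recheck(0), recheck(TASK_RUNNING), recheck(0x0002));
	/* prints: 0 0 1 */
	return 0;
}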
HA_ATOMIC_ADD(&tasks_run_queue, 1);
}
eb32sc_insert(root, &t->rq, t->thread_mask);
+#ifdef USE_THREAD
if (root == &rqueue) {
global_rqueue_size++;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
- } else {
+ } else
+#endif
+ {
int nb = root - &rqueue_local[0];
rqueue_size[nb]++;
{
struct task *t;
int max_processed;
+#ifdef USE_THREAD
uint64_t average = 0;
+#endif
tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
nb_tasks_cur = nb_tasks;
return;
}
+#ifdef USE_THREAD
average = tasks_run_queue / global.nbthread;
/* Get some elements from the global run queue and put them in the
 * local run queue: to keep things fair, pull just enough to bring the
 * local queue above the per-thread average.
 */
__task_unlink_rq(t);
__task_wakeup(t, &rqueue_local[tid]);
}
+#endif
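/* Illustrative sketch, not part of the patch: the loop above rebalances by
 * moving tasks from the global tree into the local one until the local count
 * passes the per-thread average. Arithmetic-only sketch with made-up counts;
 * plain ints stand in for the run-queue trees:
 */
#include <stdio.h>

int main(void)
{
	int nbthread = 4;
	int tasks_run_queue = 22;   /* total queued: global + all locals */
	int local_size = 2;         /* this thread's local queue size */
	int global_size = 10;

	int average = tasks_run_queue / nbthread;   /* 22 / 4 = 5 */

	/* Mirror of the patch: __task_unlink_rq() from the global tree, then
	 * __task_wakeup() into rqueue_local[tid], while at or below average.
	 */
	while (local_size <= average && global_size > 0) {
		global_size--;
		local_size++;
	}
	printf("local=%d global=%d\n", local_size, global_size);
	/* prints: local=6 global=6 */
	return 0;
}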
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
} else {
if (t != NULL) {
state = HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
if (state)
+#ifdef USE_THREAD
__task_wakeup(t, (t->thread_mask == tid_bit) ?
&rqueue_local[tid] : &rqueue);
+#else
+ __task_wakeup(t, &rqueue_local[tid]);
+#endif
else
task_queue(t);
}
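/* Illustrative sketch, not part of the patch: a still-active task is requeued
 * to the local tree only when its thread mask names exactly the current
 * thread; any wider mask forces the global tree so other threads can pick it
 * up. Standalone sketch of that selection rule; pick_queue and the target
 * enum are made-up names:
 */
#include <stdio.h>

enum target { LOCAL_QUEUE, GLOBAL_QUEUE };

static enum target pick_queue(unsigned long thread_mask, unsigned long tid_bit)
{
	return (thread_mask == tid_bit) ? LOCAL_QUEUE : GLOBAL_QUEUE;
}

int main(void)
{
	unsigned long tid_bit = 1UL << 2;               /* we are thread 2 */
	printf("%d %d\n",
	       pick_queue(1UL << 2, tid_bit),           /* 0: local queue */
	       pick_queue(~0UL, tid_bit));              /* 1: global queue */
	return 0;
}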
int i;
memset(&timers, 0, sizeof(timers));
+#ifdef USE_THREAD
memset(&rqueue, 0, sizeof(rqueue));
+#endif
HA_SPIN_INIT(&wq_lock);
HA_SPIN_INIT(&rq_lock);
for (i = 0; i < MAX_THREADS; i++) {