*/
avail = pool2_buffer->allocated - pool2_buffer->used - global.tune.reserved_bufs / 2;
- if (avail > (int)run_queue)
+ if (avail > (int)tasks_run_queue)
__stream_offer_buffers(avail);
}
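Read with hypothetical figures (not taken from any real run), the availability check above works out as follows:

/* hypothetical numbers, for illustration only:
 *   allocated = 1000, used = 600, tune.reserved_bufs = 100
 *   avail = 1000 - 600 - 100/2 = 350
 * tasks_run_queue = 120  -> 350 > 120, so __stream_offer_buffers(350) runs
 * tasks_run_queue = 400  -> 350 <= 400, so nothing is offered this pass
 */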
/* a few exported variables */
extern unsigned int nb_tasks; /* total number of tasks */
-extern unsigned int run_queue; /* run queue size */
-extern unsigned int run_queue_cur;
+extern unsigned int tasks_run_queue; /* run queue size */
+extern unsigned int tasks_run_queue_cur;
extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
}
/*
- * Unlink the task from the run queue. The run_queue size and number of niced
- * tasks are updated too. A pointer to the task itself is returned. The task
- * *must* already be in the run queue before calling this function. If unsure,
- * use the safer task_unlink_rq() function. Note that the pointer to the next
- * run queue entry is neither checked nor updated.
+ * Unlink the task from the run queue. The tasks_run_queue size and number of
+ * niced tasks are updated too. A pointer to the task itself is returned. The
+ * task *must* already be in the run queue before calling this function. If
+ * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
+ * next run queue entry is neither checked nor updated.
*/
static inline struct task *__task_unlink_rq(struct task *t)
{
eb32_delete(&t->rq);
- run_queue--;
+ tasks_run_queue--;
if (likely(t->nice))
niced_tasks--;
return t;
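A minimal sketch of the safer wrapper mentioned above, assuming the usual ebtree convention that a detached node has a NULL leaf_p pointer (the real task_unlink_rq() may perform this test differently):

static inline struct task *task_unlink_rq_sketch(struct task *t)
{
	/* only unlink, and thus decrement tasks_run_queue/niced_tasks, when
	 * the task is actually attached to the run queue tree
	 */
	if (t->rq.node.leaf_p)
		__task_unlink_rq(t);
	return t;
}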
break;
/* expire immediately if events are pending */
- if (fd_cache_num || run_queue || signal_queue_len || applets_active_queue)
+ if (fd_cache_num || tasks_run_queue || signal_queue_len || applets_active_queue)
next = now_ms;
/* The poller will ensure it returns around <next> */
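Purely as a hypothetical illustration (the helper below is made up; HAProxy's pollers use their own tick helpers), forcing <next> to now_ms amounts to handing the poller a zero sleep:

/* hypothetical: milliseconds left before the wakeup date <next> */
static inline int wait_ms_until_sketch(unsigned int next, unsigned int now_ms)
{
	int delta = (int)(next - now_ms); /* wrapping millisecond tick math */
	return delta > 0 ? delta : 0;     /* next == now_ms -> 0: poll returns immediately */
}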
global.rlimit_nofile,
global.maxsock, global.maxconn, global.maxpipes,
actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
- run_queue_cur, nb_tasks_cur, idle_pct
+ tasks_run_queue_cur, nb_tasks_cur, idle_pct
);
/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
info[INF_MAX_ZLIB_MEM_USAGE] = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
#endif
info[INF_TASKS] = mkf_u32(0, nb_tasks_cur);
- info[INF_RUN_QUEUE] = mkf_u32(0, run_queue_cur);
+ info[INF_RUN_QUEUE] = mkf_u32(0, tasks_run_queue_cur);
info[INF_IDLE_PCT] = mkf_u32(FN_AVG, idle_pct);
info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
if (global.desc)
struct stream *sess, *bak;
list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
- if (rqlimit <= run_queue)
+ if (rqlimit <= tasks_run_queue)
break;
if (sess->task->state & TASK_RUNNING)
struct pool_head *pool2_task;
unsigned int nb_tasks = 0;
-unsigned int run_queue = 0;
-unsigned int run_queue_cur = 0; /* copy of the run queue size */
+unsigned int tasks_run_queue = 0;
+unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
struct eb32_node *last_timer = NULL; /* optimization: last queued timer */
/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 32nds of the run queue size. A
- * nice value of -1024 sets the task to -run_queue*32, while a nice value of
- * 1024 sets the task to run_queue*32. The state flags are cleared, so the
- * caller will have to set its flags after this call.
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
* The task must not already be in the run queue. If unsure, use the safer
* task_wakeup() function.
*/
struct task *__task_wakeup(struct task *t)
{
- run_queue++;
+ tasks_run_queue++;
t->rq.key = ++rqueue_ticks;
if (likely(t->nice)) {
niced_tasks++;
if (likely(t->nice > 0))
- offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+ offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
else
- offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+ offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
t->rq.key += offset;
}
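As a worked illustration of the boost above (figures hypothetical): with tasks_run_queue = 64 and t->nice = 512, offset = 64 * 512 / 32 = 1024, so the task's key lands 1024 ticks later than its insertion tick, while a nice of -512 subtracts the same amount and schedules it earlier. At the extremes, nice = ±1024 shifts the key by ±tasks_run_queue * 32, i.e. ±2048 here.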
struct task *t;
unsigned int max_processed;
- run_queue_cur = run_queue; /* keep a copy for reporting */
+ tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
nb_tasks_cur = nb_tasks;
- max_processed = run_queue;
+ max_processed = tasks_run_queue;
- if (!run_queue)
+ if (!tasks_run_queue)
return;
if (max_processed > 200)
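Assuming the test above goes on to clamp max_processed to 200, a pass facing 1000 runnable tasks would handle at most 200 of them and leave the other 800 queued for later calls, which keeps the rest of the event loop (polling, timers) responsive between batches.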