include/proto/task.h
Functions for task management.
- Copyright (C) 2000-2008 Willy Tarreau - w@1wt.eu
+ Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_task();
+/* return non-zero if task is in run queue, otherwise zero */
+static inline int task_in_rq(struct task *t)
+{
+ return t->rq.node.leaf_p != NULL;
+}
+
+/* return non-zero if task is in wait queue, otherwise zero */
+static inline int task_in_wq(struct task *t)
+{
+ return t->wq.node.leaf_p != NULL;
+}
+
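+/* Both tests rely on the ebtree convention that a detached node has a NULL
+ * leaf_p pointer; task_init() below resets wq.node.leaf_p and rq.node.leaf_p
+ * so that a fresh task reports "not queued" on both sides.
+ */
+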
/* puts the task <t> in run queue with reason flags <f>, and returns <t> */
struct task *__task_wakeup(struct task *t);
static inline struct task *task_wakeup(struct task *t, unsigned int f)
{
- if (likely(!(t->state & TASK_IN_RUNQUEUE)))
+ if (likely(!task_in_rq(t)))
__task_wakeup(t);
t->state |= f;
return t;
}
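/* Usage sketch for task_wakeup(): the reason flag is OR'ed into t->state even
 * when the task is already queued, so several wakeup causes accumulate until
 * the task runs. TASK_WOKEN_MSG is assumed here to be one of the TASK_WOKEN_*
 * flags (only TASK_WOKEN_TIMER and TASK_WOKEN_ANY appear in this patch):
 *
 *	task_wakeup(t, TASK_WOKEN_MSG);
 */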
-/* removes the task <t> from the run queue if it was in it.
- * returns <t>.
+/*
+ * Unlink the task from the wait queue, and possibly update the last_timer
+ * pointer. A pointer to the task itself is returned. The task *must* already
+ * be in the wait queue before calling this function. If unsure, use the safer
+ * task_unlink_wq() function.
*/
-static inline struct task *task_sleep(struct task *t)
+static inline struct task *__task_unlink_wq(struct task *t)
+{
+ eb32_delete(&t->wq);
+ if (last_timer == t)
+ last_timer = NULL;
+ return t;
+}
+
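+/* removes the task from the wait queue if it was in it, and returns it; a
+ * task not in the wait queue is left untouched
+ */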
+static inline struct task *task_unlink_wq(struct task *t)
{
- if (t->state & TASK_IN_RUNQUEUE) {
- t->state = TASK_SLEEPING;
- eb32_delete(&t->eb);
- run_queue--;
- if (likely(t->nice))
- niced_tasks--;
- }
+ if (likely(task_in_wq(t)))
+ __task_unlink_wq(t);
return t;
}
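/* A minimal sketch of when each variant applies: the unchecked form saves a
 * test when the caller has just proven membership, e.g. while walking the
 * timer tree; everywhere else the checked form is the safe default:
 *
 *	task_unlink_wq(t);	// no-op if t is not in the wait queue
 */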
/*
- * unlinks the task from wherever it is queued :
- * - run_queue
- * - wait queue
- * A pointer to the task itself is returned.
+ * Unlink the task from the run queue. The run_queue size and number of niced
+ * tasks are updated too. A pointer to the task itself is returned. The task
+ * *must* already be in the run queue before calling this function. If unsure,
+ * use the safer task_unlink_rq() function.
*/
-static inline struct task *task_dequeue(struct task *t)
+static inline struct task *__task_unlink_rq(struct task *t)
+{
+ eb32_delete(&t->rq);
+ run_queue--;
+ if (likely(t->nice))
+ niced_tasks--;
+ return t;
+}
+
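+/* removes the task from the run queue if it was in it, and returns it; the
+ * run_queue and niced_tasks counters are only updated when an unlink really
+ * takes place
+ */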
+static inline struct task *task_unlink_rq(struct task *t)
{
- if (likely(t->eb.node.leaf_p)) {
- if (last_timer == t)
- last_timer = NULL;
- eb32_delete(&t->eb);
- }
+ if (likely(task_in_rq(t)))
+ __task_unlink_rq(t);
return t;
}
*/
static inline struct task *task_delete(struct task *t)
{
- task_dequeue(t);
- if (t->state & TASK_IN_RUNQUEUE) {
- run_queue--;
- if (likely(t->nice))
- niced_tasks--;
- }
+ task_unlink_wq(t);
+ task_unlink_rq(t);
return t;
}
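/* Teardown sketch: task_delete() only detaches; freeing remains a separate
 * step. task_free() is assumed to be the helper whose pool_free2() tail is
 * visible below:
 *
 *	task_delete(t);		// leave both queues
 *	task_free(t);		// then release the pool entry
 */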
*/
static inline struct task *task_init(struct task *t)
{
- t->eb.node.leaf_p = NULL;
+ t->wq.node.leaf_p = NULL;
+ t->rq.node.leaf_p = NULL;
t->state = TASK_SLEEPING;
t->nice = 0;
return t;
pool_free2(pool2_task, t);
}
-/* inserts <task> into its assigned wait queue, where it may already be. In this case, it
- * may be only moved or left where it was, depending on its timing requirements.
- * <task> is returned.
+/* Place <task> into the wait queue, where it may already be. If the expiration
+ * timer is infinite, the task is dequeued.
*/
-struct task *task_queue(struct task *task);
+void task_queue(struct task *task);
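/* Arming sketch for the new void prototype; tick_add() and MS_TO_TICKS() are
 * assumed from the ticks API, only now_ms itself appears in this patch:
 *
 *	t->expire = tick_add(now_ms, MS_TO_TICKS(5000));
 *	task_queue(t);		// (re)position t in the wait queue
 */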
/*
* This does 4 things :
src/task.c

/*
* Task management functions.
*
- * Copyright 2000-2008 Willy Tarreau <w@1wt.eu>
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
return ticks_to_tree(timeval_to_ticks(t));
}
-/* perform minimal intializations, report 0 in case of error, 1 if OK. */
-int init_task()
-{
- memset(&timers, 0, sizeof(timers));
- memset(&rqueue, 0, sizeof(rqueue));
- pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
- return pool2_task != NULL;
-}
-
-/* Puts the task <t> in run queue at a position depending on t->nice.
- * <t> is returned. The nice value assigns boosts in 32th of the run queue
- * size. A nice value of -1024 sets the task to -run_queue*32, while a nice
- * value of 1024 sets the task to run_queue*32.
+/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
+ * returned. The nice value assigns boosts in 32ths of the run queue size. A
+ * nice value of -1024 sets the task to -run_queue*32, while a nice value of
+ * 1024 sets the task to run_queue*32. The wakeup reason flags (TASK_WOKEN_*)
+ * are cleared, so the caller will have to set its own flags after this call.
+ * The task must not already be in the run queue. If unsure, use the safer
+ * task_wakeup() function.
*/
struct task *__task_wakeup(struct task *t)
{
- task_dequeue(t);
-
run_queue++;
- t->eb.key = ++rqueue_ticks;
+ t->rq.key = ++rqueue_ticks;
if (likely(t->nice)) {
int offset;
niced_tasks++;
if (likely(t->nice > 0))
offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
else
offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
- t->eb.key += offset;
+ t->rq.key += offset;
}
/* clear state flags at the same time */
- t->state = TASK_IN_RUNQUEUE;
+ t->state &= ~TASK_WOKEN_ANY;
- eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb);
+ eb32_insert(&rqueue[ticks_to_tree(t->rq.key)], &t->rq);
return t;
}
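/* Worked example of the nice offset: with run_queue == 1000 queued tasks, a
 * task niced to -256 gets offset = -(1000 * 256) / 32 = -8000, so its rq key
 * lands 8000 ticks before its plain insertion order and it is picked sooner;
 * nice = +256 would push it 8000 ticks later instead.
 */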
* task_queue()
*
* Inserts a task into the wait queue at the position given by its expiration
- * date. Note that the task must *not* already be in the wait queue nor in the
- * run queue, otherwise unpredictable results may happen. Tasks queued with an
- * eternity expiration date are simply returned. Last, tasks must not be queued
- * further than the end of the next tree, which is between <now_ms> and
- * <now_ms> + TIMER_SIGN_BIT ms (now+12days..24days in 32bit).
+ * date. It does not matter if the task was already in the wait queue or not,
+ * and it may even help if its position has not changed because we'll be able
+ * to return without doing anything. Tasks queued with an eternity expiration
+ * are just unlinked from the WQ. Last, tasks must not be queued further than
+ * the end of the next tree, which is between <now_ms> and <now_ms> +
+ * TIMER_SIGN_BIT ms (now+12days..24days in 32bit).
*/
-struct task *task_queue(struct task *task)
+void task_queue(struct task *task)
{
+ /* if the task is already in the wait queue, we may reuse its position
+ * or we will at least have to unlink it first.
+ */
+ if (task_in_wq(task)) {
+ if (task->wq.key == task->expire)
+ return;
+ __task_unlink_wq(task);
+ }
+
+ /* the task is not in the queue now */
if (unlikely(!task->expire))
- return task;
+ return;
- task->eb.key = task->expire;
+ task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
- if ((task->eb.key - now_ms) & TIMER_SIGN_BIT)
+ if ((task->wq.key - now_ms) & TIMER_SIGN_BIT)
/* we're queuing too far away or in the past (most likely) */
- return task;
+ return;
#endif
if (likely(last_timer &&
- last_timer->eb.key == task->eb.key &&
- last_timer->eb.node.node_p &&
- last_timer->eb.node.bit == -1)) {
+ last_timer->wq.key == task->wq.key &&
+ last_timer->wq.node.node_p &&
+ last_timer->wq.node.bit == -1)) {
/* Most often, last queued timer has the same expiration date, so
* if it's not queued at the root, let's queue a dup directly there.
* Note that we can only use dups at the dup tree's root (bit==-1).
*/
- eb_insert_dup(&last_timer->eb.node, &task->eb.node);
- return task;
+ eb_insert_dup(&last_timer->wq.node, &task->wq.node);
+ return;
}
- eb32_insert(&timers[ticks_to_tree(task->eb.key)], &task->eb);
- if (task->eb.node.bit == -1)
+ eb32_insert(&timers[ticks_to_tree(task->wq.key)], &task->wq);
+ if (task->wq.node.bit == -1)
last_timer = task; /* we only want dups at a tree's root */
- return task;
+ return;
}
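/* Requeue sketch: since task_queue() now compares wq.key with t->expire,
 * callers may invoke it unconditionally after each event; it returns without
 * touching the tree while the deadline is unchanged. new_deadline is a
 * hypothetical tick value:
 *
 *	t->expire = tick_first(t->expire, new_deadline);
 *	task_queue(t);	// cheap no-op unless the deadline moved
 */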
-
/*
 * Extracts all expired timers from the timer queue, wakes up all associated
 * tasks, and returns the date of the next event (or eternity).
do {
eb = eb32_first(&timers[tree]);
while (eb) {
- task = eb32_entry(eb, struct task, eb);
+ task = eb32_entry(eb, struct task, wq);
if ((now_ms - eb->key) & TIMER_SIGN_BIT) {
/* note that we don't need this check for the <previous>
* tree, but it's cheaper than duplicating the code.
/* detach the task from the queue and add the task to the run queue */
eb = eb32_next(eb);
- __task_wakeup(task);
- task->state |= TASK_WOKEN_TIMER;
+ __task_unlink_wq(task);
+ task_wakeup(task, TASK_WOKEN_TIMER);
}
tree = (tree + 1) & TIMER_TREE_MASK;
} while (((tree - now_tree) & TIMER_TREE_MASK) < TIMER_TREES/2);
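/* Worked example of the sign-bit test above: with now_ms == 0x00001000, a
 * key of 0x00002000 (still in the future) gives now_ms - key == 0xfffff000,
 * whose sign bit is set, so the tree scan can stop there; an expired key of
 * 0x00000800 gives 0x00000800 with the sign bit clear and the task is woken.
 */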
do {
eb = eb32_first(&rqueue[tree]);
while (eb) {
- t = eb32_entry(eb, struct task, eb);
+ t = eb32_entry(eb, struct task, rq);
/* detach the task from the run queue and execute it */
eb = eb32_next(eb);
+ __task_unlink_rq(t);
- run_queue--;
- if (likely(t->nice))
- niced_tasks--;
- t->state &= ~TASK_IN_RUNQUEUE;
- task_dequeue(t);
-
+ t->state |= TASK_RUNNING;
t->process(t, &temp);
+ t->state &= ~TASK_RUNNING;
*next = tick_first(*next, temp);
if (!--max_processed)
} while (tree != stop);
}
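/* Callback sketch matching the t->process(t, &temp) call above; it assumes
 * the void (*process)(struct task *t, int *next) prototype implied there,
 * and next_deadline is a hypothetical tick:
 *
 *	void my_handler(struct task *t, int *next)
 *	{
 *		// consume the TASK_WOKEN_* reasons found in t->state
 *		t->expire = next_deadline;
 *		task_queue(t);		// rearm the timeout
 *		*next = t->expire;	// report our next date upward
 *	}
 */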
+/* perform minimal initializations, report 0 in case of error, 1 if OK. */
+int init_task()
+{
+ memset(&timers, 0, sizeof(timers));
+ memset(&rqueue, 0, sizeof(rqueue));
+ pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
+ return pool2_task != NULL;
+}
+
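/* Allocation sketch, assuming pool_alloc2() is the counterpart of the
 * pool_free2() call seen earlier:
 *
 *	struct task *t = pool_alloc2(pool2_task);
 *	if (t)
 *		task_init(t);	// both queue nodes start detached
 */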
/*
* Local variables:
* c-indent-level: 8