extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
extern struct eb32_node *last_timer; /* optimization: last queued timer */
+extern struct eb32_node *rq_next; /* optimization: cached next run queue entry, must be adjusted on delete/insert */
/* return non-zero if task is in run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
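The body of task_in_rq() is not shown in this hunk. A minimal sketch of what it checks, assuming (as elsewhere in this scheduler) that the run queue link is the eb32_node member named rq and that the ebtree library leaves a detached node with a NULL leaf pointer:

static inline int task_in_rq(struct task *t)
{
	/* assumed ebtree convention: a node that is linked in a tree has a
	 * non-NULL leaf pointer, and deleting it resets the pointer to NULL,
	 * so this doubles as an "is linked" test.
	 */
	return t->rq.node.leaf_p != NULL;
}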
/*
* Unlink the task from the run queue. The run_queue size and number of niced
* tasks are updated too. A pointer to the task itself is returned. The task
- * *must* already be in the wait queue before calling this function. If unsure,
- * use the safer task_unlink_rq() function.
+ * *must* already be in the run queue before calling this function. If unsure,
+ * use the safer task_unlink_rq() function. Note that the pointer to the next
+ * run queue entry is neither checked nor updated.
*/
static inline struct task *__task_unlink_rq(struct task *t)
{
return t;
}
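Only the entry and exit of __task_unlink_rq() appear above; the lines doing the actual work are trimmed from this excerpt. Per the comment, the function also detaches the node and maintains the counters. A hedged sketch of such a body, assuming a global run_queue counter declared alongside niced_tasks:

static inline struct task *__task_unlink_rq(struct task *t)
{
	eb32_delete(&t->rq);     /* detach the node from the run queue tree */
	run_queue--;             /* assumed global: current number of runnable tasks */
	if (likely(t->nice))
		niced_tasks--;   /* keep the niced tasks count declared above in sync */
	return t;
}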
+/* This function unlinks task <t> from the run queue if it is in it. It also
+ * takes care of updating rq_next, the cached pointer to the next run queue
+ * entry, if it was pointing to this task.
+ */
static inline struct task *task_unlink_rq(struct task *t)
{
- if (likely(task_in_rq(t)))
+ if (likely(task_in_rq(t))) {
+ if (&t->rq == rq_next)
+ rq_next = eb32_next(rq_next);
__task_unlink_rq(t);
+ }
return t;
}
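The reason callers outside the scheduler should prefer task_unlink_rq() is that process_runnable_tasks() below walks the tree through the cached rq_next pointer; deleting the very node rq_next refers to, without advancing it first, would leave the scheduler holding a pointer to a detached node. A hypothetical out-of-band caller, with task_unlink_wq() and task_free() assumed as the usual companions and not part of this patch:

/* Hypothetical helper for illustration only: destroy a task that may still
 * be queued to run. task_unlink_rq() advances rq_next past <t> before the
 * node is deleted, so the scheduler's cached iterator stays coherent.
 */
static inline void kill_task(struct task *t)
{
	task_unlink_rq(t);   /* safe even while process_runnable_tasks() is iterating */
	task_unlink_wq(t);   /* assumed wait queue counterpart */
	task_free(t);        /* assumed to release the task back to pool2_task */
}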
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
struct eb32_node *last_timer = NULL; /* optimization: last queued timer */
+struct eb32_node *rq_next = NULL; /* optimization: cached next run queue entry, must be adjusted on delete/insert */
static struct eb_root timers; /* sorted timers tree */
static struct eb_root rqueue; /* tree constituting the run queue */
void process_runnable_tasks(int *next)
{
struct task *t;
- struct eb32_node *eb;
unsigned int max_processed;
int expire;
max_processed = (max_processed + 3) / 4;
expire = *next;
- eb = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
+
while (max_processed--) {
/* Note: this loop is one of the fastest code paths in
* the whole program. It should not be re-arranged
* without a good reason.
*/
-
- if (unlikely(!eb)) {
- /* we might have reached the end of the tree, typically because
- * <rqueue_ticks> is in the first half and we're first scanning
- * the last half. Let's loop back to the beginning of the tree now.
- */
- eb = eb32_first(&rqueue);
- if (likely(!eb))
- break;
+ if (unlikely(!rq_next)) {
+ rq_next = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
+ if (!rq_next) {
+ /* we might have reached the end of the tree, typically because
+ * <rqueue_ticks> is in the first half and we're first scanning
+ * the last half. Let's loop back to the beginning of the tree now.
+ */
+ rq_next = eb32_first(&rqueue);
+ if (!rq_next)
+ break;
+ }
}
- /* detach the task from the queue */
- t = eb32_entry(eb, struct task, rq);
- eb = eb32_next(eb);
+ /* detach the task from the queue after updating the pointer to
+ * the next entry.
+ */
+ t = eb32_entry(rq_next, struct task, rq);
+ rq_next = eb32_next(rq_next);
__task_unlink_rq(t);
t->state |= TASK_RUNNING;
task_queue(t);
expire = tick_first_2nz(expire, t->expire);
}
-
- /* if the task has put itself back into the run queue, we want to ensure
- * it will be served at the proper time, especially if it's reniced.
- */
- if (unlikely(task_in_rq(t)) && (!eb || tick_is_lt(t->rq.key, eb->key))) {
- eb = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
- }
}
}
*next = expire;
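The lookup pattern above treats the queue position as a wrapping 32-bit counter: eb32_lookup_ge() starting at rqueue_ticks - TIMER_LOOK_BACK (unsigned arithmetic, so the subtraction wraps) can run off the end of the tree whenever the current position has already wrapped into the low half of the key space, and the scan then restarts from eb32_first(). A self-contained sketch of that wrap-around scan against a plain eb32 tree, with made-up keys and an illustrative LOOK_BACK constant standing in for TIMER_LOOK_BACK:

#include <stdio.h>
#include <eb32tree.h>

#define LOOK_BACK (1U << 31)   /* illustrative stand-in for TIMER_LOOK_BACK: half the key space */

int main(void)
{
	struct eb_root root = EB_ROOT;
	struct eb32_node nodes[4];
	/* keys on both sides of the 32-bit wrap point */
	unsigned int keys[4] = { 5, 10, 0xfffffff0u, 0xfffffff8u };
	unsigned int now = 3;       /* current position, already wrapped into the low half */
	struct eb32_node *node;
	int i, max_visits = 4;      /* plays the role of max_processed */

	for (i = 0; i < 4; i++) {
		nodes[i].key = keys[i];
		eb32_insert(&root, &nodes[i]);
	}

	/* start the scan LOOK_BACK before <now>, exactly like the scheduler does */
	node = eb32_lookup_ge(&root, now - LOOK_BACK);
	while (max_visits--) {
		if (!node) {
			/* ran past the largest key: wrap back to the smallest one */
			node = eb32_first(&root);
			if (!node)
				break;
		}
		printf("visiting key %#x\n", node->key);
		node = eb32_next(node);
	}
	/* prints 0xfffffff0 and 0xfffffff8, then wraps around to 0x5 and 0xa */
	return 0;
}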