[OPTIM] task_queue: assume most consecutive timers are equal
author     Willy Tarreau <w@1wt.eu>
           Sat, 5 Jul 2008 16:16:19 +0000 (18:16 +0200)
committer  Willy Tarreau <w@1wt.eu>
           Sat, 5 Jul 2008 16:16:19 +0000 (18:16 +0200)
When queuing a timer, its expiration date is very likely equal to
that of the previously queued timer, because dates are rounded to
the millisecond. Optimizing for this case provides a noticeable
1% performance boost.

include/proto/task.h
src/task.c
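
Before the diff itself, a minimal sketch of the fast path this patch adds. It
assumes HAProxy's ebtree API (eb32_insert(), eb32_delete(), eb_insert_dup() and
struct eb32_node from eb32tree.h); every other name (queue_timer, last_timer,
the include path) is illustrative only, not taken from the tree. The idea: when
the node being queued carries the same key as the last node queued, and that
node is still attached to the tree, link the new node as a duplicate of it
instead of descending the tree from the root again.

    #include <eb32tree.h>   /* path depends on how the ebtree sources are vendored */

    static struct eb32_node *last_timer;  /* last node queued in the timer tree */

    static void queue_timer(struct eb_root *root, struct eb32_node *node)
    {
            if (last_timer &&
                last_timer->key == node->key &&
                last_timer->node.node_p) {
                    /* same expiration tick and still linked in the tree:
                     * attach the new node as a duplicate of the cached one,
                     * skipping the full descent from the root.
                     */
                    eb_insert_dup(&last_timer->node, &node->node);
                    return;
            }
            /* slow path: regular insertion, then remember the insertion point */
            eb32_insert(root, node);
            last_timer = node;
    }

The cache must be invalidated as soon as the remembered node can leave the
tree, which is why the new task_dequeue() below resets last_timer before
calling eb32_delete(); otherwise a later dup insertion could attach to a node
that is no longer queued.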

diff --git a/include/proto/task.h b/include/proto/task.h
index de50cbab39cbb8504b89cd6666989d7db3974afc..cc5c180f4ea765b05e410eaca5017e58028d3714 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -35,6 +35,7 @@
 extern unsigned int run_queue;    /* run queue size */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;
+extern struct task *last_timer;   /* optimization: last queued timer */
 
 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
 int init_task();
@@ -63,11 +64,23 @@ static inline struct task *task_sleep(struct task *t)
  *  - wait queue
  * A pointer to the task itself is returned.
  */
-static inline struct task *task_delete(struct task *t)
+static inline struct task *task_dequeue(struct task *t)
 {
-       if (t->eb.node.leaf_p)
+       if (likely(t->eb.node.leaf_p)) {
+               if (last_timer == t)
+                       last_timer = NULL;
                eb32_delete(&t->eb);
+       }
+       return t;
+}
 
+/*
+ * Unlinks the task and adjusts run queue stats.
+ * A pointer to the task itself is returned.
+ */
+static inline struct task *task_delete(struct task *t)
+{
+       task_dequeue(t);
        if (t->state == TASK_RUNNING) {
                run_queue--;
                if (likely(t->nice))
diff --git a/src/task.c b/src/task.c
index b28ae0b0315602756eb55f2af6464ebb49f3caa6..13d6817efc668d8bfcc32dbaf2ae1df5683b2306 100644
--- a/src/task.c
+++ b/src/task.c
@@ -25,6 +25,7 @@ struct pool_head *pool2_task;
 
 unsigned int run_queue = 0;
 unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
+struct task *last_timer = NULL;  /* optimization: last queued timer */
 
 /* Principle of the wait queue.
  *
@@ -136,8 +137,7 @@ struct task *task_wakeup(struct task *t)
        if (t->state == TASK_RUNNING)
                return t;
 
-       if (likely(t->eb.node.leaf_p))
-               eb32_delete(&t->eb);
+       task_dequeue(t);
 
        run_queue++;
        t->eb.key = ++rqueue_ticks;
@@ -180,7 +180,18 @@ struct task *task_queue(struct task *task)
                /* we're queuing too far away or in the past (most likely) */
                return task;
 #endif
+
+       if (likely(last_timer &&
+                  last_timer->eb.key == task->eb.key &&
+                  last_timer->eb.node.node_p)) {
+               /* Most often, last queued timer has the same expiration date, so
+                * if it's not queued at the root, let's queue a dup directly there.
+                */
+               eb_insert_dup(&last_timer->eb.node, &task->eb.node);
+               return task;
+       }
        eb32_insert(&timers[ticks_to_tree(task->eb.key)], &task->eb);
+       last_timer = task;
        return task;
 }
 
@@ -280,7 +291,7 @@ void process_runnable_tasks(struct timeval *next)
                        if (likely(t->nice))
                                niced_tasks--;
                        t->state = TASK_IDLE;
-                       eb32_delete(&t->eb);
+                       task_dequeue(t);
 
                        t->process(t, &temp);
                        tv_bound(next, &temp);