git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
[OPTIM] freq_ctr: do not rotate the counters when reading
author: Willy Tarreau <w@1wt.eu>
Fri, 6 Mar 2009 13:29:25 +0000 (14:29 +0100)
committer: Willy Tarreau <w@1wt.eu>
Fri, 6 Mar 2009 13:29:25 +0000 (14:29 +0100)
It's easier to take the counter's age into account when consulting it
than to rotate it first. It also saves some CPU cycles and avoids the
multiply for outdated counters, finally saving CPU cycles here too
when multiple operations need to read the same counter.

The freq_ctr code has also shrunk by one third as a result of these
optimizations.

src/freq_ctr.c

index ce1ca6c6b4d10b022cecda53945e692cf81d36b5..3df930fc4bd754e48e4eb6da34765598073a1a7b 100644 (file)
  */
 unsigned int read_freq_ctr(struct freq_ctr *ctr)
 {
-       unsigned int cur;
-       if (unlikely(ctr->curr_sec != now.tv_sec))
-               rotate_freq_ctr(ctr);
+       unsigned int curr, past;
+       unsigned int age;
 
-       cur = ctr->curr_ctr;
-       if (ctr->prev_ctr <= 1 && !ctr->curr_ctr)
-               return ctr->prev_ctr; /* very low rate, avoid flapping */
+       age = now.tv_sec - ctr->curr_sec;
+       if (unlikely(age > 1))
+               return 0;
+
+       curr = 0;               
+       past = ctr->curr_ctr;
+       if (likely(!age)) {
+               curr = past;
+               past = ctr->prev_ctr;
+       }
 
-       return cur + mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
+       if (past <= 1 && !curr)
+               return past; /* very low rate, avoid flapping */
+
+       return curr + mul32hi(past, ~curr_sec_ms_scaled);
 }
 
 /* returns the number of remaining events that can occur on this freq counter
@@ -47,16 +56,26 @@ unsigned int read_freq_ctr(struct freq_ctr *ctr)
  */
 unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
 {
-       unsigned int cur;
-       if (unlikely(ctr->curr_sec != now.tv_sec))
-               rotate_freq_ctr(ctr);
+       unsigned int curr, past;
+       unsigned int age;
+
+       past = 0;
+       curr = 0;               
+       age = now.tv_sec - ctr->curr_sec;
 
-       cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
-       cur += ctr->curr_ctr + pend;
+       if (likely(age <= 1)) {
+               past = ctr->curr_ctr;
+               if (likely(!age)) {
+                       curr = past;
+                       past = ctr->prev_ctr;
+               }
+               curr += mul32hi(past, ~curr_sec_ms_scaled);
+       }
+       curr += pend;
 
-       if (cur >= freq)
+       if (curr >= freq)
                return 0;
-       return freq - cur;
+       return freq - curr;
 }
 
 /* return the expected wait time in ms before the next event may occur,
@@ -67,18 +86,27 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
  */
 unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
 {
-       unsigned int cur, wait;
+       unsigned int curr, past;
+       unsigned int wait, age;
 
-       if (unlikely(ctr->curr_sec != now.tv_sec))
-               rotate_freq_ctr(ctr);
+       past = 0;
+       curr = 0;               
+       age = now.tv_sec - ctr->curr_sec;
 
-       cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
-       cur += ctr->curr_ctr + pend;
+       if (likely(age <= 1)) {
+               past = ctr->curr_ctr;
+               if (likely(!age)) {
+                       curr = past;
+                       past = ctr->prev_ctr;
+               }
+               curr += mul32hi(past, ~curr_sec_ms_scaled);
+       }
+       curr += pend;
 
-       if (cur < freq)
+       if (curr < freq)
                return 0;
 
-       wait = 999 / cur;
+       wait = 999 / curr;
        return MAX(wait, 1);
 }