From: Willy Tarreau Date: Wed, 6 Oct 2021 14:22:09 +0000 (+0200) Subject: REORG: activity: uninline activity_count_runtime() X-Git-Tag: v2.5-dev9~70 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e0650224b8658694c0d25d7ba38ba2bfd0c1b33c;p=thirdparty%2Fhaproxy.git REORG: activity: uninline activity_count_runtime() This function has no reason for being inlined, it's called from non critical places (once in pollers), is quite large and comes with dependencies (time and freq_ctr). Let's move it to activity.c. That's another 0.4% less LoC to build. --- diff --git a/include/haproxy/activity.h b/include/haproxy/activity.h index 28f8689a4e..421697c007 100644 --- a/include/haproxy/activity.h +++ b/include/haproxy/activity.h @@ -25,7 +25,6 @@ #include #include #include -#include #include extern unsigned int profiling; @@ -34,42 +33,7 @@ extern struct activity activity[MAX_THREADS]; extern struct sched_activity sched_activity[256]; void report_stolen_time(uint64_t stolen); - -/* Collect date and time information before calling poll(). This will be used - * to count the run time of the past loop and the sleep time of the next poll. - * It also makes use of the just updated before_poll timer to count the loop's - * run time and feed the average loop time metric (in microseconds). - */ -static inline void activity_count_runtime() -{ - uint32_t run_time; - uint32_t up, down; - - /* 1 millisecond per loop on average over last 1024 iterations is - * enough to turn on profiling. - */ - up = 1000; - down = up * 99 / 100; - - run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec); - run_time = swrate_add(&activity[tid].avg_loop_us, TIME_STATS_SAMPLES, run_time); - - /* In automatic mode, reaching the "up" threshold on average switches - * profiling to "on" when automatic, and going back below the "down" - * threshold switches to off. The forced modes don't check the load.
- */ - if (!(task_profiling_mask & tid_bit)) { - if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON || - ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON && - swrate_avg(run_time, TIME_STATS_SAMPLES) >= up))) - _HA_ATOMIC_OR(&task_profiling_mask, tid_bit); - } else { - if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF || - ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF && - swrate_avg(run_time, TIME_STATS_SAMPLES) <= down))) - _HA_ATOMIC_AND(&task_profiling_mask, ~tid_bit); - } -} +void activity_count_runtime(); /* Computes the index of function pointer for use with sched_activity[] * or any other similar array passed in , and returns a pointer to the diff --git a/src/activity.c b/src/activity.c index 6ed6fc5ec2..90a1ef96b4 100644 --- a/src/activity.c +++ b/src/activity.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #if defined(DEBUG_MEM_STATS) @@ -347,6 +348,42 @@ void report_stolen_time(uint64_t stolen) update_freq_ctr_period(&activity[tid].cpust_15s, 15000, stolen); } +/* Collect date and time information before calling poll(). This will be used + * to count the run time of the past loop and the sleep time of the next poll. + * It also makes use of the just updated before_poll timer to count the loop's + * run time and feed the average loop time metric (in microseconds). + */ +void activity_count_runtime() +{ + uint32_t run_time; + uint32_t up, down; + + /* 1 millisecond per loop on average over last 1024 iterations is + * enough to turn on profiling. + */ + up = 1000; + down = up * 99 / 100; + + run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec); + run_time = swrate_add(&activity[tid].avg_loop_us, TIME_STATS_SAMPLES, run_time); + + /* In automatic mode, reaching the "up" threshold on average switches + * profiling to "on" when automatic, and going back below the "down" + * threshold switches to off. The forced modes don't check the load.
+ */ + if (!(task_profiling_mask & tid_bit)) { + if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON || + ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON && + swrate_avg(run_time, TIME_STATS_SAMPLES) >= up))) + _HA_ATOMIC_OR(&task_profiling_mask, tid_bit); + } else { + if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF || + ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF && + swrate_avg(run_time, TIME_STATS_SAMPLES) <= down))) + _HA_ATOMIC_AND(&task_profiling_mask, ~tid_bit); + } +} + #ifdef USE_MEMORY_PROFILING /* config parser for global "profiling.memory", accepts "on" or "off" */ static int cfg_parse_prof_memory(char **args, int section_type, struct proxy *curpx,