extern struct sched_activity sched_activity[256];
void report_stolen_time(uint64_t stolen);
-void activity_count_runtime();
+void activity_count_runtime(uint32_t run_time);
struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func);
#endif /* _HAPROXY_ACTIVITY_H */
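With the prototype now taking the elapsed time explicitly, the caller has to compute the microsecond delta itself. A minimal sketch of that computation from two gettimeofday() samples, assuming a hypothetical helper name (tv_diff_us is not part of HAProxy):

#include <stdint.h>
#include <sys/time.h>

/* Hypothetical helper: microseconds elapsed between two gettimeofday()
 * samples, using the same arithmetic as the caller in clock_entering_poll(). */
static inline uint32_t tv_diff_us(const struct timeval *from, const struct timeval *to)
{
	return (to->tv_sec - from->tv_sec) * 1000000U + (to->tv_usec - from->tv_usec);
}

In the patch itself the only caller is clock_entering_poll(), which already samples before_poll, so the computation simply moves there.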
update_freq_ctr_period(&activity[tid].cpust_15s, 15000, stolen);
}
-/* Collect date and time information before calling poll(). This will be used
- * to count the run time of the past loop and the sleep time of the next poll.
- * It also makes use of the just updated before_poll timer to count the loop's
- * run time and feed the average loop time metric (in microseconds).
+/* Update avg_loop value for the current thread and possibly decide to enable
+ * task-level profiling on the current thread based on its average run time.
+ * The <run_time> argument is the number of microseconds elapsed since the
+ * last time poll() returned.
*/
-void activity_count_runtime()
+void activity_count_runtime(uint32_t run_time)
{
- uint32_t run_time;
uint32_t up, down;
/* 1 millisecond per loop on average over last 1024 iterations is
up = 1000;
down = up * 99 / 100;
- run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec);
run_time = swrate_add(&activity[tid].avg_loop_us, TIME_STATS_SAMPLES, run_time);
/* In automatic mode, reaching the "up" threshold on average switches
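The up/down pair above gives task profiling a small hysteresis: it switches on once the average loop time reaches 1000 microseconds and only switches back off once the average drops below 990, so a value hovering around the threshold does not make the profiler flap. A simplified, self-contained sketch of that decision, assuming a plain moving average in place of HAProxy's swrate_add()/avg_loop_us machinery (the struct and function names are illustrative only):

#include <stdint.h>

/* Illustrative hysteresis toggle: profiling turns on at <up> microseconds of
 * average loop time and off again below <down> = 99% of <up>. */
struct loop_stats {
	uint32_t avg_us;     /* smoothed average loop time, in microseconds */
	int      profiling;  /* 0 = off, 1 = on */
};

static void update_profiling(struct loop_stats *s, uint32_t run_time_us)
{
	const uint32_t up   = 1000;
	const uint32_t down = up * 99 / 100;

	/* crude 1024-sample exponential moving average standing in for swrate_add() */
	s->avg_us = (uint32_t)(((uint64_t)s->avg_us * 1023 + run_time_us) / 1024);

	if (!s->profiling && s->avg_us >= up)
		s->profiling = 1;
	else if (s->profiling && s->avg_us < down)
		s->profiling = 0;
}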
{
uint64_t new_mono_time;
uint64_t new_cpu_time;
+ uint32_t run_time;
int64_t stolen;
gettimeofday(&before_poll, NULL);
+ run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec);
+
new_cpu_time = now_cpu_time();
new_mono_time = now_mono_time();
report_stolen_time(stolen);
}
}
+
+ /* update the average runtime */
+ activity_count_runtime(run_time);
}
/* returns the current date as returned by gettimeofday() in ISO+microsecond
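For context, the surrounding code in clock_entering_poll() also compares the monotonic time elapsed since the previous poll with the CPU time actually consumed over the same period, and hands any significant surplus to report_stolen_time(): time the thread was runnable but not running, typically because the host or a hypervisor preempted it. A rough sketch of that idea with hypothetical names (the half-millisecond threshold here is illustrative, not necessarily HAProxy's exact value):

#include <stdint.h>

/* Illustrative stolen-time check: the gap between elapsed monotonic time and
 * consumed CPU time is time the thread spent preempted. Small gaps are noise,
 * so only sizeable ones are reported. */
static void check_stolen_time(uint64_t mono_delta_ns, uint64_t cpu_delta_ns,
                              void (*report)(uint64_t stolen_ns))
{
	if (mono_delta_ns > cpu_delta_ns) {
		uint64_t stolen_ns = mono_delta_ns - cpu_delta_ns;

		if (stolen_ns >= 500000ULL)  /* at least half a millisecond */
			report(stolen_ns);
	}
}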
/* now let's wait for polled events */
wait_time = wake ? 0 : compute_poll_timeout(exp);
clock_entering_poll();
- activity_count_runtime();
do {
int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
*/
wait_time = wake ? 0 : compute_poll_timeout(exp);
clock_entering_poll();
- activity_count_runtime();
do {
int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
wait_time = wake ? 0 : compute_poll_timeout(exp);
fd = global.tune.maxpollevents;
clock_entering_poll();
- activity_count_runtime();
do {
int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
/* now let's wait for events */
wait_time = wake ? 0 : compute_poll_timeout(exp);
clock_entering_poll();
- activity_count_runtime();
status = poll(poll_events, nbfd, wait_time);
clock_update_date(wait_time, status);
clock_leaving_poll(wait_time, status);
delta.tv_sec = (delta_ms / 1000);
delta.tv_usec = (delta_ms % 1000) * 1000;
clock_entering_poll();
- activity_count_runtime();
status = select(maxfd,
readnotnull ? tmp_evts[DIR_RD] : NULL,
writenotnull ? tmp_evts[DIR_WR] : NULL,
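Each poller above drops its explicit activity_count_runtime() call for the same reason: clock_entering_poll() now finishes with it. A pseudocode-level sketch of the resulting per-iteration ordering, common to all the touched pollers; the extern declarations stand in for the real HAProxy headers, and do_wait()/dispatch() are placeholders for the backend-specific system call and event handling:

extern int  compute_poll_timeout(int exp);
extern void clock_entering_poll(void);
extern void clock_update_date(int wait_time, int status);
extern void clock_leaving_poll(int wait_time, int status);

/* One simplified poller iteration after this patch. */
static void one_poller_iteration(int wake, int exp,
                                 int (*do_wait)(int timeout),
                                 void (*dispatch)(int status))
{
	int wait_time = wake ? 0 : compute_poll_timeout(exp);
	int status;

	/* samples before_poll, accounts stolen time, and now also feeds the
	 * average loop run time to activity_count_runtime() */
	clock_entering_poll();

	status = do_wait(wait_time);          /* epoll_wait(), kevent(), poll(), ... */

	clock_update_date(wait_time, status);
	clock_leaving_poll(wait_time, status);

	dispatch(status);                     /* hand ready events to the fd layer */
}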