uint64_t prev_cpu_time; /* previous per thread CPU time */
uint64_t prev_mono_time; /* previous system wide monotonic time */
+ uint64_t curr_mono_time; /* latest system wide monotonic time */
struct eb_root rqueue_shared; /* run queue fed by other threads */
__decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
gettimeofday(&date, NULL);
date_ns = tv_to_ns(&date);
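+ /* also cache the monotonic time in the thread context so later callers
+  * can reuse it without another clock read
+  */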
+ th_ctx->curr_mono_time = now_mono_time();
+
/* compute the minimum and maximum local date we may have reached based
* on our past date and the associated timeout. There are three possible
* extremities:
void clock_init_process_date(void)
{
now_offset = 0;
+ th_ctx->prev_mono_time = th_ctx->curr_mono_time = now_mono_time(); /* 0 if not supported */
gettimeofday(&date, NULL);
after_poll = before_poll = date;
now_ns = global_now_ns = tv_to_ns(&date);
now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
th_ctx->idle_pct = 100;
th_ctx->prev_cpu_time = now_cpu_time();
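+ /* seed both the previous and the current monotonic time from a single
+  * reading so the first measured interval starts from a consistent reference
+  */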
+ th_ctx->prev_mono_time = now_mono_time();
+ th_ctx->curr_mono_time = th_ctx->prev_mono_time;
clock_update_date(0, 1);
}
{
clock_measure_idle();
th_ctx->prev_cpu_time = now_cpu_time();
- th_ctx->prev_mono_time = now_mono_time();
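+ /* reuse the most recently cached monotonic time instead of reading the clock again */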
+ th_ctx->prev_mono_time = th_ctx->curr_mono_time;
}
/* Collect date and time information before calling poll(). This will be used