* disabled, it contains the same info for the single running thread.
*/
struct thread_info {
- uint64_t prev_cpu_time; /* previous per thread CPU time */
- uint64_t prev_mono_time; /* previous system wide monotonic time */
- unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
unsigned int flags; /* thread info flags, TI_FL_* */
#ifdef CONFIG_HAP_POOLS
struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
unsigned int rq_total; /* total size of the run queue, prio_tree + tasklets */
int tasks_in_list; /* Number of tasks in the per-thread tasklets list */
+ uint64_t prev_cpu_time; /* previous per thread CPU time */
+ uint64_t prev_mono_time; /* previous system wide monotonic time */
+ uint idle_pct; /* idle to total ratio over last sample (percent) */
ALWAYS_ALIGN(128);
};
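The three time-tracking fields (prev_cpu_time, prev_mono_time and idle_pct) thus leave struct thread_info and land at the end of struct thread_ctx, just before its trailing ALWAYS_ALIGN(128). Every access site in the rest of this excerpt is updated accordingly; as a minimal sketch reusing names from the surrounding fragments (illustrative only, not part of the patch):

    th_ctx->idle_pct = 100;                                   /* the owning thread writes its own field */
    uint pct = HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);  /* another thread reads it atomically     */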
now = after_poll = before_poll = date;
global_now = ((ullong)date.tv_sec << 32) + (uint)date.tv_usec;
global_now_ms = now.tv_sec * 1000 + now.tv_usec / 1000;
- ti->idle_pct = 100;
+ th_ctx->idle_pct = 100;
clock_update_date(0, 1);
}
old_now = _HA_ATOMIC_LOAD(&global_now);
now.tv_sec = old_now >> 32;
now.tv_usec = (uint)old_now;
- ti->idle_pct = 100;
+ th_ctx->idle_pct = 100;
clock_update_date(0, 1);
}
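Both fragments above rely on global_now packing the wall-clock seconds into the upper 32 bits and the microseconds into the lower 32 bits of a single 64-bit word, so one atomic load yields a consistent pair that can be split back with a shift and a truncation. A small worked example with hypothetical values:

    ullong packed = ((ullong)5 << 32) + (uint)7;  /* tv_sec = 5, tv_usec = 7 (hypothetical) */
    /* packed == 0x0000000500000007ULL */
    uint sec  = packed >> 32;                     /* 5 */
    uint usec = (uint)packed;                     /* 7 */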
for (thr = 0; thr < MAX_THREADS; thr++) {
if (!(all_threads_mask & (1UL << thr)))
continue;
- total += HA_ATOMIC_LOAD(&ha_thread_info[thr].idle_pct);
+ total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
rthr++;
}
return rthr ? total / rthr : 0;
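The average only counts the threads present in all_threads_mask and divides by the number actually visited, so unused thread slots do not dilute the result. For instance, with two running threads reporting idle_pct values of 40 and 80, total is 120, rthr is 2 and the function returns 60; with no running thread it returns 0 instead of dividing by zero.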
if (samp_time < 500000)
return;
- HA_ATOMIC_STORE(&ti->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
+ HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
idle_time = samp_time = 0;
}
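The store is only performed once samp_time has reached at least 500000 microseconds (half a second), and the + samp_time / 2 term rounds the percentage to the nearest integer rather than truncating it. For example, with idle_time = 400000 and samp_time = 600000 (an exact ratio of 66.7%), the stored value is (100 * 400000 + 300000) / 600000 = 67.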
void clock_leaving_poll(int timeout, int interrupted)
{
clock_measure_idle();
- ti->prev_cpu_time = now_cpu_time();
- ti->prev_mono_time = now_mono_time();
+ th_ctx->prev_cpu_time = now_cpu_time();
+ th_ctx->prev_mono_time = now_mono_time();
}
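clock_leaving_poll() thus refreshes both per-thread reference clocks right after measuring idleness, so that the pre-poll path below can tell how much CPU time and how much monotonic time elapsed since the thread last left the poller.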
/* Collect date and time information before calling poll(). This will be used
new_cpu_time = now_cpu_time();
new_mono_time = now_mono_time();
- if (ti->prev_cpu_time && ti->prev_mono_time) {
- new_cpu_time -= ti->prev_cpu_time;
- new_mono_time -= ti->prev_mono_time;
+ if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) {
+ new_cpu_time -= th_ctx->prev_cpu_time;
+ new_mono_time -= th_ctx->prev_mono_time;
stolen = new_mono_time - new_cpu_time;
if (unlikely(stolen >= 500000)) {
stolen /= 500000;
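/* At this point new_cpu_time and new_mono_time hold deltas since the last
 * poll() exit, and their difference is time during which the thread was not
 * running despite being busy. Assuming the usual nanosecond resolution of
 * these helpers (their implementation is not part of this excerpt), a
 * monotonic delta of 1400000 ns against a CPU delta of 300000 ns gives
 * stolen = 1100000, which passes the 500000 threshold and becomes
 * 1100000 / 500000 = 2 half-millisecond units after the integer division
 * above.
 */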
/* Verify compression rate limiting and CPU usage */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (ti->idle_pct < compress_min_idle)) { /* idle */
+ (th_ctx->idle_pct < compress_min_idle)) { /* idle */
if (comp_ctx->cur_lvl > 0)
strm->level = --comp_ctx->cur_lvl;
}
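The same throttling rule applies at both call sites and, further down, to compression initialization itself (the goto fail path): when the compressed output rate exceeds comp_rate_lim or the thread's idle ratio falls below compress_min_idle, the dynamic compression level cur_lvl is stepped down. With hypothetical values of compress_min_idle = 20 and th_ctx->idle_pct = 12, for instance, the level would be reduced by one on this pass even if the rate limit is not being hit.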
/* compression limit */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (ti->idle_pct < compress_min_idle)) { /* idle */
+ (th_ctx->idle_pct < compress_min_idle)) { /* idle */
/* decrease level */
if (comp_ctx->cur_lvl > 0) {
comp_ctx->cur_lvl--;
void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
{
unsigned long thr_bit = 1UL << thr;
- unsigned long long p = ha_thread_info[thr].prev_cpu_time;
+ unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
unsigned long long n = now_cpu_time_thread(thr);
int stuck = !!(ha_thread_info[thr].flags & TI_FL_STUCK);
goto fail;
/* limit cpu usage */
- if (ti->idle_pct < compress_min_idle)
+ if (th_ctx->idle_pct < compress_min_idle)
goto fail;
/* initialize compression */
if (thr < 0 || thr >= global.nbthread)
break;
- p = ha_thread_info[thr].prev_cpu_time;
+ p = ha_thread_ctx[thr].prev_cpu_time;
n = now_cpu_time_thread(thr);
/* not yet reached the deadline of 1 sec */
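/* Both call sites above use prev_cpu_time as a "last left poll()" marker:
 * n - p is the CPU time the thread has consumed since it last returned to
 * the poller. Assuming the same nanosecond resolution as above (again not
 * shown in this excerpt), the 1 second deadline mentioned in the comment
 * corresponds to n - p reaching 1000000000.
 */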