extern struct thread_info {
clockid_t clock_id;
+ uint64_t prev_cpu_time; /* previous per thread CPU time */
+ uint64_t prev_mono_time; /* previous system wide monotonic time */
+ unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] __attribute__((aligned(64)));
extern struct thread_info {
pthread_t pthread;
clockid_t clock_id;
+ uint64_t prev_cpu_time; /* previous per thread CPU time */
+ uint64_t prev_mono_time; /* previous system wide monotonic time */
+ unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] __attribute__((aligned(64)));
#include <unistd.h>
#include <sys/time.h>
#include <common/config.h>
+#include <common/hathreads.h>
#include <common/standard.h>
/* eternity when expressed in timeval */
extern THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
extern THREAD_LOCAL unsigned int samp_time; /* total elapsed time over current sample */
extern THREAD_LOCAL unsigned int idle_time; /* total idle time over current sample */
-extern THREAD_LOCAL unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
extern THREAD_LOCAL struct timeval now; /* internal date is a monotonic function of real clock */
extern THREAD_LOCAL struct timeval date; /* the real current date */
extern struct timeval start_date; /* the process's start date */
extern THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
extern THREAD_LOCAL struct timeval after_poll; /* system date after leaving poll() */
-extern THREAD_LOCAL uint64_t prev_cpu_time; /* previous per thread CPU time */
-extern THREAD_LOCAL uint64_t prev_mono_time; /* previous system wide monotonic time */
/**** exported functions *************************************************/
if (samp_time < 500000)
return;
- idle_pct = (100 * idle_time + samp_time / 2) / samp_time;
+ ti->idle_pct = (100 * idle_time + samp_time / 2) / samp_time;
idle_time = samp_time = 0;
}
static inline void tv_leaving_poll(int timeout, int interrupted)
{
measure_idle();
- prev_cpu_time = now_cpu_time();
- prev_mono_time = now_mono_time();
+ ti->prev_cpu_time = now_cpu_time();
+ ti->prev_mono_time = now_mono_time();
}
#endif /* _COMMON_TIME_H */
new_cpu_time = now_cpu_time();
new_mono_time = now_mono_time();
- if (prev_cpu_time && prev_mono_time) {
- new_cpu_time -= prev_cpu_time;
- new_mono_time -= prev_mono_time;
+ if (ti->prev_cpu_time && ti->prev_mono_time) {
+ new_cpu_time -= ti->prev_cpu_time;
+ new_mono_time -= ti->prev_mono_time;
stolen = new_mono_time - new_cpu_time;
if (unlikely(stolen >= 500000)) {
stolen /= 500000;
/* Verify compression rate limiting and CPU usage */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (idle_pct < compress_min_idle)) { /* idle */
+ (ti->idle_pct < compress_min_idle)) { /* idle */
if (comp_ctx->cur_lvl > 0)
strm->level = --comp_ctx->cur_lvl;
}
/* compression limit */
if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) || /* rate */
- (idle_pct < compress_min_idle)) { /* idle */
+ (ti->idle_pct < compress_min_idle)) { /* idle */
/* decrease level */
if (comp_ctx->cur_lvl > 0) {
comp_ctx->cur_lvl--;
goto fail;
/* limit cpu usage */
- if (idle_pct < compress_min_idle)
+ if (ti->idle_pct < compress_min_idle)
goto fail;
/* initialize compression */
goto fail;
/* limit cpu usage */
- if (idle_pct < compress_min_idle)
+ if (ti->idle_pct < compress_min_idle)
goto fail;
/* initialize compression */
global.rlimit_nofile,
global.maxsock, global.maxconn, global.maxpipes,
actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
- tasks_run_queue_cur, nb_tasks_cur, idle_pct
+ tasks_run_queue_cur, nb_tasks_cur, ti->idle_pct
);
/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
#endif
info[INF_TASKS] = mkf_u32(0, nb_tasks_cur);
info[INF_RUN_QUEUE] = mkf_u32(0, tasks_run_queue_cur);
- info[INF_IDLE_PCT] = mkf_u32(FN_AVG, idle_pct);
+ info[INF_IDLE_PCT] = mkf_u32(FN_AVG, ti->idle_pct);
info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
if (global.desc)
info[INF_DESCRIPTION] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.desc);
THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
THREAD_LOCAL unsigned int samp_time; /* total elapsed time over current sample */
THREAD_LOCAL unsigned int idle_time; /* total idle time over current sample */
-THREAD_LOCAL unsigned int idle_pct; /* idle to total ratio over last sample (percent) */
THREAD_LOCAL struct timeval now; /* internal date is a monotonic function of real clock */
THREAD_LOCAL struct timeval date; /* the real current date */
struct timeval start_date; /* the process's start date */
THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
THREAD_LOCAL struct timeval after_poll; /* system date after leaving poll() */
-THREAD_LOCAL uint64_t prev_cpu_time = 0; /* previous per thread CPU time */
-THREAD_LOCAL uint64_t prev_mono_time = 0; /* previous system wide monotonic time */
static THREAD_LOCAL struct timeval tv_offset; /* per-thread time offset relative to global time */
volatile unsigned long long global_now; /* common date between all threads (32:32) */
adjusted = date;
after_poll = date;
samp_time = idle_time = 0;
- idle_pct = 100;
+ ti->idle_pct = 100;
global_now = (((unsigned long long)adjusted.tv_sec) << 32) +
(unsigned int)adjusted.tv_usec;
goto to_ms;