/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
- be->be_counters.last_sess = ns_to_sec(tv_to_ns(&now));
+ be->be_counters.last_sess = ns_to_sec(now_ns);
}
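For readers following the conversion, the time-unit helpers used throughout this change (tv_to_ns, sec_to_ns, ms_to_ns, ns_to_sec, ns_to_ms, NS_TO_TV) behave roughly as in the sketch below. This is a simplified statement of their assumed semantics for illustration only, not the exact definitions from the HAProxy headers.

#include <sys/time.h>

/* Assumed semantics of the time-unit helpers (simplified sketch). */
static inline unsigned long long tv_to_ns(const struct timeval *tv)
{
	return (unsigned long long)tv->tv_sec * 1000000000ULL +
	       (unsigned long long)tv->tv_usec * 1000ULL;
}
static inline unsigned long long sec_to_ns(unsigned int sec) { return sec * 1000000000ULL; }
static inline unsigned long long ms_to_ns(unsigned int ms)   { return ms * 1000000ULL; }
static inline unsigned int ns_to_sec(unsigned long long ns)  { return ns / 1000000000ULL; }
static inline unsigned int ns_to_ms(unsigned long long ns)   { return ns / 1000000ULL; }
/* assumed shape of the ns -> timeval conversion used below */
#define NS_TO_TV(t) ((struct timeval){ .tv_sec  = (t) / 1000000000ULL, \
                                       .tv_usec = ((t) % 1000000000ULL) / 1000ULL })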
/* This function returns non-zero if the designated server will be
extern struct timeval start_date; /* the process's start date in wall-clock time */
extern struct timeval start_time; /* the process's start date in internal monotonic time */
-extern volatile ullong global_now; /* common monotonic date between all threads (32:32) */
+extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
-extern THREAD_LOCAL struct timeval now; /* internal monotonic date derived from real clock */
+extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
uint64_t now_cpu_time_thread(int thr);
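The "wraps every 585 yr" note follows from simple arithmetic: a 64-bit unsigned nanosecond counter wraps after 2^64 ns = 18,446,744,073,709,551,616 ns ≈ 1.84 x 10^10 s, and dividing by 31,536,000 s per (365-day) year gives about 584.9 years, i.e. roughly 585 years.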
se_expect_no_data(qcs->sd);
/* TODO duplicated from mux_h2 */
- sess->t_idle = ns_to_ms(tv_to_ns(&now) - sess->accept_ts) - sess->t_handshake;
+ sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;
if (!sc_new_from_endp(qcs->sd, sess, buf))
return NULL;
/* TODO duplicated from mux_h2 */
sess->accept_date = date;
- sess->accept_ts = tv_to_ns(&now);
+ sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = 0;
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
- s->counters.last_sess = ns_to_sec(tv_to_ns(&now));
+ s->counters.last_sess = ns_to_sec(now_ns);
}
/* returns the current server throttle rate between 0 and 100% */
} while (0)
/* retrieve uptime */
- up = tv_to_ns(&now) - tv_to_ns(&start_time);
+ up = now_ns - tv_to_ns(&start_time);
up_sec = ns_to_sec(up);
up_usec = (up / 1000U) % 1000000U;
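To illustrate the decomposition with a hypothetical value: for up = 5,250,123,456 ns, up_sec = ns_to_sec(up) = 5, and up_usec = (5,250,123,456 / 1000) % 1,000,000 = 5,250,123 % 1,000,000 = 250,123, i.e. the microsecond remainder within the current second.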
int be_lastsession(const struct proxy *be)
{
if (be->be_counters.last_sess)
- return ns_to_sec(tv_to_ns(&now)) - be->be_counters.last_sess;
+ return ns_to_sec(now_ns) - be->be_counters.last_sess;
return -1;
}
sc_shutdown(sc);
sc->flags |= SC_FL_ERROR;
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
if (unlikely(!(s->flags & SF_ASSIGNED)))
sc->state = SC_ST_REQ;
else {
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
sc->state = SC_ST_ASS;
}
DBG_TRACE_STATE("dequeue connection request", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
/* ... and timeout expired */
s->conn_exp = TICK_ETERNITY;
s->flags &= ~SF_CONN_EXP;
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
/* Connection remains in queue, check if we have to abort it */
if (back_may_abort_req(req, s)) {
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
/* we may need to know the position in the queue for logging */
pendconn_cond_unlink(s->pend_pos);
}
/* The server is assigned */
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
sc->state = SC_ST_ASS;
be_set_sess_last(s->be);
DBG_TRACE_STATE("connection request assigned to a server", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
BUG_ON(!sc_appctx(s->scb));
if (!s->logs.request_ts)
- s->logs.request_ts = tv_to_ns(&now);
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.request_ts = now_ns;
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
be_set_sess_last(s->be);
}
*/
void set_backend_down(struct proxy *be)
{
- be->last_change = ns_to_sec(tv_to_ns(&now));
+ be->last_change = ns_to_sec(now_ns);
_HA_ATOMIC_INC(&be->down_trans);
if (!(global.mode & MODE_STARTING)) {
}
int be_downtime(struct proxy *px) {
- if (px->lbprm.tot_weight && px->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative time
+ if (px->lbprm.tot_weight && px->last_change < ns_to_sec(now_ns)) // ignore negative time
return px->down_time;
- return ns_to_sec(tv_to_ns(&now)) - px->last_change + px->down_time;
+ return ns_to_sec(now_ns) - px->last_change + px->down_time;
}
/*
peers->remote = p;
p->conf.file = strdup(file);
p->conf.line = linenum;
- p->last_change = ns_to_sec(tv_to_ns(&now));
+ p->last_change = ns_to_sec(now_ns);
p->xprt = xprt_get(XPRT_RAW);
p->sock_init_arg = NULL;
HA_SPIN_INIT(&p->lock);
cfg_peers = curpeers;
curpeers->conf.file = strdup(file);
curpeers->conf.line = linenum;
- curpeers->last_change = ns_to_sec(tv_to_ns(&now));
+ curpeers->last_change = ns_to_sec(now_ns);
curpeers->id = strdup(args[1]);
curpeers->disabled = 0;
}
if (status == HCHK_STATUS_START) {
check->result = CHK_RES_UNKNOWN; /* no result yet */
check->desc[0] = '\0';
- check->start = tv_to_ns(&now);
+ check->start = now_ns;
return;
}
check->duration = -1;
else if (check->start) {
/* set_server_check_status() may be called more than once */
- check->duration = ns_to_ms(tv_to_ns(&now) - check->start);
+ check->duration = ns_to_ms(now_ns - check->start);
check->start = 0;
}
s->queue.length);
if ((s->cur_state == SRV_ST_STARTING) &&
- ns_to_sec(tv_to_ns(&now)) < s->last_change + s->slowstart &&
- ns_to_sec(tv_to_ns(&now)) >= s->last_change) {
- ratio = MAX(1, 100 * (ns_to_sec(tv_to_ns(&now)) - s->last_change) / s->slowstart);
+ ns_to_sec(now_ns) < s->last_change + s->slowstart &&
+ ns_to_sec(now_ns) >= s->last_change) {
+ ratio = MAX(1, 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart);
chunk_appendf(buf, "; throttle=%d%%", ratio);
}
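A worked example of the throttle display (hypothetical values): with slowstart = 60 s and a server that entered SRV_ST_STARTING 15 s ago, ratio = MAX(1, 100 * 15 / 60) = 25, so the check output shows "throttle=25%"; immediately after the state change the MAX(1, ...) clamp keeps it at 1% rather than 0%.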
/* check this every ms */
t->expire = tick_add(now_ms, MS_TO_TICKS(mininter * srvpos / nbcheck));
- check->start = tv_to_ns(&now);
+ check->start = now_ns;
task_queue(t);
return 1;
init_new_proxy(fe);
fe->next = proxies_list;
proxies_list = fe;
- fe->last_change = ns_to_sec(tv_to_ns(&now));
+ fe->last_change = ns_to_sec(now_ns);
fe->id = strdup("GLOBAL");
fe->cap = PR_CAP_FE|PR_CAP_INT;
fe->maxconn = 10; /* default to 10 concurrent connections */
/* If there is data available for analysis, log the end of the idle time. */
if (c_data(req) && s->logs.t_idle == -1)
- s->logs.t_idle = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts) - s->logs.t_handshake;
+ s->logs.t_idle = ns_to_ms(now_ns - s->logs.accept_ts) - s->logs.t_handshake;
to_forward = pcli_parse_request(s, req, &errmsg, &next_pid);
if (to_forward > 0) {
sess_change_server(s, NULL);
}
- s->logs.t_close = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
stream_process_counters(s);
/* don't count other requests' data */
stream_update_time_stats(s);
s->logs.accept_date = date; /* user-visible date for logging */
- s->logs.accept_ts = tv_to_ns(&now); /* corrected date for internal use */
+ s->logs.accept_ts = now_ns; /* corrected date for internal use */
s->logs.t_handshake = 0; /* There are no handshake in keep alive connection. */
s->logs.t_idle = -1;
s->logs.request_ts = 0;
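A hypothetical timeline (values invented for illustration) shows how these per-stream timers relate to accept_ts: a connection is accepted at accept_ts; the handshake completes 5 ms later, so t_handshake = 5; the first request byte shows up at 12 ms, so t_idle = ns_to_ms(now_ns - accept_ts) - t_handshake = 12 - 5 = 7; later milestones (t_queue, t_connect, t_data, t_close) are likewise stored as millisecond offsets from accept_ts. For a keep-alive follow-up request, as above, accept_ts is reset to now_ns, t_handshake to 0 and t_idle to -1, so the next request is timed from its own logical accept point.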
struct timeval start_date; /* the process's start date in wall-clock time */
struct timeval start_time; /* the process's start date in internal monotonic time */
-volatile ullong global_now; /* common monotonic date between all threads (32:32) */
+volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
-THREAD_ALIGNED(64) static ullong now_offset; /* global offset between system time and global time */
+THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
+THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
-THREAD_LOCAL struct timeval now; /* internal monotonic date derived from real clock */
THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
static THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
return ret;
}
-/* clock_update_date: sets <date> to system time, and sets <now> to something as
- * close as possible to real time, following a monotonic function. The main
+/* clock_update_date: sets <date> to system time, and sets <now_ns> to something
+ * as close as possible to real time, following a monotonic function. The main
* principle consists in detecting backwards and forwards time jumps and adjust
* an offset to correct them. This function should be called once after each
* poll, and never farther apart than MAX_DELAY_MS*2. The poll's timeout should
* be passed in <max_wait>, with <interrupted> non-zero if the poll returned
* before that timeout fully expired. clock_init_process_date() must have been
* called once first, and
* clock_init_thread_date() must also have been called once for each thread.
*
* An offset is used to adjust the current time (date), to figure a monotonic
- * local time (now). The offset is not critical, as it is only updated after a
- * clock jump is detected. From this point all threads will apply it to their
+ * local time (now_ns). The offset is not critical, as it is only updated after
+ * a clock jump is detected. From this point all threads will apply it to their
* locally measured time, and will then agree around a common monotonic
- * global_now value that serves to further refine their local time. As it is
- * not possible to atomically update a timeval, both global_now and the
- * now_offset values are instead stored as 64-bit integers made of two 32 bit
- * values for the tv_sec and tv_usec parts. The offset is made of two signed
- * ints so that the clock can be adjusted in the two directions.
+ * global_now_ns value that serves to further refine their local time. Both
+ * now_ns and global_now_ns are 64-bit integers counting nanoseconds since a
+ * vague reference (it starts roughly 20s before the next wrap-around of the
+ * millisecond counter after boot). The offset is also an integral number of
+ * nanoseconds, but it's signed so that the clock can be adjusted in the two
+ * directions.
*/
void clock_update_local_date(int max_wait, int interrupted)
{
struct timeval min_deadline, max_deadline;
- ullong ofs;
gettimeofday(&date, NULL);
_tv_ms_add(&min_deadline, &before_poll, max_wait);
_tv_ms_add(&max_deadline, &before_poll, max_wait + 100);
- ofs = HA_ATOMIC_LOAD(&now_offset);
-
if (unlikely(__tv_islt(&date, &before_poll) || // big jump backwards
(!interrupted && __tv_islt(&date, &min_deadline)) || // small jump backwards
__tv_islt(&max_deadline, &date))) { // big jump forwards
if (!interrupted)
- _tv_ms_add(&now, &now, max_wait);
+ now_ns += ms_to_ns(max_wait);
} else {
/* The date is still within expectations. Let's apply the
- * now_offset to the system date. Note: ofs if made of two
- * independent signed ints.
+ * now_offset to the system date. The offset is now a single signed
+ * 64-bit count of nanoseconds, so it may be negative.
*/
- now.tv_sec = date.tv_sec + (int)(ofs >> 32); // note: may be positive or negative
- now.tv_usec = date.tv_usec + (int)ofs; // note: may be positive or negative
- if ((int)now.tv_usec < 0) {
- now.tv_usec += 1000000;
- now.tv_sec -= 1;
- } else if (now.tv_usec >= 1000000) {
- now.tv_usec -= 1000000;
- now.tv_sec += 1;
- }
+ now_ns = tv_to_ns(&date) + HA_ATOMIC_LOAD(&now_offset);
}
- now_ms = __tv_to_ms(&now);
+ now_ms = ns_to_ms(now_ns);
}
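The general technique is easier to see in isolation. The sketch below is a minimal, standalone illustration of deriving a monotonic time from a jumpy wall clock via a signed offset; it is not the HAProxy implementation, which additionally bounds forward jumps against the poll deadline and shares the offset between threads.

#include <stdint.h>

static int64_t  offset_ns;  /* signed: wall clock may be ahead of or behind local time */
static uint64_t local_ns;   /* last monotonic value handed out */

static uint64_t monotonic_from_wallclock(uint64_t wall_ns)
{
	/* adding a signed offset through unsigned wrap-around works for both signs */
	uint64_t candidate = wall_ns + (uint64_t)offset_ns;

	if ((int64_t)(candidate - local_ns) < 0) {
		/* the wall clock jumped backwards: keep local time where it
		 * was and fold the whole jump into the offset instead.
		 */
		offset_ns = (int64_t)(local_ns - wall_ns);
		candidate = local_ns;
	}
	local_ns = candidate;
	return local_ns;
}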
void clock_update_global_date()
{
- struct timeval tmp_now;
+ ullong old_now_ns;
uint old_now_ms;
- ullong old_now;
- ullong new_now;
- ullong ofs_new;
- uint sec_ofs, usec_ofs;
/* now that we have bounded the local time, let's check if it's
* realistic regarding the global date, which only moves forward,
* otherwise catch up.
*/
- old_now = global_now;
+ old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
old_now_ms = global_now_ms;
do {
- tmp_now.tv_sec = (unsigned int)(old_now >> 32);
- tmp_now.tv_usec = old_now & 0xFFFFFFFFU;
+ if (now_ns < old_now_ns)
+ now_ns = old_now_ns;
- if (__tv_islt(&now, &tmp_now))
- now = tmp_now;
-
- /* now <now> is expected to be the most accurate date,
- * equal to <global_now> or newer. Updating the global
+ /* now <now_ns> is expected to be the most accurate date,
+ * equal to <global_now_ns> or newer. Updating the global
* date too often causes extreme contention and is not
* needed: it's only used to help threads run at the
* same date in case of local drift, and the global date does
* not need to be synchronized any better than 32 microseconds,
* so that's what we're doing here.
*/
+ now_ms = ns_to_ms(now_ns);
- new_now = ((ullong)now.tv_sec << 32) + (uint)now.tv_usec;
- now_ms = __tv_to_ms(&now);
-
- if (!((new_now ^ old_now) & ~0x1FULL))
+ if (!((now_ns ^ old_now_ns) & ~0x7FFFULL))
return;
- /* let's try to update the global <now> (both in timeval
+ /* let's try to update the global date (both in its nanosecond
* and ms forms) or loop again.
*/
- } while ((!_HA_ATOMIC_CAS(&global_now, &old_now, new_now) ||
+ } while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
__ha_cpu_relax());
- /* <now> and <now_ms> are now updated to the last value of global_now
- * and global_now_ms, which were also monotonically updated. We can
- * compute the latest offset, we don't care who writes it last, the
- * variations will not break the monotonic property.
+ /* <now_ns> and <now_ms> are now updated to the last value of
+ * global_now_ns and global_now_ms, which were also monotonically
+ * updated. We can compute the latest offset, we don't care who writes
+ * it last, the variations will not break the monotonic property.
*/
-
- sec_ofs = now.tv_sec - date.tv_sec;
- usec_ofs = now.tv_usec - date.tv_usec;
- if ((int)usec_ofs < 0) {
- usec_ofs += 1000000;
- sec_ofs -= 1;
- }
- ofs_new = ((ullong)sec_ofs << 32) + usec_ofs;
- HA_ATOMIC_STORE(&now_offset, ofs_new);
+ HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}
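The granularity check translates directly between the two encodings: in the old 32:32 format the low 32 bits held microseconds, so masking with ~0x1F ignored differences of less than 32 µs; in the nanosecond format, ~0x7FFFULL ignores differences below 0x7FFF ns = 32,767 ns ≈ 32.8 µs. Either way the shared global_now_* variables are only written once the local clock has moved by roughly 32 µs, which keeps contention on that cache line low.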
/* must be called once at boot to initialize some global variables */
void clock_init_process_date(void)
{
now_offset = 0;
gettimeofday(&date, NULL);
- now = after_poll = before_poll = date;
- global_now = ((ullong)date.tv_sec << 32) + (uint)date.tv_usec;
- global_now_ms = now.tv_sec * 1000 + now.tv_usec / 1000;
+ after_poll = before_poll = date;
+ now_ns = global_now_ns = tv_to_ns(&date);
+ global_now_ms = ns_to_ms(now_ns);
/* force time to wrap 20s after boot: we first compute the time offset
* that once applied to the wall-clock date will make the local time
* wrap in 20 seconds after boot. The offset is applied to the process-wide
* time, and will be used to recompute the local time, both of which will
* match and continue from this shifted date.
*/
- now_offset = (uint64_t)(-(global_now_ms / 1000U) - BOOT_TIME_WRAP_SEC) << 32;
- global_now += now_offset;
+ now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
+ global_now_ns += now_offset;
+ now_ns = global_now_ns;
+ now_ms = global_now_ms = ns_to_ms(now_ns);
th_ctx->idle_pct = 100;
clock_update_date(0, 1);
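A worked example of the wrap offset (the boot value is hypothetical, and BOOT_TIME_WRAP_SEC is assumed to be 20 as the comment above implies): if global_now_ms happens to be 4,000,000,000 at boot, then (uint)(-global_now_ms) = 2^32 - 4,000,000,000 = 294,967,296 ms remain before the 32-bit millisecond counter would naturally wrap; dividing by 1000 gives 294,967 s, and subtracting 20 yields an offset of 294,947 s. Once sec_to_ns() of that offset is added, now_ms sits about 20 s short of 2^32, so the millisecond counter wraps roughly 20 s after boot and any wrap-handling bug shows up immediately instead of after ~49.7 days.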
*/
void clock_init_thread_date(void)
{
- ullong old_now;
-
gettimeofday(&date, NULL);
after_poll = before_poll = date;
- old_now = _HA_ATOMIC_LOAD(&global_now);
- now.tv_sec = old_now >> 32;
- now.tv_usec = (uint)old_now;
+ now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
th_ctx->idle_pct = 100;
th_ctx->prev_cpu_time = now_cpu_time();
clock_update_date(0, 1);
spoe_update_stat_time(ullong *since, long *t)
{
if (*t == -1)
- *t = ns_to_ms(tv_to_ns(&now) - *since);
+ *t = ns_to_ms(now_ns - *since);
else
- *t += ns_to_ms(tv_to_ns(&now) - *since);
+ *t += ns_to_ms(now_ns - *since);
*since = 0;
}
LIST_APPEND(&SPOE_APPCTX(appctx)->waiting_queue, &ctx->list);
}
_HA_ATOMIC_INC(&agent->counters.nb_waiting);
- ctx->stats.wait_ts = tv_to_ns(&now);
+ ctx->stats.wait_ts = now_ns;
SPOE_APPCTX(appctx)->frag_ctx.ctx = NULL;
SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
SPOE_APPCTX(appctx)->frag_ctx.curfid = 0;
LIST_INIT(&ctx->list);
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
spoe_update_stat_time(&ctx->stats.wait_ts, &ctx->stats.t_waiting);
- ctx->stats.response_ts = tv_to_ns(&now);
+ ctx->stats.response_ts = now_ns;
if (ctx->spoe_appctx) {
ctx->spoe_appctx->cur_fpa--;
ctx->spoe_appctx = NULL;
* it. */
_HA_ATOMIC_INC(&agent->counters.nb_sending);
spoe_update_stat_time(&ctx->stats.request_ts, &ctx->stats.t_request);
- ctx->stats.queue_ts = tv_to_ns(&now);
+ ctx->stats.queue_ts = now_ns;
if (ctx->spoe_appctx)
return 1;
LIST_APPEND(&agent->rt[tid].sending_queue, &ctx->list);
return 0;
agent->rt[tid].processing++;
- ctx->stats.start_ts = tv_to_ns(&now);
- ctx->stats.request_ts = tv_to_ns(&now);
+ ctx->stats.start_ts = now_ns;
+ ctx->stats.request_ts = now_ns;
ctx->stats.t_request = -1;
ctx->stats.t_queue = -1;
ctx->stats.t_waiting = -1;
if (ctx->state == SPOE_CTX_ST_ENCODING_MSGS) {
if (ctx->stats.request_ts == 0)
- ctx->stats.request_ts = tv_to_ns(&now);
+ ctx->stats.request_ts = now_ns;
if (!spoe_acquire_buffer(&ctx->buffer, &ctx->buffer_wait))
goto out;
ret = spoe_encode_messages(s, ctx, messages, dir, type);
/* conf->agent_fe was already initialized during the config
* parsing. Finish initialization. */
- conf->agent_fe.last_change = ns_to_sec(tv_to_ns(&now));
+ conf->agent_fe.last_change = ns_to_sec(now_ns);
conf->agent_fe.cap = PR_CAP_FE;
conf->agent_fe.mode = PR_MODE_TCP;
conf->agent_fe.maxconn = 0;
tzset();
clock_init_process_date();
start_date = date;
- start_time = now;
+ start_time = NS_TO_TV(now_ns);
pid = getpid();
/* Set local host name and adjust some environment variables.
finst = ((htxn->dir == SMP_OPT_DIR_REQ) ? SF_FINST_R : SF_FINST_H);
if (htxn->dir == SMP_OPT_DIR_REQ) {
/* let's log the request time */
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
}
*/
struct timeval tv;
- tv_remain(&start_time, &now, &tv);
+ tv = NS_TO_TV(now_ns);
+ tv_remain(&start_time, &tv, &tv); // tv = now_ns - start_time
tv_add(&tv, &tv, &start_date);
lua_newtable(L);
goto fail;
/* Note: Only eval on the request */
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
if (rule->from == ACT_F_HTTP_REQ) {
/* let's log the request time */
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
if (!s->target && http_stats_check_uri(s, txn, px)) {
s->target = &http_stats_applet.obj_type;
if (unlikely(!sc_applet_create(s->scb, objt_applet(s->target)))) {
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_RESOURCE;
goto return_int_err;
if (s->be->cookie_name || sess->fe->capture_name)
http_manage_client_side_cookies(s, req);
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (s->flags & SF_BE_ASSIGNED)
req->analyse_exp = TICK_ETERNITY;
req->analysers &= ~an_bit;
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
/* OK let's go on with the BODY now */
DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
return 1;
* It will not cause trouble to the logs because we can exclude
* the tarpitted connections by filtering on the 'PT' status flags.
*/
- s->logs.t_queue = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
http_reply_and_close(s, txn->status, (!(s->scf->flags & SC_FL_ERROR) ? http_error_message(s) : NULL));
http_set_term_flags(s);
http_end:
/* The situation will not evolve, so let's give up on the analysis. */
- s->logs.request_ts = tv_to_ns(&now); /* update the request timer to reflect full request */
+ s->logs.request_ts = now_ns; /* update the request timer to reflect full request */
req->analysers &= ~an_bit;
req->analyse_exp = TICK_ETERNITY;
DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
end:
/* we want to have the response time before we start processing it */
- s->logs.t_data = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_data = ns_to_ms(now_ns - s->logs.accept_ts);
/* end of job, return OK */
rep->analysers &= ~an_bit;
if (rule->flags & REDIRECT_FLAG_FROM_REQ) {
/* let's log the request time */
- s->logs.request_ts = tv_to_ns(&now);
+ s->logs.request_ts = now_ns;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
tmp_strm_log.t_queue = -1;
tmp_strm_log.t_connect = -1;
tmp_strm_log.t_data = -1;
- tmp_strm_log.t_close = ns_to_ms(tv_to_ns(&now) - sess->accept_ts);
+ tmp_strm_log.t_close = ns_to_ms(now_ns - sess->accept_ts);
tmp_strm_log.bytes_in = 0;
tmp_strm_log.bytes_out = 0;
tmp_strm_log.prx_queue_pos = 0;
px->conf.file = strdup(file);
px->conf.line = linenum;
px->mode = PR_MODE_SYSLOG;
- px->last_change = ns_to_sec(tv_to_ns(&now));
+ px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_FE;
px->maxconn = 10;
px->timeout.client = TICK_ETERNITY;
/* check this in one ms */
t->expire = TICK_ETERNITY;
- check->start = tv_to_ns(&now);
+ check->start = now_ns;
task_queue(t);
}
}
if (h1s->sess->t_idle == -1)
- h1s->sess->t_idle = ns_to_ms(tv_to_ns(&now) - h1s->sess->accept_ts) - h1s->sess->t_handshake;
+ h1s->sess->t_idle = ns_to_ms(now_ns - h1s->sess->accept_ts) - h1s->sess->t_handshake;
/* Get the stream rxbuf */
buf = h1_get_buf(h1c, &h1s->rxbuf);
h1c = h1s->h1c;
sess->accept_date = date;
- sess->accept_ts = tv_to_ns(&now);
+ sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = -1;
* request) and the idle time, which is the delay since the previous
* request. We can set the value now, it will be copied by stream_new().
*/
- sess->t_idle = ns_to_ms(tv_to_ns(&now) - sess->accept_ts) - sess->t_handshake;
+ sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;
if (!sc_new_from_endp(h2s->sd, sess, input))
goto out_close;
* right now.
*/
sess->accept_date = date;
- sess->accept_ts = tv_to_ns(&now);
+ sess->accept_ts = now_ns;
sess->t_handshake = 0;
sess->t_idle = 0;
continue;
}
- child->timestamp = ns_to_sec(tv_to_ns(&now));
+ child->timestamp = ns_to_sec(now_ns);
ret = fork();
if (ret < 0) {
/* Pre-configures a peers frontend to accept incoming connections */
void peers_setup_frontend(struct proxy *fe)
{
- fe->last_change = ns_to_sec(tv_to_ns(&now));
+ fe->last_change = ns_to_sec(now_ns);
fe->cap = PR_CAP_FE | PR_CAP_BE;
fe->mode = PR_MODE_PEERS;
fe->maxconn = 0;
}
init_new_proxy(curproxy);
- curproxy->last_change = ns_to_sec(tv_to_ns(&now));
+ curproxy->last_change = ns_to_sec(now_ns);
curproxy->id = strdup(name);
curproxy->cap = cap;
dump_server_addr(&srv->check.addr, srv_check_addr);
dump_server_addr(&srv->agent.addr, srv_agent_addr);
- srv_time_since_last_change = ns_to_sec(tv_to_ns(&now)) - srv->last_change;
+ srv_time_since_last_change = ns_to_sec(now_ns) - srv->last_change;
bk_f_forced_id = px->options & PR_O_FORCED_ID ? 1 : 0;
srv_f_forced_id = srv->flags & SRV_F_FORCED_ID ? 1 : 0;
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
if ((s->cur_state == SRV_ST_STARTING) &&
- ns_to_sec(tv_to_ns(&now)) < s->last_change + s->slowstart &&
- ns_to_sec(tv_to_ns(&now)) >= s->last_change) {
+ ns_to_sec(now_ns) < s->last_change + s->slowstart &&
+ ns_to_sec(now_ns) >= s->last_change) {
unsigned int ratio;
- ratio = 100 * (ns_to_sec(tv_to_ns(&now)) - s->last_change) / s->slowstart;
+ ratio = 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart;
max = MAX(1, max * ratio / 100);
}
return max;
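For example (hypothetical values): with maxconn = 200, slowstart = 60 s and 30 s elapsed since last_change, ratio = 100 * 30 / 60 = 50, so the dynamic limit becomes max = MAX(1, 200 * 50 / 100) = 100 concurrent connections; right after the server enters warm-up (ratio = 0) the MAX(1, ...) clamp still allows a single connection.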
void resolvers_setup_proxy(struct proxy *px)
{
- px->last_change = ns_to_sec(tv_to_ns(&now));
+ px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_FE | PR_CAP_BE;
px->maxconn = 0;
px->conn_retries = 1;
int srv_downtime(const struct server *s)
{
- if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(tv_to_ns(&now))) // ignore negative time
+ if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(now_ns)) // ignore negative time
return s->down_time;
- return ns_to_sec(tv_to_ns(&now)) - s->last_change + s->down_time;
+ return ns_to_sec(now_ns) - s->last_change + s->down_time;
}
int srv_lastsession(const struct server *s)
{
if (s->counters.last_sess)
- return ns_to_sec(tv_to_ns(&now)) - s->counters.last_sess;
+ return ns_to_sec(now_ns) - s->counters.last_sess;
return -1;
}
struct proxy *px = sv->proxy;
unsigned w;
- if (ns_to_sec(tv_to_ns(&now)) < sv->last_change || ns_to_sec(tv_to_ns(&now)) >= sv->last_change + sv->slowstart) {
+ if (ns_to_sec(now_ns) < sv->last_change || ns_to_sec(now_ns) >= sv->last_change + sv->slowstart) {
/* go to full throttle if the slowstart interval is reached */
if (sv->next_state == SRV_ST_STARTING)
sv->next_state = SRV_ST_RUNNING;
* It must also start immediately, at least at the minimal step when leaving maintenance.
*/
if ((sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
- w = (px->lbprm.wdiv * (ns_to_sec(tv_to_ns(&now)) - sv->last_change) + sv->slowstart) / sv->slowstart;
+ w = (px->lbprm.wdiv * (ns_to_sec(now_ns) - sv->last_change) + sv->slowstart) / sv->slowstart;
else
w = px->lbprm.wdiv;
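To see the ramp (the wdiv value is hypothetical): with px->lbprm.wdiv = 16 and slowstart = 60 s, a server 15 s into warm-up gets w = (16 * 15 + 60) / 60 = 5, i.e. about 5/16 of the full dynamic ratio. Right after entering SRV_ST_STARTING the "+ sv->slowstart" term guarantees w = 1 rather than 0, matching the comment above that the server must start at least at the minimal step, and at the end of the window w reaches roughly wdiv, i.e. full weight.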
event_hdl_sub_list_init(&srv->e_subs);
srv->next_state = SRV_ST_RUNNING; /* early server setup */
- srv->last_change = ns_to_sec(tv_to_ns(&now));
+ srv->last_change = ns_to_sec(now_ns);
srv->check.obj_type = OBJ_TYPE_CHECK;
srv->check.status = HCHK_STATUS_INI;
if (srv->next_state == SRV_ST_STARTING) {
task_schedule(srv->warmup,
tick_add(now_ms,
- MS_TO_TICKS(MAX(1000, (ns_to_sec(tv_to_ns(&now)) - srv->last_change)) / 20)));
+ MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - srv->last_change)) / 20)));
}
}
if (srv_prev_state != s->cur_state) {
if (srv_prev_state == SRV_ST_STOPPED) {
/* server was down and no longer is */
- if (s->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative times
- s->down_time += ns_to_sec(tv_to_ns(&now)) - s->last_change;
+ if (s->last_change < ns_to_sec(now_ns)) // ignore negative times
+ s->down_time += ns_to_sec(now_ns) - s->last_change;
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
s->counters.down_trans++;
}
- s->last_change = ns_to_sec(tv_to_ns(&now));
+ s->last_change = ns_to_sec(now_ns);
}
/* check if backend stats must be updated due to the server state change */
/* backend was down and is back up again:
* no helper function, updating last_change and backend downtime stats
*/
- if (s->proxy->last_change < ns_to_sec(tv_to_ns(&now))) // ignore negative times
- s->proxy->down_time += ns_to_sec(tv_to_ns(&now)) - s->proxy->last_change;
- s->proxy->last_change = ns_to_sec(tv_to_ns(&now));
+ if (s->proxy->last_change < ns_to_sec(now_ns)) // ignore negative times
+ s->proxy->down_time += ns_to_sec(now_ns) - s->proxy->last_change;
+ s->proxy->last_change = ns_to_sec(now_ns);
}
}
srv_adm_set_drain(srv);
}
- srv->last_change = ns_to_sec(tv_to_ns(&now)) - srv_last_time_change;
+ srv->last_change = ns_to_sec(now_ns) - srv_last_time_change;
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;
sess->fe = fe;
sess->origin = origin;
sess->accept_date = date; /* user-visible date for logging */
- sess->accept_ts = tv_to_ns(&now); /* corrected date for internal use */
+ sess->accept_ts = now_ns; /* corrected date for internal use */
sess->stkctr = NULL;
if (pool_head_stk_ctr) {
sess->stkctr = pool_alloc(pool_head_stk_ctr);
{
struct session *sess = conn->owner;
- sess->t_handshake = ns_to_ms(tv_to_ns(&now) - sess->accept_ts);
+ sess->t_handshake = ns_to_ms(now_ns - sess->accept_ts);
if (conn->flags & CO_FL_ERROR)
goto fail;
/* Pre-configures a ring proxy to emit connections */
void sink_setup_proxy(struct proxy *px)
{
- px->last_change = ns_to_sec(tv_to_ns(&now));
+ px->last_change = ns_to_sec(now_ns);
px->cap = PR_CAP_BE;
px->maxconn = 0;
px->conn_retries = 1;
metric = mkf_str(FO_STATUS, fld_status);
break;
case ST_F_LASTCHG:
- metric = mkf_u32(FN_AGE, ns_to_sec(tv_to_ns(&now)) - sv->last_change);
+ metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - sv->last_change);
break;
case ST_F_WEIGHT:
metric = mkf_u32(FN_AVG, (sv->cur_eweight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv);
metric = mkf_u64(FN_COUNTER, px->down_trans);
break;
case ST_F_LASTCHG:
- metric = mkf_u32(FN_AGE, ns_to_sec(tv_to_ns(&now)) - px->last_change);
+ metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - px->last_change);
break;
case ST_F_DOWNTIME:
if (px->srv)
{
struct appctx *appctx = __sc_appctx(sc);
struct show_stat_ctx *ctx = appctx->svcctx;
- unsigned int up = (ns_to_sec(tv_to_ns(&now)) - start_time.tv_sec);
+ unsigned int up = (ns_to_sec(now_ns) - start_time.tv_sec);
char scope_txt[STAT_SCOPE_TXT_MAXLEN + sizeof STAT_SCOPE_PATTERN];
const char *scope_ptr = stats_scope_ptr(appctx, sc);
unsigned long long bps;
}
glob_out_b32 *= 32; // values are 32-byte units
- up = tv_to_ns(&now) - tv_to_ns(&start_time);
+ up = now_ns - tv_to_ns(&start_time);
up_sec = ns_to_sec(up);
up_usec = (up / 1000U) % 1000000U;
/* First, centralize the timers information, and clear any irrelevant
* timeout.
*/
- s->logs.t_connect = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_connect = ns_to_ms(now_ns - s->logs.accept_ts);
s->conn_exp = TICK_ETERNITY;
s->flags &= ~SF_CONN_EXP;
}
if (!(s->flags & SF_IGNORE)) {
- s->logs.t_close = ns_to_ms(tv_to_ns(&now) - s->logs.accept_ts);
+ s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
stream_process_counters(s);
chunk_appendf(&trash,
" age=%s)\n",
- human_time(ns_to_sec(tv_to_ns(&now)) - strm->logs.accept_date.tv_sec, 1));
+ human_time(ns_to_sec(now_ns) - strm->logs.accept_date.tv_sec, 1));
if (strm->txn)
chunk_appendf(&trash,
chunk_appendf(&trash,
" ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
curr_strm->task->state, curr_strm->stream_epoch,
- human_time(ns_to_sec(tv_to_ns(&now)) - ns_to_sec(curr_strm->logs.accept_ts), 1),
+ human_time(ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.accept_ts), 1),
curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
(unsigned long long)curr_strm->cpu_time, (unsigned long long)curr_strm->lat_time);