/* Set the time of the last session on the backend <be>.
 *
 * Records the current time (seconds) into this thread group's shared
 * counter slot. The store is atomic because the shared counters may be
 * read concurrently by other threads (e.g. stats dumpers).
 *
 * NOTE(review): the NULL guards on shared.tg / shared.tg[tgid - 1] were
 * dropped by this change — assumes the shared counter slots are always
 * allocated before any traffic is processed.
 */
static inline void be_set_sess_last(struct proxy *be)
{
	HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
- if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
- _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
- update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
- }
- if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
+ update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
+
+ if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
- if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
- _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
- update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
- }
- if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
+ update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
+
+ if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
return;
- if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
- if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
+ if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* Increase the number of cumulated streams on the designated backend <be>.
 *
 * Bumps this thread group's shared cumulative session counter and its
 * per-second frequency counter, then refreshes the process-wide
 * sessions-per-second maximum from the local (non-shared) freq counter.
 *
 * NOTE(review): the NULL guards on shared.tg / shared.tg[tgid - 1] were
 * dropped by this change — assumes the shared counter slots are always
 * allocated before any traffic is processed.
 */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
	_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
	update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);

	HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
	                     update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
return;
- if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
- _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
- update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
- }
- if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
+ update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
+
+ if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
/* Increase the number of cumulated streams on the designated server <s>.
 *
 * Bumps this thread group's shared cumulative session counter and its
 * per-second frequency counter, then refreshes the server's
 * sessions-per-second maximum from the local (non-shared) freq counter.
 *
 * NOTE(review): the NULL guards on shared.tg / shared.tg[tgid - 1] were
 * dropped by this change — assumes the shared counter slots are always
 * allocated before any traffic is processed.
 */
static inline void srv_inc_sess_ctr(struct server *s)
{
	_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
	update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);

	HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
	                     update_freq_ctr(&s->counters._sess_per_sec, 1));
}
/* Set the time of the last session on the designated server <s>.
 *
 * Records the current time (seconds) into this thread group's shared
 * counter slot. The store is atomic because the shared counters may be
 * read concurrently by other threads (e.g. stats dumpers).
 *
 * NOTE(review): the NULL guards on shared.tg / shared.tg[tgid - 1] were
 * dropped by this change — assumes the shared counter slots are always
 * allocated before any traffic is processed.
 */
static inline void srv_set_sess_last(struct server *s)
{
	HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* returns the current server throttle rate between 0 and 100% */
else if (srv != prev_srv) {
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->cum_lbconn);
- if (srv->counters.shared.tg && srv->counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cum_lbconn);
+ _HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cum_lbconn);
}
stream_set_srv_target(s, srv);
}
s->txn->flags |= TX_CK_DOWN;
}
s->flags |= SF_REDISP;
- if (prev_srv->counters.shared.tg && prev_srv->counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->redispatches);
+ _HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->redispatches);
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->redispatches);
} else {
- if (prev_srv->counters.shared.tg && prev_srv->counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->retries);
+ _HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->retries);
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->retries);
}
return ACT_RET_CONT;
if (px == strm_fe(s)) {
- if (px->fe_counters.shared.tg && px->fe_counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
+ _HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
}
else {
- if (px->be_counters.shared.tg && px->be_counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
+ _HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
}
cache_tree = get_cache_tree_from_hash(cache, read_u32(s->txn->cache_hash));
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
if (px == strm_fe(s)) {
- if (px->fe_counters.shared.tg && px->fe_counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_hits);
+ _HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_hits);
}
else {
- if (px->be_counters.shared.tg && px->be_counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_hits);
+ _HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_hits);
}
return ACT_RET_CONT;
} else {
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
(check->health > 0)) {
- if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_checks);
+ _HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_checks);
report = 1;
check->health--;
if (check->health < check->rise)
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
- if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_hana);
+ _HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_hana);
if (s->check.fastinter) {
/* timer might need to be advanced, it might also already be
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
- if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
- HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->down_trans);
+ HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->down_trans);
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
}
}
s->last_change = ns_to_sec(now_ns);
- if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
- HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_state_change, s->last_change);
+ HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_state_change, s->last_change);
/* publish the state change */
_srv_event_hdl_prepare_state(&cb_data.state,
if (last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - last_change;
s->proxy->last_change = ns_to_sec(now_ns);
- if (s->proxy->be_counters.shared.tg && s->proxy->be_counters.shared.tg[tgid - 1])
- HA_ATOMIC_STORE(&s->proxy->be_counters.shared.tg[tgid - 1]->last_state_change, s->proxy->last_change);
+ HA_ATOMIC_STORE(&s->proxy->be_counters.shared.tg[tgid - 1]->last_state_change, s->proxy->last_change);
}
}