/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
- if (be->be_counters.shared.tg[tgid - 1])
+ if (be->be_counters.shared.tg && be->be_counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
- } *tg[MAX_TGROUPS];
+ } **tg;
};
/*
struct fe_counters_shared {
COUNTERS_SHARED;
- struct fe_counters_shared_tg *tg[MAX_TGROUPS];
+ struct fe_counters_shared_tg **tg;
};
/* counters used by listeners and frontends */
struct be_counters_shared {
COUNTERS_SHARED;
- struct be_counters_shared_tg *tg[MAX_TGROUPS];
+ struct be_counters_shared_tg **tg;
};
/* counters used by servers and backends */
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
- if (fe->fe_counters.shared.tg[tgid - 1])
+ if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
- if (l && l->counters && l->counters->shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
- if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
+ }
+ if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
- if (fe->fe_counters.shared.tg[tgid - 1])
+ if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
- if (l && l->counters && l->counters->shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
- if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
+ }
+ if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
return;
- if (fe->fe_counters.shared.tg[tgid - 1])
+ if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
- if (l && l->counters && l->counters->shared.tg[tgid - 1])
+ if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
- if (be->be_counters.shared.tg[tgid - 1])
+ if (be->be_counters.shared.tg && be->be_counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
- if (be->be_counters.shared.tg[tgid - 1])
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
+ }
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
return;
- if (fe->fe_counters.shared.tg[tgid - 1])
+ if (fe->fe_counters.shared.tg && fe->fe_counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
- if (l && l->counters && l->counters->shared.tg[tgid - 1])
- _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
- if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
+ }
+ if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
+ _HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
- if (s->counters.shared.tg[tgid - 1]) {
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
- if (s->counters.shared.tg[tgid - 1])
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
else if (srv != prev_srv) {
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->cum_lbconn);
- if (srv->counters.shared.tg[tgid - 1])
+ if (srv->counters.shared.tg && srv->counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cum_lbconn);
}
stream_set_srv_target(s, srv);
s->txn->flags |= TX_CK_DOWN;
}
s->flags |= SF_REDISP;
- if (prev_srv->counters.shared.tg[tgid - 1])
+ if (prev_srv->counters.shared.tg && prev_srv->counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->redispatches);
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->redispatches);
} else {
- if (prev_srv->counters.shared.tg[tgid - 1])
+ if (prev_srv->counters.shared.tg && prev_srv->counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->retries);
if (s->be_tgcounters)
_HA_ATOMIC_INC(&s->be_tgcounters->retries);
return ACT_RET_CONT;
if (px == strm_fe(s)) {
- if (px->fe_counters.shared.tg[tgid - 1])
+ if (px->fe_counters.shared.tg && px->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
}
else {
- if (px->be_counters.shared.tg[tgid - 1])
+ if (px->be_counters.shared.tg && px->be_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
}
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
if (px == strm_fe(s)) {
- if (px->fe_counters.shared.tg[tgid - 1])
+ if (px->fe_counters.shared.tg && px->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_hits);
}
else {
- if (px->be_counters.shared.tg[tgid - 1])
+ if (px->be_counters.shared.tg && px->be_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_hits);
}
return ACT_RET_CONT;
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
(check->health > 0)) {
- if (s->counters.shared.tg[tgid - 1])
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_checks);
report = 1;
check->health--;
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
- if (s->counters.shared.tg[tgid - 1])
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_hana);
if (s->check.fastinter) {
target_pid = s->pcli_next_pid;
/* we can connect now */
s->target = pcli_pid_to_server(target_pid);
- if (objt_server(s->target))
- s->sv_tgcounters = __objt_server(s->target)->counters.shared.tg[tgid - 1];
+ if (objt_server(s->target)) {
+ struct server *srv = __objt_server(s->target);
+
+ if (srv->counters.shared.tg)
+ s->sv_tgcounters = srv->counters.shared.tg[tgid - 1];
+ else
+ s->sv_tgcounters = NULL;
+ }
if (!s->target)
goto server_disconnect;
if (!shared)
return;
- while (it < global.nbtgroups && shared->tg[it]) {
+ while (it < global.nbtgroups && shared->tg && shared->tg[it]) {
if (shared->flags & COUNTERS_SHARED_F_LOCAL) {
/* memory was allocated using calloc(), simply free it */
free(shared->tg[it]);
}
it += 1;
}
+ free(shared->tg);
}
/* release a shared fe counters struct */
if (!guid->key || !shm_stats_file_hdr)
shared->flags |= COUNTERS_SHARED_F_LOCAL;
+ if (!shared->tg) {
+ shared->tg = calloc(global.nbtgroups, sizeof(*shared->tg));
+ if (!shared->tg) {
+ memprintf(errmsg, "couldn't allocate memory for shared counters");
+ return 0;
+ }
+ }
+
while (it < global.nbtgroups) {
if (shared->flags & COUNTERS_SHARED_F_LOCAL) {
size_t tg_size;
s = appctx_strm(appctx);
s->target = target;
- if (objt_server(s->target))
- s->sv_tgcounters = __objt_server(s->target)->counters.shared.tg[tgid - 1];
+ if (objt_server(s->target)) {
+ struct server *srv = __objt_server(s->target);
+
+ /* tg may not be allocated yet; fall back to NULL like the other call sites */
+ if (srv->counters.shared.tg)
+ s->sv_tgcounters = srv->counters.shared.tg[tgid - 1];
+ else
+ s->sv_tgcounters = NULL;
+ }
/* set the "timeout server" */
s->scb->ioto = hc->timeout_server;
int max = 0;
int it;
- for (it = 0; (it < global.nbtgroups && p->fe_counters.shared.tg[it]); it++)
+ for (it = 0; (it < global.nbtgroups && p->fe_counters.shared.tg && p->fe_counters.shared.tg[it]); it++)
max += freq_ctr_remain(&p->fe_counters.shared.tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (unlikely(!max)) {
return 0;
s->be = be;
- s->be_tgcounters = be->be_counters.shared.tg[tgid - 1];
+ if (be->be_counters.shared.tg)
+ s->be_tgcounters = be->be_counters.shared.tg[tgid - 1];
+ else
+ s->be_tgcounters = NULL;
+
HA_ATOMIC_UPDATE_MAX(&be->be_counters.conn_max,
HA_ATOMIC_ADD_FETCH(&be->beconn, 1));
proxy_inc_be_ctr(be);
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
- if (s->counters.shared.tg[tgid - 1])
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->down_trans);
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
}
}
s->last_change = ns_to_sec(now_ns);
- if (s->counters.shared.tg[tgid - 1])
+ if (s->counters.shared.tg && s->counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_state_change, s->last_change);
/* publish the state change */
if (last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - last_change;
s->proxy->last_change = ns_to_sec(now_ns);
- if (s->proxy->be_counters.shared.tg[tgid - 1])
+ if (s->proxy->be_counters.shared.tg && s->proxy->be_counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&s->proxy->be_counters.shared.tg[tgid - 1]->last_state_change, s->proxy->last_change);
}
}
}
srv->last_change = ns_to_sec(now_ns) - srv_last_time_change;
- if (srv->counters.shared.tg[0])
+ if (srv->counters.shared.tg && srv->counters.shared.tg[0])
HA_ATOMIC_STORE(&srv->counters.shared.tg[0]->last_state_change, srv->last_change);
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;
sess->flags = SESS_FL_NONE;
sess->src = NULL;
sess->dst = NULL;
- sess->fe_tgcounters = sess->fe->fe_counters.shared.tg[tgid - 1];
- if (sess->listener && sess->listener->counters)
+ if (sess->fe->fe_counters.shared.tg)
+ sess->fe_tgcounters = sess->fe->fe_counters.shared.tg[tgid - 1];
+ else
+ sess->fe_tgcounters = NULL;
+
+ if (sess->listener && sess->listener->counters && sess->listener->counters->shared.tg)
sess->li_tgcounters = sess->listener->counters->shared.tg[tgid - 1];
else
sess->li_tgcounters = NULL;
if (!(px->cap & PR_CAP_FE))
return 0; /* silently ignored fe/be mismatch */
+ if (!px->fe_counters.shared.tg)
+ return 0;
+
base_off_shared = (char *)px->fe_counters.shared.tg[0];
if (!base_off_shared)
return 0; // not allocated
if (!(px->cap & PR_CAP_BE))
return 0; /* silently ignored fe/be mismatch */
+ if (!px->be_counters.shared.tg)
+ return 0;
+
base_off_shared = (char *)px->be_counters.shared.tg[0];
if (!base_off_shared)
return 0; // not allocated
if (!li->counters)
return 0;
+ if (!li->counters->shared.tg)
+ return 0;
+
base_off_shared = (char *)li->counters->shared.tg[0];
if (!base_off_shared)
return 0; // not allocated
goto err;
srv = __objt_server(node->obj_type);
+ if (!srv->counters.shared.tg)
+ return 0;
+
base_off_shared = (char *)srv->counters.shared.tg[0];
if (!base_off_shared)
return 0; // not allocated
BUG_ON(curr_obj->type != SHM_STATS_FILE_OBJECT_TYPE_FE);
li = __objt_listener(node->obj_type);
// counters are optional for listeners
- if (li->counters)
+ if (li->counters) {
+ if (!li->counters->shared.tg)
+ li->counters->shared.tg = calloc(global.nbtgroups, sizeof(*li->counters->shared.tg));
+ if (li->counters->shared.tg == NULL)
+ goto release;
li->counters->shared.tg[obj_tgid - 1] = &curr_obj->data.fe;
+ }
break;
}
case OBJ_TYPE_SERVER:
BUG_ON(curr_obj->type != SHM_STATS_FILE_OBJECT_TYPE_BE);
sv = __objt_server(node->obj_type);
+ if (!sv->counters.shared.tg)
+ sv->counters.shared.tg = calloc(global.nbtgroups, sizeof(*sv->counters.shared.tg));
+ if (sv->counters.shared.tg == NULL)
+ goto release;
sv->counters.shared.tg[obj_tgid - 1] = &curr_obj->data.be;
break;
}
struct proxy *px;
px = __objt_proxy(node->obj_type);
- if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_FE)
+ if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_FE) {
+ if (!px->fe_counters.shared.tg)
+ px->fe_counters.shared.tg = calloc(global.nbtgroups, sizeof(*px->fe_counters.shared.tg));
+ if (px->fe_counters.shared.tg == NULL)
+ goto release;
px->fe_counters.shared.tg[obj_tgid - 1] = &curr_obj->data.fe;
- else if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_BE)
+ } else if (curr_obj->type == SHM_STATS_FILE_OBJECT_TYPE_BE) {
+ if (!px->be_counters.shared.tg)
+ px->be_counters.shared.tg = calloc(global.nbtgroups, sizeof(*px->be_counters.shared.tg));
+ if (px->be_counters.shared.tg == NULL)
+ goto release;
px->be_counters.shared.tg[obj_tgid - 1] = &curr_obj->data.be;
- else
+ } else
goto release; // not supported
break;
}
case STATS_PX_CAP_FE:
case STATS_PX_CAP_LI:
if (col->flags & STAT_COL_FL_SHARED) {
- counter = (char *)&((struct fe_counters *)counters)->shared.tg;
+ counter = (char *)((struct fe_counters *)counters)->shared.tg;
offset = col->metric.offset[0];
}
else
case STATS_PX_CAP_BE:
case STATS_PX_CAP_SRV:
if (col->flags & STAT_COL_FL_SHARED) {
- counter = (char *)&((struct be_counters *)counters)->shared.tg;
+ counter = (char *)((struct be_counters *)counters)->shared.tg;
offset = col->metric.offset[1];
}
else
* when the default backend is assigned.
*/
s->be = sess->fe;
- s->be_tgcounters = sess->fe->be_counters.shared.tg[tgid - 1];
+ if (sess->fe->be_counters.shared.tg)
+ s->be_tgcounters = sess->fe->be_counters.shared.tg[tgid - 1];
+ else
+ s->be_tgcounters = NULL;
+
s->sv_tgcounters = NULL; // default value
s->req_cap = NULL;