/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
- HA_ATOMIC_STORE(&be->be_counters.shared->last_sess, ns_to_sec(now_ns));
+ HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be
#define COUNTERS_SHARED \
struct { \
uint16_t flags; /* COUNTERS_SHARED_F flags */\
+ };
+#define COUNTERS_SHARED_TG \
+ struct { \
unsigned long last_change; /* last time, when the state was changed */\
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
// for convenience (generic pointer)
struct counters_shared {
COUNTERS_SHARED;
+ struct {
+ COUNTERS_SHARED_TG;
+ } *tg[MAX_TGROUPS];
};
/* counters used by listeners and frontends */
-struct fe_counters_shared {
- COUNTERS_SHARED;
+struct fe_counters_shared_tg {
+ COUNTERS_SHARED_TG;
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
long long cache_lookups;/* cache lookups */
long long comp_rsp; /* number of compressed responses */
long long rsp[6]; /* http response codes */
-
-
} http;
} p; /* protocol-specific stats */
long long failed_req; /* failed requests (eg: invalid or timeout) */
};
+struct fe_counters_shared {
+ COUNTERS_SHARED;
+ struct fe_counters_shared_tg *tg[MAX_TGROUPS];
+};
+
struct fe_counters {
struct fe_counters_shared *shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
} p; /* protocol-specific stats */
};
-struct be_counters_shared {
- COUNTERS_SHARED;
+struct be_counters_shared_tg {
+ COUNTERS_SHARED_TG;
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
long long failed_conns; /* failed connect() attempts (BE only) */
};
+struct be_counters_shared {
+ COUNTERS_SHARED;
+ struct be_counters_shared_tg *tg[MAX_TGROUPS];
+};
+
/* counters used by servers and backends */
struct be_counters {
struct be_counters_shared *shared; /* shared counters */
#ifndef _HAPROXY_COUNTERS_H
# define _HAPROXY_COUNTERS_H
+#include <stddef.h>
+
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);
+/* time oriented helper: get last time (relative to current time) on a given
+ * <scounters> array, for <elem> member (one member per thread group), which
+ * is assumed to be of unsigned long type.
+ *
+ * Wrapping is handled by taking the lowest diff between now and the last
+ * counter. But since wrapping is expected once every ~136 years (starting
+ * 01/01/1970), perhaps it's not worth the extra CPU cost.. let's see.
+ */
+#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
+({ \
+ unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
+ unsigned long now_seconds = ns_to_sec(now_ns); \
+ int it; \
+ \
+ for (it = 1; it < global.nbtgroups; it++) { \
+ unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
+ if ((now_seconds - cur) < (now_seconds - last)) \
+ last = cur; \
+ } \
+ last; \
+})
+
+#define COUNTERS_SHARED_LAST(scounters, elem) \
+({ \
+ int offset = offsetof(typeof(**scounters), elem); \
+ unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
+ \
+ last; \
+})
+
+
+/* generic unsigned integer addition for all <elem> members from the
+ * <scounters> array (one member per thread group)
+ * <rfunc> is a function taking a pointer as parameter to read from the
+ * memory location pointed to by scounters[it].elem
+ */
+#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
+({ \
+ uint64_t __ret = 0; \
+ int it; \
+ \
+ for (it = 0; it < global.nbtgroups; it++) \
+ __ret += rfunc((type *)((char *)scounters[it] + offset)); \
+ __ret; \
+})
+
+#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
+({ \
+ int offset = offsetof(typeof(**scounters), elem); \
+ uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
+ \
+ __ret; \
+})
+/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
+ * <arg1> and <arg2>
+ */
+#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
+({ \
+ uint64_t __ret = 0; \
+ int it; \
+ \
+ for (it = 0; it < global.nbtgroups; it++) \
+ __ret += rfunc(&scounters[it]->elem, arg1, arg2); \
+ __ret; \
+})
+
#endif /* _HAPROXY_COUNTERS_H */
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
- _HA_ATOMIC_INC(&fe->fe_counters.shared->cum_conn);
+ _HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_conn);
if (l && l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->cum_conn);
- update_freq_ctr(&fe->fe_counters.shared->conn_per_sec, 1);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_conn);
+ update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->conn_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
- _HA_ATOMIC_INC(&fe->fe_counters.shared->cum_sess);
+ _HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess);
if (l && l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->cum_sess);
- update_freq_ctr(&fe->fe_counters.shared->sess_per_sec, 1);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess);
+ update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
unsigned int http_ver)
{
if (http_ver == 0 ||
- http_ver > sizeof(fe->fe_counters.shared->cum_sess_ver) / sizeof(*fe->fe_counters.shared->cum_sess_ver))
+ http_ver > sizeof(fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver))
return;
- _HA_ATOMIC_INC(&fe->fe_counters.shared->cum_sess_ver[http_ver - 1]);
+ _HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->cum_sess_ver[http_ver - 1]);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
- _HA_ATOMIC_INC(&be->be_counters.shared->cum_sess);
- update_freq_ctr(&be->be_counters.shared->sess_per_sec, 1);
+ _HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->cum_sess);
+ update_freq_ctr(&be->be_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
unsigned int http_ver)
{
- if (http_ver >= sizeof(fe->fe_counters.shared->p.http.cum_req) / sizeof(*fe->fe_counters.shared->p.http.cum_req))
+ if (http_ver >= sizeof(fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req))
return;
- _HA_ATOMIC_INC(&fe->fe_counters.shared->p.http.cum_req[http_ver]);
+ _HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
if (l && l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->p.http.cum_req[http_ver]);
- update_freq_ctr(&fe->fe_counters.shared->req_per_sec, 1);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
+ update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->req_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
- _HA_ATOMIC_INC(&s->counters.shared->cum_sess);
- update_freq_ctr(&s->counters.shared->sess_per_sec, 1);
+ _HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->cum_sess);
+ update_freq_ctr(&s->counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
update_freq_ctr(&s->counters._sess_per_sec, 1));
}
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
- HA_ATOMIC_STORE(&s->counters.shared->last_sess, ns_to_sec(now_ns));
+ HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* returns the current server throttle rate between 0 and 100% */
s->scb->state = SC_ST_REQ;
} else {
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->retries);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
s->scb->state = SC_ST_ASS;
}
#include <haproxy/backend.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
+#include <haproxy/counters.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hash.h>
goto out;
}
else if (srv != prev_srv) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cum_lbconn);
- _HA_ATOMIC_INC(&srv->counters.shared->cum_lbconn);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cum_lbconn);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cum_lbconn);
}
s->target = &srv->obj_type;
}
s->txn->flags |= TX_CK_DOWN;
}
s->flags |= SF_REDISP;
- _HA_ATOMIC_INC(&prev_srv->counters.shared->redispatches);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->redispatches);
+ _HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->redispatches);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->redispatches);
} else {
- _HA_ATOMIC_INC(&prev_srv->counters.shared->retries);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
+ _HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->retries);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
}
}
}
s->scb->flags |= SC_FL_NOLINGER;
if (s->flags & SF_SRV_REUSED) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->reuse);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->reuse);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->reuse);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->reuse);
} else {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->connect);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->connect);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->connect);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->connect);
}
err = do_connect_server(s, srv_conn);
s->conn_err_type = STRM_ET_QUEUE_ERR;
}
- _HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_NOSRV:
s->conn_err_type = STRM_ET_CONN_ERR;
}
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_QUEUED:
if (srv)
srv_set_sess_last(srv);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
if (may_dequeue_tasks(srv, s->be))
if (srv)
srv_set_sess_last(srv);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
sess_change_server(s, NULL);
pendconn_cond_unlink(s->pend_pos);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
sc_abort(sc);
sc_shutdown(sc);
req->flags |= CF_WRITE_TIMEOUT;
}
if (objt_server(s->target))
- _HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->failed_conns);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
+ _HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
s->conn_err_type = STRM_ET_CONN_OTHER;
if (objt_server(s->target))
- _HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
*/
void set_backend_down(struct proxy *be)
{
- HA_ATOMIC_STORE(&be->be_counters.shared->last_change, ns_to_sec(now_ns));
- _HA_ATOMIC_INC(&be->be_counters.shared->down_trans);
+ HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
+ _HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->down_trans);
if (!(global.mode & MODE_STARTING)) {
ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
}
int be_downtime(struct proxy *px) {
- unsigned long last_change = HA_ATOMIC_LOAD(&px->be_counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(px->be_counters.shared->tg, last_change);
if (px->lbprm.tot_weight && last_change < ns_to_sec(now_ns)) // ignore negative time
return px->down_time;
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
- smp->data.u.sint = read_freq_ctr(&px->be_counters.shared->sess_per_sec);
+ smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}
{
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
- smp->data.u.sint = read_freq_ctr(&args->data.srv->counters.shared->sess_per_sec);
+ smp->data.u.sint = COUNTERS_SHARED_TOTAL(args->data.srv->counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}
return ACT_RET_CONT;
if (px == strm_fe(s))
- _HA_ATOMIC_INC(&px->fe_counters.shared->p.http.cache_lookups);
+ _HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
else
- _HA_ATOMIC_INC(&px->be_counters.shared->p.http.cache_lookups);
+ _HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
cache_tree = get_cache_tree_from_hash(cache, read_u32(s->txn->cache_hash));
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
if (px == strm_fe(s))
- _HA_ATOMIC_INC(&px->fe_counters.shared->p.http.cache_hits);
+ _HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_hits);
else
- _HA_ATOMIC_INC(&px->be_counters.shared->p.http.cache_hits);
+ _HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_hits);
return ACT_RET_CONT;
} else {
s->target = NULL;
#include <haproxy/cfgparse.h>
#include <haproxy/check.h>
#include <haproxy/chunk.h>
+#include <haproxy/counters.h>
#include <haproxy/dgram.h>
#include <haproxy/dynbuf.h>
#include <haproxy/extcheck.h>
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
(check->health > 0)) {
- _HA_ATOMIC_INC(&s->counters.shared->failed_checks);
+ _HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_checks);
report = 1;
check->health--;
if (check->health < check->rise)
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
- _HA_ATOMIC_INC(&s->counters.shared->failed_hana);
+ _HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_hana);
if (s->check.fastinter) {
/* timer might need to be advanced, it might also already be
"UP %d/%d", "UP",
"NOLB %d/%d", "NOLB",
"no check" };
- unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
if (!(s->check.state & CHK_ST_ENABLED))
sv_state = 6;
#include <haproxy/atomic.h>
#include <haproxy/clock.h>
#include <haproxy/counters.h>
+#include <haproxy/global.h>
#include <haproxy/time.h>
+/* Generic release routine for a shared counters struct: frees each
+ * per-thread-group counter block (tg[0..nbtgroups-1]) and then the base
+ * structure itself. Safe to call with a NULL pointer.
+ */
+static void _counters_shared_drop(void *counters)
+{
+	struct counters_shared *shared = counters;
+	int it = 0;
+
+	if (!shared)
+		return;
+
+	/* memory was allocated using calloc(), simply free it */
+	while (it < global.nbtgroups) {
+		free(shared->tg[it]);
+		it += 1;
+	}
+	free(counters);
+}
+
+/* release a shared fe counters struct (NULL is allowed and is a no-op) */
+void counters_fe_shared_drop(struct fe_counters_shared *counters)
+{
+	_counters_shared_drop(counters);
+}
+
+/* release a shared be counters struct (NULL is allowed and is a no-op) */
+void counters_be_shared_drop(struct be_counters_shared *counters)
+{
+	_counters_shared_drop(counters);
+}
+
/* retrieved shared counters pointer for a given <guid> object
- * <size> hint is expected to reflect the actual type size (fe/be)
+ * <size> hint is expected to reflect the actual tg member size (fe/be)
* if <guid> is not set, then sharing is disabled
* Returns the pointer on success or NULL on failure
*/
{
struct counters_shared *shared;
uint last_change;
+ int it = 0;
/* no shared memory for now, simply allocate a memory block
* for the counters (zero-initialized), ignore guid
*/
- shared = calloc(1, size);
+ shared = calloc(1, sizeof(*shared));
if (!shared)
return NULL;
if (!guid->node.key)
shared->flags |= COUNTERS_SHARED_F_LOCAL;
+ while (it < global.nbtgroups) {
+ shared->tg[it] = calloc(1, size);
+ if (!shared->tg[it]) {
+ _counters_shared_drop(shared);
+ return NULL;
+ }
+ it += 1;
+ }
last_change = ns_to_sec(now_ns);
- HA_ATOMIC_STORE(&shared->last_change, last_change);
+
+	/* only set the first group: readers only consider the latest value */
+ HA_ATOMIC_STORE(&shared->tg[0]->last_change, last_change);
return shared;
}
/* retrieve shared fe counters pointer for a given <guid> object */
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid)
{
- return _counters_shared_get(guid, sizeof(struct fe_counters_shared));
+ return _counters_shared_get(guid, sizeof(struct fe_counters_shared_tg));
}
/* retrieve shared be counters pointer for a given <guid> object */
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid)
{
- return _counters_shared_get(guid, sizeof(struct be_counters_shared));
-}
-
-static void _counters_shared_drop(void *counters)
-{
- /* memory was allocated using calloc(), simply free it */
- free(counters);
-}
-
-/* release a shared fe counters struct */
-void counters_fe_shared_drop(struct fe_counters_shared *counters)
-{
- _counters_shared_drop(counters);
-}
-
-/* release a shared be counters struct */
-void counters_be_shared_drop(struct be_counters_shared *counters)
-{
- _counters_shared_drop(counters);
+ return _counters_shared_get(guid, sizeof(struct be_counters_shared_tg));
}
goto end;
rewrite_err:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
hdr_rule_err:
node = ebpt_first(&hdr_rules);
while (node) {
if (st->comp_ctx[dir] && st->comp_ctx[dir]->cur_lvl > 0) {
update_freq_ctr(&global.comp_bps_in, consumed);
- _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_in[dir], consumed);
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_in[dir], consumed);
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
update_freq_ctr(&global.comp_bps_out, to_forward);
- _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_out[dir], to_forward);
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_out[dir], to_forward);
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
} else {
- _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_byp[dir], consumed);
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_byp[dir], consumed);
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
}
return to_forward;
goto end;
if (strm_fe(s)->mode == PR_MODE_HTTP)
- _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->p.http.comp_rsp);
+ _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
- _HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.comp_rsp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
end:
return 1;
}
#include <haproxy/arg.h>
#include <haproxy/chunk.h>
#include <haproxy/connection.h>
+#include <haproxy/counters.h>
#include <haproxy/fd.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
- smp->data.u.sint = read_freq_ctr(&px->fe_counters.shared->req_per_sec);
+ smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, req_per_sec, read_freq_ctr);
return 1;
}
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
- smp->data.u.sint = read_freq_ctr(&px->fe_counters.shared->sess_per_sec);
+ smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}
#include <haproxy/cli.h>
#include <haproxy/clock.h>
#include <haproxy/connection.h>
+#include <haproxy/counters.h>
#ifdef USE_CPU_AFFINITY
#include <haproxy/cpuset.h>
#include <haproxy/cpu_topo.h>
send_log(p, LOG_NOTICE, "SIGHUP received, dumping servers states for proxy %s.\n", p->id);
while (s) {
chunk_printf(&trash,
- "SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %lld tot.",
+ "SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %llu tot.",
p->id, s->id,
(s->cur_state != SRV_ST_STOPPED) ? "UP" : "DOWN",
- s->cur_sess, s->queueslength, HA_ATOMIC_LOAD(&s->counters.shared->cum_sess));
+ s->cur_sess, s->queueslength, (ullong)COUNTERS_SHARED_TOTAL(s->counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);
s = s->next;
/* FIXME: those info are a bit outdated. We should be able to distinguish between FE and BE. */
if (!p->srv) {
chunk_printf(&trash,
- "SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ "SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
- p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
+ p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
} else if (p->srv_act == 0) {
chunk_printf(&trash,
- "SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ "SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
(p->srv_bck) ? "is running on backup servers" : "has no server available",
- p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
+ p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
} else {
chunk_printf(&trash,
"SIGHUP: Proxy %s has %d active servers and %d backup servers available."
- " Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ " Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id, p->srv_act, p->srv_bck,
- p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
+ p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
}
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);
/* let's log the request time */
s->logs.request_ts = now_ns;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
done:
goto leave;
fail_rewrite:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
goto leave;
fail_rewrite:
- _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->failed_rewrites, 1);
+ _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->failed_rewrites, 1);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_ADD(&sess->listener->counters->shared->failed_rewrites, 1);
+ _HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites, 1);
if (objt_server(s->target))
- _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->failed_rewrites, 1);
+ _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
goto leave;
fail_rewrite:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
struct session *sess, struct stream *s, int flags)
{
if (http_res_set_status(rule->arg.http.i, rule->arg.http.str, s) == -1) {
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
if (!(s->flags & SF_ERR_MASK))
s->req.analysers &= AN_REQ_FLT_END;
s->res.analysers &= AN_RES_FLT_END;
- _HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_LOCAL;
goto leave;
fail_rewrite:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
goto leave;
fail_rewrite:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
return ACT_RET_ABRT;
struct acl_cond *cond;
s->flags |= SF_MONITOR;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
/* Check if we want to fail this monitor request or not */
list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_cond;
return_bad_req:
txn->status = 400;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
/* fall through */
/* Proceed with the applets now. */
if (unlikely(objt_applet(s->target))) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
if (http_handle_expect_hdr(s, htx, msg) == -1)
goto return_int_err;
if (!req->analyse_exp)
req->analyse_exp = tick_add(now_ms, 0);
stream_inc_http_err_ctr(s);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto done_without_exp;
s->logs.request_ts = now_ns;
stream_inc_http_err_ctr(s);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req:
txn->status = 400;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
http_set_term_flags(s);
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req: /* let's centralize all bad requests */
txn->status = 400;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
return 0;
return_cli_abort:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_READ_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
status = 400;
goto return_prx_cond;
return_srv_abort:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_WRITE_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
status = 502;
return_int_err:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
status = 500;
goto return_prx_cond;
return_bad_req:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
status = 400;
/* fall through */
s->flags &= ~SF_CURR_SESS;
_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
}
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->retries);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
}
- _HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
req = &s->req;
res = &s->res;
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
/* if the server refused the early data, just send a 425 */
if (conn && conn->err_code == CO_ER_SSL_EARLY_FAILED)
return 0;
}
}
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 504;
stream_inc_http_fail_ctr(s);
/* 3: client abort with an abortonclose */
else if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->scb->flags & SC_FL_SHUT_DONE) &&
(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
txn->status = 400;
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
rep->analysers &= AN_RES_FLT_END;
if (!(s->flags & SF_ERR_MASK))
if (n < 1 || n > 5)
n = 0;
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->p.http.rsp[n]);
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->p.http.cum_req);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.rsp[n]);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.cum_req);
}
/*
return 1;
return_int_err:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
return 0;
}
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
return 1;
deny:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_resp);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->denied_resp);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_resp);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->denied_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_err;
s->logs.t_data = -1; /* was not a valid response */
txn->status = 502;
stream_inc_http_fail_ctr(s);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
return 0;
return_srv_abort:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
stream_inc_http_fail_ctr(s);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_READ_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
goto return_error;
return_cli_abort:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_WRITE_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
goto return_error;
return_int_err:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_error;
return_bad_res:
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_inc_http_fail_ctr(s);
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
out:
txn->status = 408;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLITO;
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
goto abort;
abort_res:
#include <haproxy/cfgparse.h>
#include <haproxy/cli-t.h>
#include <haproxy/connection.h>
+#include <haproxy/counters.h>
#include <haproxy/errors.h>
#include <haproxy/fd.h>
#include <haproxy/freq_ctr.h>
}
#endif
if (p && p->fe_sps_lim) {
- int max = freq_ctr_remain(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0);
+ int max = 0;
+
+ for (int it = 0; it < global.nbtgroups; it++)
+ max += freq_ctr_remain(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (unlikely(!max)) {
+ unsigned int min_wait = 0;
+
+ for (int it = 0; it < global.nbtgroups; it++) {
+ unsigned int cur_wait = next_event_delay(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
+ if (!it || cur_wait < min_wait)
+ min_wait = cur_wait;
+ }
+
/* frontend accept rate limit was reached */
- expire = tick_add(now_ms, next_event_delay(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0));
+ expire = tick_add(now_ms, min_wait);
goto limit_proxy;
}
dequeue_all_listeners();
if (p && !MT_LIST_ISEMPTY(&p->listener_queue) &&
- (!p->fe_sps_lim || freq_ctr_remain(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0) > 0))
+ (!p->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, p->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(p, 0);
}
return;
dequeue_all_listeners();
if (fe && !MT_LIST_ISEMPTY(&fe->listener_queue) &&
- (!fe->fe_sps_lim || freq_ctr_remain(&fe->fe_counters.shared->sess_per_sec, fe->fe_sps_lim, 0) > 0))
+ (!fe->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, fe->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(fe, 0);
else if (fe) {
unsigned int wait;
int expire = TICK_ETERNITY;
if (fe->task && fe->fe_sps_lim &&
- (wait = next_event_delay(&fe->fe_counters.shared->sess_per_sec,fe->fe_sps_lim, 0))) {
+ (wait = COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, next_event_delay, fe->fe_sps_lim, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the
parse_error:
if (l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->failed_req);
- _HA_ATOMIC_INC(&frontend->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->failed_req);
+ _HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->failed_req);
goto error;
cli_abort:
if (l->counters)
- _HA_ATOMIC_INC(&l->counters->shared->cli_aborts);
- _HA_ATOMIC_INC(&frontend->fe_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->cli_aborts);
error:
se_fl_set(appctx->sedesc, SE_FL_ERROR);
}
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[5]);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[5]);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
h1c->errcode = 500;
ret = h1_send_error(h1c);
session_inc_http_req_ctr(sess);
session_inc_http_err_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
if (!h1c->errcode)
h1c->errcode = 400;
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
h1c->errcode = 501;
ret = h1_send_error(h1c);
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
h1c->errcode = 408;
ret = h1_send_error(h1c);
* the data plane but on the control plane.
*/
if (p->cap & PR_CAP_FE)
- cum_conn = HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn);
+ cum_conn = COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD);
if (p->cap & PR_CAP_BE)
- cum_sess = HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess);
+ cum_sess = COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
if ((p->mode == PR_MODE_TCP || p->mode == PR_MODE_HTTP || p->mode == PR_MODE_SYSLOG || p->mode == PR_MODE_SPOP) && !(p->cap & PR_CAP_INT))
ha_warning("Proxy %s stopped (cumulated conns: FE: %lld, BE: %lld).\n",
goto out;
if (p->fe_sps_lim &&
- (wait = next_event_delay(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0))) {
+ (wait = COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, next_event_delay, p->fe_sps_lim, 0))) {
+
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the
dump_server_addr(&srv->check.addr, srv_check_addr);
dump_server_addr(&srv->agent.addr, srv_agent_addr);
- srv_time_since_last_change = ns_to_sec(now_ns) - HA_ATOMIC_LOAD(&srv->counters.shared->last_change);
+ srv_time_since_last_change = ns_to_sec(now_ns) - COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_change);
bk_f_forced_id = px->options & PR_O_FORCED_ID ? 1 : 0;
srv_f_forced_id = srv->flags & SRV_F_FORCED_ID ? 1 : 0;
#include <import/eb32tree.h>
#include <haproxy/api.h>
#include <haproxy/backend.h>
+#include <haproxy/counters.h>
#include <haproxy/http_rules.h>
#include <haproxy/pool.h>
#include <haproxy/queue.h>
else max = MAX(s->minconn,
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
- last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
+ last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
if ((s->cur_state == SRV_ST_STARTING) &&
ns_to_sec(now_ns) < last_change + s->slowstart &&
int srv_downtime(const struct server *s)
{
- unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
if ((s->cur_state != SRV_ST_STOPPED) || last_change >= ns_to_sec(now_ns)) // ignore negative time
return s->down_time;
*/
void server_recalc_eweight(struct server *sv, int must_update)
{
- unsigned long last_change = HA_ATOMIC_LOAD(&sv->counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(sv->counters.shared->tg, last_change);
struct proxy *px = sv->proxy;
unsigned w;
if (srv->next_state == SRV_ST_STARTING) {
task_schedule(srv->warmup,
tick_add(now_ms,
- MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - HA_ATOMIC_LOAD(&srv->counters.shared->last_change))) / 20)));
+ MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_change))) / 20)));
}
}
/* check if server stats must be updated due the the server state change */
if (srv_prev_state != s->cur_state) {
if (srv_prev_state == SRV_ST_STOPPED) {
- unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
/* server was down and no longer is */
if (last_change < ns_to_sec(now_ns)) // ignore negative times
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
- HA_ATOMIC_INC(&s->counters.shared->down_trans);
+ HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->down_trans);
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
}
if (s->cur_state != SRV_ST_RUNNING && s->proxy->ready_srv == s)
HA_ATOMIC_STORE(&s->proxy->ready_srv, NULL);
- HA_ATOMIC_STORE(&s->counters.shared->last_change, ns_to_sec(now_ns));
+ HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
/* publish the state change */
_srv_event_hdl_prepare_state(&cb_data.state,
if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
set_backend_down(s->proxy); /* backend going down */
else if (!prev_srv_count && (s->proxy->srv_bck || s->proxy->srv_act)) {
- unsigned long last_change = HA_ATOMIC_LOAD(&s->proxy->be_counters.shared->last_change);
+ unsigned long last_change = COUNTERS_SHARED_LAST(s->proxy->be_counters.shared->tg, last_change);
/* backend was down and is back up again:
* no helper function, updating last_change and backend downtime stats
*/
if (last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - last_change;
- HA_ATOMIC_STORE(&s->proxy->be_counters.shared->last_change, ns_to_sec(now_ns));
+ HA_ATOMIC_STORE(&s->proxy->be_counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
}
}
srv_adm_set_drain(srv);
}
- HA_ATOMIC_STORE(&srv->counters.shared->last_change, ns_to_sec(now_ns) - srv_last_time_change);
+ HA_ATOMIC_STORE(&srv->counters.shared->tg[0]->last_change, ns_to_sec(now_ns) - srv_last_time_change);
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;
if (!(px->cap & PR_CAP_FE))
return 0; /* silently ignored fe/be mismatch */
- base_off_shared = (char *)px->fe_counters.shared;
+ base_off_shared = (char *)px->fe_counters.shared->tg[0];
base_off = (char *)&px->fe_counters;
off = 0;
if (!(px->cap & PR_CAP_BE))
return 0; /* silently ignored fe/be mismatch */
- base_off_shared = (char *)px->be_counters.shared;
+ base_off_shared = (char *)px->be_counters.shared->tg[0];
base_off = (char *)&px->be_counters;
off = 1;
if (!li->counters)
return 0;
- base_off_shared = (char *)li->counters->shared;
+ base_off_shared = (char *)li->counters->shared->tg[0];
base_off = (char *)li->counters;
off = 0;
goto err;
srv = __objt_server(node->obj_type);
- base_off_shared = (char *)srv->counters.shared;
+ base_off_shared = (char *)srv->counters.shared->tg[0];
base_off = (char *)&srv->counters;
off = 1;
#include <haproxy/backend.h>
#include <haproxy/check.h>
#include <haproxy/chunk.h>
+#include <haproxy/counters.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
#include <haproxy/listener.h>
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
- .metric.offset[0] = offsetof(struct fe_counters_shared, offset_f), \
- .metric.offset[1] = offsetof(struct be_counters_shared, offset_f), \
+ .metric.offset[0] = offsetof(struct fe_counters_shared_tg, offset_f), \
+ .metric.offset[1] = offsetof(struct be_counters_shared_tg, offset_f), \
}
/* Define a new generic metric for frontend side only. */
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
- .metric.offset[0] = offsetof(struct fe_counters_shared, offset_f), \
+ .metric.offset[0] = offsetof(struct fe_counters_shared_tg, offset_f), \
}
/* Define a new generic metric for backend side only. */
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
- .metric.offset[1] = offsetof(struct be_counters_shared, offset_f), \
+ .metric.offset[1] = offsetof(struct be_counters_shared_tg, offset_f), \
}
const struct stat_col stat_cols_px[ST_I_PX_MAX] = {
case ST_I_PX_LASTSESS:
if (srv)
- return !HA_ATOMIC_LOAD(&srv->counters.shared->last_sess);
+ return !COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_sess);
else if (px)
- return !HA_ATOMIC_LOAD(&px->be_counters.shared->last_sess);
+ return !COUNTERS_SHARED_LAST(px->be_counters.shared->tg, last_sess);
else
return 0;
enum field_nature fn;
struct field value;
void *counter = NULL;
+ int offset = 0;
int wrong_side = 0;
/* Only generic stat column must be used as input. */
switch (cap) {
case STATS_PX_CAP_FE:
case STATS_PX_CAP_LI:
- if (col->flags & STAT_COL_FL_SHARED)
- counter = (char *)((struct fe_counters *)counters)->shared + col->metric.offset[0];
+ if (col->flags & STAT_COL_FL_SHARED) {
+ counter = (char *)&((struct fe_counters *)counters)->shared->tg;
+ offset = col->metric.offset[0];
+ }
else
counter = (char *)counters + col->metric.offset[0];
wrong_side = !(col->cap & (STATS_PX_CAP_FE|STATS_PX_CAP_LI));
case STATS_PX_CAP_BE:
case STATS_PX_CAP_SRV:
- if (col->flags & STAT_COL_FL_SHARED)
- counter = (char *)((struct be_counters *)counters)->shared + col->metric.offset[1];
+ if (col->flags & STAT_COL_FL_SHARED) {
+ counter = (char *)&((struct be_counters *)counters)->shared->tg;
+ offset = col->metric.offset[1];
+ }
else
counter = (char *)counters + col->metric.offset[1];
wrong_side = !(col->cap & (STATS_PX_CAP_BE|STATS_PX_CAP_SRV));
if (idx == ST_I_PX_REQ_TOT && cap == STATS_PX_CAP_FE && !stat_file) {
struct proxy *px = __objt_proxy(objt);
const size_t nb_reqs =
- sizeof(px->fe_counters.shared->p.http.cum_req) /
- sizeof(*px->fe_counters.shared->p.http.cum_req);
+ sizeof(px->fe_counters.shared->tg[0]->p.http.cum_req) /
+ sizeof(*px->fe_counters.shared->tg[0]->p.http.cum_req);
uint64_t total_req = 0;
int i;
for (i = 0; i < nb_reqs; i++)
- total_req += HA_ATOMIC_LOAD(&px->fe_counters.shared->p.http.cum_req[i]);
+ total_req += COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, p.http.cum_req[i], HA_ATOMIC_LOAD);
return mkf_u64(FN_COUNTER, total_req);
}
if (fn == FN_COUNTER) {
switch (stcol_format(col)) {
case FF_U64:
- if (col->flags & STAT_COL_FL_SHARED)
- value = mkf_u64(FN_COUNTER, HA_ATOMIC_LOAD((uint64_t *)counter));
+ if (col->flags & STAT_COL_FL_SHARED) {
+ uint64_t total;
+ total = COUNTERS_SHARED_TOTAL_OFFSET(((char **)counter), uint64_t, offset, HA_ATOMIC_LOAD);
+ value = mkf_u64(FN_COUNTER, total);
+ }
else
value = mkf_u64(FN_COUNTER, *(uint64_t *)counter);
break;
else if (fn == FN_RATE) {
/* freq-ctr always uses FF_U32 */
BUG_ON(stcol_format(col) != FF_U32);
- value = mkf_u32(FN_RATE, read_freq_ctr(counter));
+ if (col->flags & STAT_COL_FL_SHARED) {
+ uint64_t total;
+
+ total = COUNTERS_SHARED_TOTAL_OFFSET(((char **)counter), struct freq_ctr, offset, read_freq_ctr);
+ value = mkf_u32(FN_RATE, total);
+ }
+ else
+ value = mkf_u32(FN_RATE, read_freq_ctr(counter));
}
else if (fn == FN_AGE) {
unsigned long age;
if (col->flags & STAT_COL_FL_SHARED)
- age = HA_ATOMIC_LOAD((unsigned long *)counter);
+ age = COUNTERS_SHARED_LAST_OFFSET(((char **)counter), unsigned long, offset);
else
age = *(unsigned long *)counter;
int i;
uint64_t total_sess;
size_t nb_sess =
- sizeof(px->fe_counters.shared->cum_sess_ver) / sizeof(*px->fe_counters.shared->cum_sess_ver);
+ sizeof(px->fe_counters.shared->tg[0]->cum_sess_ver) / sizeof(*px->fe_counters.shared->tg[0]->cum_sess_ver);
- total_sess = HA_ATOMIC_LOAD(&px->fe_counters.shared->cum_sess);
+ total_sess = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
for (i = 0; i < nb_sess; i++)
- total_sess -= HA_ATOMIC_LOAD(&px->fe_counters.shared->cum_sess_ver[i]);
+ total_sess -= COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess_ver[i], HA_ATOMIC_LOAD);
+
total_sess = (int64_t)total_sess < 0 ? 0 : total_sess;
field = mkf_u64(FN_COUNTER, total_sess);
break;
if (index == NULL || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
- srv_samples_counter = (px->mode == PR_MODE_HTTP) ? HA_ATOMIC_LOAD(&sv->counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&sv->counters.shared->cum_lbconn);
+ srv_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
if (srv_samples_counter < TIME_STATS_SAMPLES && srv_samples_counter > 0)
srv_samples_window = srv_samples_counter;
}
if (!index || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
- be_samples_counter = (px->mode == PR_MODE_HTTP) ? HA_ATOMIC_LOAD(&px->be_counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&px->be_counters.shared->cum_lbconn);
+ be_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
if (be_samples_counter < TIME_STATS_SAMPLES && be_samples_counter > 0)
be_samples_window = be_samples_counter;
}
bytes = s->req.total - s->logs.bytes_in;
s->logs.bytes_in = s->req.total;
if (bytes) {
- _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->bytes_in, bytes);
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->bytes_in, bytes);
+ _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_in, bytes);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_in, bytes);
if (objt_server(s->target))
- _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->bytes_in, bytes);
+ _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_in, bytes);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_ADD(&sess->listener->counters->shared->bytes_in, bytes);
+ _HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_in, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
bytes = s->res.total - s->logs.bytes_out;
s->logs.bytes_out = s->res.total;
if (bytes) {
- _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->bytes_out, bytes);
- _HA_ATOMIC_ADD(&s->be->be_counters.shared->bytes_out, bytes);
+ _HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_out, bytes);
+ _HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_out, bytes);
if (objt_server(s->target))
- _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->bytes_out, bytes);
+ _HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_out, bytes);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_ADD(&sess->listener->counters->shared->bytes_out, bytes);
+ _HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_out, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
if (!(s->flags & SF_FINST_MASK)) {
if (s->scb->state == SC_ST_INI) {
/* anything before REQ in fact */
- _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->failed_req);
if (strm_li(s) && strm_li(s)->counters)
- _HA_ATOMIC_INC(&strm_li(s)->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&strm_li(s)->counters->shared->tg[tgid - 1]->failed_req);
s->flags |= SF_FINST_R;
}
if (rule->from != ACT_F_HTTP_REQ) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
/* The flag SF_ASSIGNED prevent from server assignment. */
s->flags |= SF_ASSIGNED;
sc_shutdown(scf);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
if (!(s->flags & SF_FINST_MASK))
if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
sc_abort(scb);
sc_shutdown(scb);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_resp);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
if (!(s->flags & SF_FINST_MASK))
req->analysers &= AN_REQ_FLT_END;
channel_auto_close(req);
if (scf->flags & SC_FL_ERROR) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLICL;
COUNT_IF(1, "Report unhandled client error");
}
else if (req->flags & CF_READ_TIMEOUT) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (RD)");
}
else {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (WR)");
}
res->analysers &= AN_RES_FLT_END;
channel_auto_close(res);
if (scb->flags & SC_FL_ERROR) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVCL;
COUNT_IF(1, "Report unhandled server error");
}
else if (res->flags & CF_READ_TIMEOUT) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (RD)");
}
else {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
- _HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
+ _HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (WR)");
}
n = 0;
if (sess->fe->mode == PR_MODE_HTTP) {
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[n]);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
}
if ((s->flags & SF_BE_ASSIGNED) &&
(s->be->mode == PR_MODE_HTTP)) {
- _HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.rsp[n]);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.cum_req);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.cum_req);
}
}
srv = objt_server(s->target);
if (srv) {
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
- HA_ATOMIC_LOAD(&srv->counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&srv->counters.shared->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
+ /* aggregate over all thread groups, as done for the stats sample windows */
+ COUNTERS_SHARED_TOTAL(srv->counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(srv->counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
}
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
- HA_ATOMIC_LOAD(&s->be->be_counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&s->be->be_counters.shared->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
+ /* aggregate over all thread groups, as done for the stats sample windows */
+ COUNTERS_SHARED_TOTAL(s->be->be_counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(s->be->be_counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
strm->req.analysers &= AN_REQ_FLT_END;
strm->res.analysers &= AN_RES_FLT_END;
if (strm->flags & SF_BE_ASSIGNED)
- _HA_ATOMIC_INC(&strm->be->be_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&strm->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (!(strm->flags & SF_ERR_MASK))
strm->flags |= SF_ERR_PRXCOND;
if (!(strm->flags & SF_FINST_MASK))
strm->flags |= SF_FINST_R;
}
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
return ACT_RET_ABRT;
}
return 0;
deny:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto reject;
internal:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
return 0;
deny:
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->denied_resp);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->denied_resp);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
if (s->sess->listener && s->sess->listener->counters)
- _HA_ATOMIC_INC(&s->sess->listener->counters->shared->denied_resp);
+ _HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->denied_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto reject;
internal:
- _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->internal_errors);
- _HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (s->sess->listener && s->sess->listener->counters)
- _HA_ATOMIC_INC(&s->sess->listener->counters->shared->internal_errors);
+ _HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
- _HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
- _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_conn);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_conn);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_conn);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_conn);
result = 0;
goto end;
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
- _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_sess);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_sess);
if (sess->listener && sess->listener->counters)
- _HA_ATOMIC_INC(&sess->listener->counters->shared->denied_sess);
+ _HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_sess);
result = 0;
goto end;