_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
_HA_ATOMIC_SUB(&srv->curr_idle_thr[tid], 1);
+ } else {
+ struct server *srv = objt_server(conn->target);
+
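+ /* This connection was in use rather than parked on an idle/safe
+ * list, so only the server's used-connection counter is released.
+ */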
+ if (srv)
+ _HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
}
conn_force_unsubscribe(conn);
_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
return 0;
}
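+ /* The connection is leaving the "used" state: release it from the
+ * used counter before it gets parked on the safe or idle list below.
+ */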
+ _HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
MT_LIST_DEL(&conn->list);
conn->idle_time = now_ms;
if (is_safe) {
unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
unsigned int curr_idle_nb; /* Current number of connections in the idle list */
unsigned int curr_safe_nb; /* Current number of connections in the safe list */
+ unsigned int curr_used_conns; /* Current number of used connections */
+ unsigned int max_used_conns; /* Max number of used connections (the counter is reset at each connection purge) */
unsigned int *curr_idle_thr; /* Current number of orphan idling connections per thread */
int max_reuse; /* Max number of requests on a same connection */
struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
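+ /* With this accounting, a server connection is counted either in
+ * curr_used_conns or in the idle counters above, never both: the
+ * release path decrements exactly one of the two sides.
+ */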
srv_cs = NULL;
}
+ if (srv_conn && srv) {
+ _HA_ATOMIC_ADD(&srv->curr_used_conns, 1);
+ /* It's OK not to do this atomically: we don't need an
+ * exact max, an approximate high watermark is enough
+ * (see the sketch after this block).
+ */
+ if (srv->max_used_conns < srv->curr_used_conns)
+ srv->max_used_conns = srv->curr_used_conns;
+ }
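+ /* A minimal sketch of an exact watermark update, assuming HAProxy's
+ * _HA_ATOMIC_CAS(ptr, &old, new) semantics where "old" is refreshed
+ * on failure; this is hypothetical and intentionally not what the
+ * patch does, since the CAS loop costs more than an exact max is worth:
+ *
+ *	unsigned int cur = srv->curr_used_conns;
+ *	unsigned int old = srv->max_used_conns;
+ *
+ *	while (cur > old &&
+ *	       !_HA_ATOMIC_CAS(&srv->max_used_conns, &old, cur))
+ *		;
+ */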
if (!srv_conn || !sockaddr_alloc(&srv_conn->dst)) {
if (srv_conn)
conn_free(srv_conn);