#define BC_O_UNLIMITED 0x00002000 /* listeners not subject to global limits (peers & stats socket) */
#define BC_O_NOSTOP 0x00004000 /* keep the listeners active even after a soft stop */
#define BC_O_REVERSE_HTTP 0x00008000 /* a reverse HTTP bind is used */
+#define BC_O_XPRT_MAXCONN 0x00010000 /* transport layer allocates its own resources prior to accept and is responsible for checking the maxconn limit */
/* flags used with bind_conf->ssl_options */
#ifdef USE_QUIC
if (listener->bind_conf->xprt == xprt_get(XPRT_QUIC)) {
+ /* quic_conn instances are counted against the global maxconn. */
+ listener->bind_conf->options |= BC_O_XPRT_MAXCONN;
+
# ifdef USE_QUIC_OPENSSL_COMPAT
/* store the last checked bind_conf in bind_conf */
if (!(global.tune.options & GTUNE_NO_QUIC) &&
return 1024;
}
+/* Returns true if listener <l> must check the maxconn limit prior to accept. */
+static inline int listener_uses_maxconn(const struct listener *l)
+{
+ return !(l->bind_conf->options & (BC_O_UNLIMITED|BC_O_XPRT_MAXCONN));
+}
+
/* This function is called on a read event from a listening socket, corresponding
* to an accept. It tries to accept as many connections as possible, and for each
* calls the listener's accept handler (generally the frontend's accept handler).
} while (!_HA_ATOMIC_CAS(&p->feconn, &count, next_feconn));
}
- if (!(l->bind_conf->options & BC_O_UNLIMITED)) {
+ if (listener_uses_maxconn(l)) {
next_actconn = increment_actconn();
if (!next_actconn) {
/* the process was marked full or another
_HA_ATOMIC_DEC(&l->nbconn);
if (p)
_HA_ATOMIC_DEC(&p->feconn);
- if (!(l->bind_conf->options & BC_O_UNLIMITED))
+ if (listener_uses_maxconn(l))
_HA_ATOMIC_DEC(&actconn);
continue;
{
struct proxy *fe = l->bind_conf->frontend;
- if (!(l->bind_conf->options & BC_O_UNLIMITED))
+ if (listener_uses_maxconn(l))
_HA_ATOMIC_DEC(&actconn);
if (fe)
_HA_ATOMIC_DEC(&fe->feconn);
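
For reference, a minimal sketch of what increment_actconn() is assumed to do (the real helper lives in haproxy/frontend.h and uses HAProxy's atomic macros; the C11 version below, with global_maxconn as a stand-in for global.maxconn, is illustrative only): it atomically reserves one slot in the global actconn counter and returns the new value, or 0 when maxconn is already reached so the caller knows nothing was reserved.

#include <stdatomic.h>

extern _Atomic unsigned int actconn;   /* global active connection counter */
extern unsigned int global_maxconn;    /* stand-in for global.maxconn */

static inline unsigned int increment_actconn(void)
{
	unsigned int count = atomic_load(&actconn);
	unsigned int next;

	do {
		if (count >= global_maxconn)
			return 0;      /* full: nothing reserved, do not decrement */
		next = count + 1;
	} while (!atomic_compare_exchange_weak(&actconn, &count, next));

	return next;                   /* new counter value, always > 0 */
}

On success the caller owns one unit of actconn and must eventually release it with a matching decrement, which is exactly what the error paths above do.
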
qc_free_ssl_sock_ctx(&qc->xprt_ctx);
}
+ /* Decrement actconn on quic_conn free. quic_cc_conn instances are not
+ * counted in the global counters because they are designed to run for a
+ * limited time with limited memory.
+ */
+ _HA_ATOMIC_DEC(&actconn);
+
/* in the unlikely (but possible) case the connection was just added to
* the accept_list we must delete it from there.
*/
#include <haproxy/quic_rx.h>
+#include <haproxy/frontend.h>
#include <haproxy/h3.h>
#include <haproxy/list.h>
#include <haproxy/ncbuf.h>
struct quic_conn *qc = NULL;
struct proxy *prx;
struct quic_counters *prx_counters;
+ unsigned int next_actconn = 0;
TRACE_ENTER(QUIC_EV_CONN_LPKT);
pkt->saddr = dgram->saddr;
ipv4 = dgram->saddr.ss_family == AF_INET;
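+ /* Reserve a slot against the global maxconn before allocating a new
+ * quic_conn; if the limit is already reached, the packet is dropped.
+ */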
+ next_actconn = increment_actconn();
+ if (!next_actconn) {
+ _HA_ATOMIC_INC(&maxconn_reached);
+ TRACE_STATE("drop packet on maxconn reached",
+ QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
+ goto err;
+ }
+
/* Generate the first connection CID. This is derived from the client
* ODCID and address. This allows to retrieve the connection from the
* ODCID without storing it in the CID tree. This is an interesting
goto err;
}
+ /* The quic_conn is now allocated. If an error occurs
+ * later, it will be freed with quic_conn_release(),
+ * which also ensures actconn is decremented.
+ * Reset the guard value to prevent a double decrement.
+ */
+
+
/* Compute and store into the quic_conn the hash used to compute extra CIDs */
if (quic_hash64_from_cid)
qc->hash64 = quic_hash64_from_cid(conn_id->cid.data, conn_id->cid.len,
qc->cntrs.dropped_pkt++;
else
HA_ATOMIC_INC(&prx_counters->dropped_pkt);
+
+ /* Release the reserved actconn slot if no quic_conn took ownership of it. */
+ if (next_actconn)
+ _HA_ATOMIC_DEC(&actconn);
+
TRACE_LEAVE(QUIC_EV_CONN_LPKT);
return NULL;
}
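
Taken together, the quic_rx changes follow a reserve/transfer/release pattern. A condensed, hedged sketch of that flow (names ending in _sketch are hypothetical stand-ins for the real call sites in quic_rx.c, not HAProxy functions):

struct quic_conn *qc_alloc_sketch(void);   /* stand-in for qc_new_conn() */

static struct quic_conn *rx_new_conn_sketch(void)
{
	struct quic_conn *qc = NULL;
	unsigned int next_actconn;

	/* 1. Reserve a maxconn slot before allocating anything. */
	next_actconn = increment_actconn();
	if (!next_actconn)
		goto err;              /* limit reached: drop the packet */

	/* 2. Allocate the connection. */
	qc = qc_alloc_sketch();
	if (!qc)
		goto err;

	/* 3. Ownership of the slot moves to <qc>: quic_conn_release()
	 * decrements actconn when the connection is freed.
	 */
	next_actconn = 0;
	return qc;

 err:
	/* 4. The slot is still owned locally: give it back. */
	if (next_actconn)
		_HA_ATOMIC_DEC(&actconn);
	return NULL;
}
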