extern struct quic_dghdlr *quic_dghdlrs;
+extern THREAD_LOCAL struct cshared quic_mem_diff;
+
#endif /* _HAPROXY_PROTO_QUIC_H */
#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/chunk.h>
+#include <haproxy/proto_quic.h>
#include <haproxy/quic_cc-t.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/quic_loss.h>
+#include <haproxy/thread.h>
void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
*(size_t *)&path->mtu = max_dgram_sz;
path->initial_wnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
path->cwnd = path->initial_wnd;
+ cshared_add(&quic_mem_diff, path->cwnd);
path->cwnd_last_max = path->cwnd;
path->limit_max = max_cwnd;
path->limit_min = max_dgram_sz << 1;
#include <haproxy/sock.h>
#include <haproxy/sock_inet.h>
#include <haproxy/task.h>
+#include <haproxy/thread.h>
#include <haproxy/tools.h>
/* per-thread quic datagram handlers */
struct quic_dghdlr *quic_dghdlrs;
+static uint64_t quic_mem_global;
+THREAD_LOCAL struct cshared quic_mem_diff;
+
/* Size of the internal buffer of QUIC RX buffer at the fd level */
#define QUIC_RX_BUFSZ (1UL << 18)
qc_bind_tid_reset(qc);
}
+static int quic_init_mem(void)
+{
+ /* 1024 is enough to limit modification on global counter but keeping it precise enough even with a lot of threads */
+ cshared_init(&quic_mem_diff, &quic_mem_global, 1024);
+ return 1;
+}
+REGISTER_PER_THREAD_INIT(quic_init_mem);
+
static int quic_alloc_dghdlrs(void)
{
int i;
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <haproxy/proto_quic.h>
#include <haproxy/quic_cc.h>
#include <haproxy/quic_pacing.h>
+#include <haproxy/thread.h>
struct quic_cc_algo *default_quic_cc_algo = &quic_cc_algo_cubic;
/* Restore congestion window for <path> to its minimal value. */
void quic_cc_path_reset(struct quic_cc_path *path)
{
+ const uint64_t old = path->cwnd;
path->cwnd = path->limit_min;
+ cshared_add(&quic_mem_diff, path->cwnd - old);
}
/* Set congestion window for <path> to <val>. Min and max limits are enforced
 * (min takes precedence), the size change is accounted into the thread-local
 * memory counter, and the high-water mark <cwnd_last_max> is refreshed.
 */
void quic_cc_path_set(struct quic_cc_path *path, uint64_t val)
{
	const uint64_t prev_cwnd = path->cwnd;
	uint64_t next_cwnd = val;

	/* Clamp into [limit_min, limit_max]; applying the lower bound last
	 * matches the original QUIC_MIN-then-QUIC_MAX ordering.
	 */
	if (next_cwnd > path->limit_max)
		next_cwnd = path->limit_max;
	if (next_cwnd < path->limit_min)
		next_cwnd = path->limit_min;

	path->cwnd = next_cwnd;
	/* Unsigned wraparound yields the correct (possibly negative) delta once
	 * converted by cshared_add() — same arithmetic as the original.
	 */
	cshared_add(&quic_mem_diff, next_cwnd - prev_cwnd);
	if (next_cwnd > path->cwnd_last_max)
		path->cwnd_last_max = next_cwnd;
}
*/
void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val)
{
+ const uint64_t old = path->cwnd;
+
if (quic_cwnd_may_increase(path)) {
path->cwnd = QUIC_MIN(path->cwnd + val, path->limit_max);
path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+ cshared_add(&quic_mem_diff, path->cwnd - old);
path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
HA_ATOMIC_DEC(&qc->li->rx.quic_curr_accept);
}
+ /* Subtract last congestion window from global memory counter. */
+ cshared_add(&quic_mem_diff, -qc->path->cwnd);
+ qc->path->cwnd = 0;
+
/* free remaining stream descriptors */
node = eb64_first(&qc->streams_by_id);
while (node) {