git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MINOR: quic: Maximum congestion control window for each algo
author     Frédéric Lécaille <flecaille@haproxy.com>
           Mon, 13 Nov 2023 15:55:11 +0000 (16:55 +0100)
committer  Frédéric Lécaille <flecaille@haproxy.com>
           Mon, 13 Nov 2023 16:53:18 +0000 (17:53 +0100)
Make all the congestion control algorithms support the maximum congestion
control window set by configuration. There is nothing special to explain:
for each algo, each time the window is incremented it is also bounded.

include/haproxy/quic_cc-t.h
include/haproxy/quic_conn-t.h
include/haproxy/quic_conn.h
src/quic_cc_cubic.c
src/quic_cc_newreno.c
src/quic_cc_nocc.c
src/quic_conn.c

diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h
index 60efc0ad4dbc9a8bd63dae418c5d4393e2122212..cae12bec6b7eb439d112f32b98663d6e6194ddbd 100644
@@ -39,7 +39,6 @@ extern struct quic_cc_algo *default_quic_cc_algo;
 
 /* Fake algorithm with its fixed window */
 extern struct quic_cc_algo quic_cc_algo_nocc;
-extern unsigned int quic_cc_nocc_fixed_cwnd;
 
 extern unsigned long long last_ts;
 
diff --git a/include/haproxy/quic_conn-t.h b/include/haproxy/quic_conn-t.h
index ae2e7685e8f558c57577fac4e04c7ef4dbaf73e7..2ead20de01c1413abf1ed757055449a770078209 100644
@@ -334,7 +334,10 @@ struct quic_path {
        size_t mtu;
        /* Congestion window. */
        uint64_t cwnd;
+       /* The current maximum congestion window value reached. */
        uint64_t mcwnd;
+       /* The maximum congestion window value which can be reached. */
+       uint64_t max_cwnd;
        /* Minimum congestion window. */
        uint64_t min_cwnd;
        /* Prepared data to be sent (in bytes). */
diff --git a/include/haproxy/quic_conn.h b/include/haproxy/quic_conn.h
index a1f72c0ef3e1386f55716f6bb2f153450500f24c..640fe5bcc929d73ab309a87cefd73ee5b435dd01 100644
@@ -458,7 +458,7 @@ static inline uint64_t quic_compute_ack_delay_us(unsigned int time_received,
 /* Initialize <p> QUIC network path depending on <ipv4> boolean
  * which is true for an IPv4 path, if not false for an IPv6 path.
  */
-static inline void quic_path_init(struct quic_path *path, int ipv4,
+static inline void quic_path_init(struct quic_path *path, int ipv4, unsigned long max_cwnd,
                                   struct quic_cc_algo *algo, struct quic_conn *qc)
 {
        unsigned int max_dgram_sz;
@@ -468,6 +468,7 @@ static inline void quic_path_init(struct quic_path *path, int ipv4,
        path->mtu = max_dgram_sz;
        path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
        path->mcwnd = path->cwnd;
+       path->max_cwnd = max_cwnd;
        path->min_cwnd = max_dgram_sz << 1;
        path->prep_in_flight = 0;
        path->in_flight = 0;
diff --git a/src/quic_cc_cubic.c b/src/quic_cc_cubic.c
index 2dac2fb7a16ce181533f4e00524f73e5784c6653..2e5599d75ed0be899dfb003aacc9228839d0d113 100644
@@ -168,6 +168,7 @@ static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
        }
 
        path->cwnd += inc;
+       path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
        path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
  leave:
        TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
@@ -213,8 +214,10 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
        TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, ev);
        switch (ev->type) {
        case QUIC_CC_EVT_ACK:
-               if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked)
+               if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
                        path->cwnd += ev->ack.acked;
+                       path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+               }
                /* Exit to congestion avoidance if slow start threshold is reached. */
                if (path->cwnd >= c->ssthresh)
                        c->state = QUIC_CC_ST_CA;
diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c
index 5fa8c0ee6b19f08b6a2a979da998d8c0cac63585..7756a611982b8651bb56e0730c368a4807bca3a3 100644
@@ -87,6 +87,7 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
        switch (ev->type) {
        case QUIC_CC_EVT_ACK:
                path->cwnd += ev->ack.acked;
+               path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
                path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
                /* Exit to congestion avoidance if slow start threshold is reached. */
                if (path->cwnd > nr->ssthresh)
@@ -124,6 +125,7 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
                acked = ev->ack.acked * path->mtu + nr->remain_acked;
                nr->remain_acked = acked % path->cwnd;
                path->cwnd += acked / path->cwnd;
+               path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
                path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
                break;
        }
diff --git a/src/quic_cc_nocc.c b/src/quic_cc_nocc.c
index 5fa6db5d79786fc86420781eb88e153c6f0f37e9..b512a38c2d6f238afe6906eef060f6d3dc0ccdbc 100644
@@ -9,14 +9,12 @@
 #include <haproxy/quic_trace.h>
 #include <haproxy/trace.h>
 
-unsigned int quic_cc_nocc_fixed_cwnd;
-
 static int quic_cc_nocc_init(struct quic_cc *cc)
 {
        struct quic_path *path;
 
        path = container_of(cc, struct quic_path, cc);
-       path->cwnd = quic_cc_nocc_fixed_cwnd << 10;
+       path->cwnd = path->max_cwnd;
        return 1;
 }
 
diff --git a/src/quic_conn.c b/src/quic_conn.c
index 067de20434b7c606f48f69c2041126fe695ac602..4b8beb794b9628167bdfb7139df63e5caaa45259 100644
@@ -1370,7 +1370,8 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
        qc->max_ack_delay = 0;
        /* Only one path at this time (multipath not supported) */
        qc->path = &qc->paths[0];
-       quic_path_init(qc->path, ipv4, cc_algo ? cc_algo : default_quic_cc_algo, qc);
+       quic_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
+                      cc_algo ? cc_algo : default_quic_cc_algo, qc);
 
        qc->stream_buf_count = 0;
        memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));
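
For reference, a hypothetical call mirroring the updated quic_path_init()
prototype; the 4 MB limit is an arbitrary illustrative value and not taken
from the patch:

    /* Sketch: initialize the path with an explicit 4 MB maximum window. */
    quic_path_init(qc->path, ipv4, 4UL * 1024 * 1024,
                   cc_algo ? cc_algo : default_quic_cc_algo, qc);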