From: Amaury Denoyelle
Date: Fri, 31 May 2024 07:42:13 +0000 (+0200)
Subject: BUG/MINOR: quic: ensure Tx buf is always purged
X-Git-Tag: v3.1-dev1~60
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0ef94e2dff873c0755584d0797c81e1b2c697e52;p=thirdparty%2Fhaproxy.git

BUG/MINOR: quic: ensure Tx buf is always purged

quic_conn API for sending was recently refactored. The main objective
was to regroup the different functions present for both handshake and
application emission.

After this refactoring, an optimization was introduced to avoid calling
qc_send() if there was nothing new to emit. However, this prevents the
Tx buffer from being purged if a previous send was interrupted, until
new frames are finally available.

To fix this, simply remove the optimization. qc_send() is thus now
always called in quic_conn IO handlers.

The impact of this bug should be minimal as it happens only on a
temporary sending error. However, in that case it could cause extra
latency or even a complete sending freeze in the worst scenario.

This must be backported up to 3.0.
---

diff --git a/src/quic_conn.c b/src/quic_conn.c
index 8424561143..9a3ce2964a 100644
--- a/src/quic_conn.c
+++ b/src/quic_conn.c
@@ -551,11 +551,8 @@ struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int sta
 {
 	struct list send_list = LIST_HEAD_INIT(send_list);
 	struct quic_conn *qc = context;
-	struct quic_enc_level *qel;
 
 	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);
-
-	qel = qc->ael;
 	TRACE_STATE("connection handshake state", QUIC_EV_CONN_IO_CB, qc, &qc->state);
 
 	if (qc_test_fd(qc))
@@ -594,11 +591,10 @@ struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int sta
 		goto out;
 	}
 
-	if (!qel_need_sending(qel, qc))
-		goto out;
-
 	/* XXX TODO: how to limit the list frames to send */
-	qel_register_send(&send_list, qel, &qel->pktns->tx.frms);
+	if (qel_need_sending(qc->ael, qc))
+		qel_register_send(&send_list, qc->ael, &qc->ael->pktns->tx.frms);
+
 	if (!qc_send(qc, 0, &send_list)) {
 		TRACE_DEVEL("qc_send() failed", QUIC_EV_CONN_IO_CB, qc);
 		goto out;
@@ -804,10 +800,6 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
 		qel_register_send(&send_list, qel, &qel->pktns->tx.frms);
 	}
 
-	/* Skip sending if no QEL with frames to sent. */
-	if (LIST_ISEMPTY(&send_list))
-		goto out;
-
 	if (!qc_send(qc, 0, &send_list)) {
 		TRACE_DEVEL("qc_send() failed", QUIC_EV_CONN_IO_CB, qc);
 		goto out;
diff --git a/src/quic_tx.c b/src/quic_tx.c
index 3b710ecb32..39b917652f 100644
--- a/src/quic_tx.c
+++ b/src/quic_tx.c
@@ -691,7 +691,7 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
 int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 {
 	struct quic_enc_level *qel, *tmp_qel;
-	int ret, status = 0;
+	int ret = 0, status = 0;
 	struct buffer *buf;
 
 	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
@@ -713,7 +713,7 @@ int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 	}
 
 	/* Prepare and send packets until we could not further prepare packets. */
-	do {
+	while (!LIST_ISEMPTY(send_list)) {
 		/* Buffer must always be empty before qc_prep_pkts() usage.
 		 * qc_send_ppkts() ensures it is cleared on success.
 		 */
@@ -727,7 +727,12 @@ int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 			qc_txb_release(qc);
 			goto out;
 		}
-	} while (ret > 0 && !LIST_ISEMPTY(send_list));
+
+		if (ret <= 0) {
+			TRACE_DEVEL("stopping on qc_prep_pkts() return", QUIC_EV_CONN_TXPKT, qc);
+			break;
+		}
+	}
 
 	qc_txb_release(qc);
 	if (ret < 0)
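
Illustration (not part of the patch): a minimal, standalone C sketch of the bug class
described above, using hypothetical names rather than the HAProxy API. It contrasts an
IO handler that returns early when no new frames are queued (so data left over from an
interrupted send is never purged) with one that always runs the send path, mirroring the
unconditional qc_send() call introduced by this fix.

/* Standalone sketch; names and structures are illustrative only. */
#include <stdio.h>
#include <string.h>

struct tx_buf {
	char data[64];
	size_t len;            /* bytes left over from an interrupted send */
};

/* Stand-in for the network layer; assume the retry now succeeds. */
static int net_send(const char *data, size_t len)
{
	(void)data; (void)len;
	return 1;
}

/* Buggy variant: skip sending entirely when there is nothing new to emit,
 * so the flush below is never reached and <buf> stays stuck until new
 * frames eventually arrive.
 */
static void io_handler_skip(struct tx_buf *buf, const char *new_frames)
{
	if (!new_frames)
		return;        /* the "optimization" */
	if (net_send(buf->data, buf->len))
		buf->len = 0;
}

/* Fixed variant: always run the send path so pending data is purged even
 * when no new frames are registered.
 */
static void io_handler_always(struct tx_buf *buf, const char *new_frames)
{
	if (new_frames) {
		size_t add = strlen(new_frames);
		memcpy(buf->data + buf->len, new_frames, add);
		buf->len += add;
	}
	if (buf->len && net_send(buf->data, buf->len))
		buf->len = 0;
}

int main(void)
{
	struct tx_buf a = { .data = "HELLO", .len = 5 };
	struct tx_buf b = { .data = "HELLO", .len = 5 };

	io_handler_skip(&a, NULL);    /* leftover bytes stay pending */
	io_handler_always(&b, NULL);  /* leftover bytes are flushed */
	printf("skip: %zu pending, always: %zu pending\n", a.len, b.len);
	return 0;
}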