{
LIST_INIT(&pacer->frms);
pacer->path = path;
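+ /* No pacing delay armed yet. */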
+ pacer->next = 0;
}
static inline struct list *quic_pacing_frms(struct quic_pacer *pacer)
{
return &pacer->frms;
}
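+
+/* Return the pacing delay to apply between two datagram emissions, in
+ * nanoseconds, derived from the smoothed RTT and the congestion window
+ * converted into a datagram count.
+ */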
+static inline ullong quic_pacing_ns_pkt(const struct quic_pacer *pacer)
+{
+ return pacer->path->loss.srtt * 1000000 / (pacer->path->cwnd / pacer->path->mtu + 1);
+}
+
+int quic_pacing_expired(const struct quic_pacer *pacer);
+
+void quic_pacing_sent_done(struct quic_pacer *pacer, int sent);
+
#endif /* _HAPROXY_QUIC_PACING_H */
#include <haproxy/list-t.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/quic_tls-t.h>
+#include <haproxy/quic_pacing-t.h>
#include <haproxy/quic_rx-t.h>
#include <haproxy/quic_tx-t.h>
int qc_purge_txbuf(struct quic_conn *qc, struct buffer *buf);
struct buffer *qc_get_txb(struct quic_conn *qc);
-enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms);
+enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms,
+ struct quic_pacer *pacer);
void qel_register_send(struct list *send_list, struct quic_enc_level *qel,
struct list *frms);
return 1;
}
- ret = qc_send_mux(qcc->conn->handle.qc, frms);
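+ /* Pacing is not used on this send path: pass a NULL pacer. */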
+ ret = qc_send_mux(qcc->conn->handle.qc, frms, NULL);
if (ret == QUIC_TX_ERR_FATAL) {
TRACE_DEVEL("error on sending", QMUX_EV_QCC_SEND, qcc->conn);
qcc_subscribe_send(qcc);
#include <haproxy/quic_pacing.h>
+
+#include <haproxy/clock.h>
+#include <haproxy/quic_tx.h>
+
+struct quic_conn;
+
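+/* Returns true if the pacing delay has expired, i.e. a new datagram can be
+ * emitted immediately, false otherwise.
+ */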
+int quic_pacing_expired(const struct quic_pacer *pacer)
+{
+ return !pacer->next || pacer->next <= now_mono_time();
+}
+
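+/* Notify the pacer that <sent> datagrams have just been emitted and
+ * reschedule the timestamp of the next allowed emission accordingly.
+ */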
+void quic_pacing_sent_done(struct quic_pacer *pacer, int sent)
+{
+ pacer->next = now_mono_time() + quic_pacing_ns_pkt(pacer) * sent;
+}
#include <haproxy/trace.h>
#include <haproxy/quic_cid.h>
#include <haproxy/quic_conn.h>
+#include <haproxy/quic_pacing.h>
#include <haproxy/quic_retransmit.h>
#include <haproxy/quic_retry.h>
#include <haproxy/quic_sock.h>
*
- * Returns the result from qc_send() function.
+ * Returns QUIC_TX_ERR_NONE on success, QUIC_TX_ERR_AGAIN if emission was
+ * interrupted by pacing with frames left to send, or QUIC_TX_ERR_FATAL on
+ * error.
*/
-enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms)
+enum quic_tx_err qc_send_mux(struct quic_conn *qc, struct list *frms,
+ struct quic_pacer *pacer)
{
struct list send_list = LIST_HEAD_INIT(send_list);
enum quic_tx_err ret = QUIC_TX_ERR_NONE;
- int sent;
+ int max_dgram = 0, sent;
TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
BUG_ON(qc->mux_state != QC_MUX_READY); /* Only the MUX can use this function so it must be ready. */
qc_send(qc, 0, &send_list, 0);
}
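+ /* With pacing, limit each call to the emission of a single datagram. */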
+ if (pacer)
+ max_dgram = 1;
+
TRACE_STATE("preparing data (from MUX)", QUIC_EV_CONN_TXPKT, qc);
qel_register_send(&send_list, qc->ael, frms);
- sent = qc_send(qc, 0, &send_list, 0);
- if (sent <= 0)
+ sent = qc_send(qc, 0, &send_list, max_dgram);
+ if (sent <= 0) {
ret = QUIC_TX_ERR_FATAL;
+ }
+ else if (pacer) {
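+ /* Emission was stopped by the single-datagram pacing limit while frames
+ * remain: report it so the caller retries once the pacing delay expires.
+ */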
+ if (max_dgram && max_dgram == sent && !LIST_ISEMPTY(frms))
+ ret = QUIC_TX_ERR_AGAIN;
+ quic_pacing_sent_done(pacer, sent);
+ }
TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
return ret;