return pkt->type != QUIC_PACKET_TYPE_SHORT;
}
+/* Release the memory for the RX packets which are no longer referenced
+ * and consume their payloads which have been copied to the RX buffer
+ * for the connection.
+ * Always succeeds.
+ */
+static inline void quic_rx_packet_pool_purge(struct quic_conn *qc)
+{
+ struct quic_rx_packet *pkt, *pktback;
+
+ list_for_each_entry_safe(pkt, pktback, &qc->rx.pkt_list, qc_rx_pkt_list) {
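+ /* Packet payloads are appended to the connection RX buffer in order, so only
+ * packets whose payload is at the head of this buffer may be released.
+ */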
+ if (pkt->data != (unsigned char *)b_head(&qc->rx.buf))
+ break;
+
+ if (!HA_ATOMIC_LOAD(&pkt->refcnt)) {
+ b_del(&qc->rx.buf, pkt->raw_len);
+ LIST_DELETE(&pkt->qc_rx_pkt_list);
+ pool_free(pool_head_quic_rx_packet, pkt);
+ }
+ }
+}
+
/* Increment the reference counter of <pkt> */
static inline void quic_rx_packet_refinc(struct quic_rx_packet *pkt)
{
/* Decrement the reference counter of <pkt> */
static inline void quic_rx_packet_refdec(struct quic_rx_packet *pkt)
{
- if (!HA_ATOMIC_SUB_FETCH(&pkt->refcnt, 1))
+ if (HA_ATOMIC_SUB_FETCH(&pkt->refcnt, 1))
+ return;
+
+ if (!pkt->qc) {
+ /* It is possible that the connection for this packet has not been identified
+ * yet. In this case, we only need to free this packet.
+ */
pool_free(pool_head_quic_rx_packet, pkt);
+ }
+ else {
+ struct quic_conn *qc = pkt->qc;
+
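+ /* The RX buffer write lock protects the buffer and <pkt_list> against
+ * concurrent appends from the packet receive path. Only the packet whose
+ * payload sits at the buffer head may be released here; the others are
+ * purged later, once the preceding payloads have been consumed.
+ */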
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ if (pkt->data == (unsigned char *)b_head(&qc->rx.buf)) {
+ b_del(&qc->rx.buf, pkt->raw_len);
+ LIST_DELETE(&pkt->qc_rx_pkt_list);
+ pool_free(pool_head_quic_rx_packet, pkt);
+ quic_rx_packet_pool_purge(qc);
+ }
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ }
}
/* Increment the reference counter of <pkt> */
pool_free(pool_head_quic_tx_packet, pkt);
}
-ssize_t quic_lstnr_dgram_read(char *buf, size_t len, void *owner,
+ssize_t quic_lstnr_dgram_read(struct buffer *buf, size_t len, void *owner,
struct sockaddr_storage *saddr);
#endif /* USE_QUIC */
#endif /* _HAPROXY_XPRT_QUIC_H */
DECLARE_POOL(pool_head_quic_tx_ring, "quic_tx_ring_pool", QUIC_TX_RING_BUFSZ);
DECLARE_POOL(pool_head_quic_rxbuf, "quic_rxbuf_pool", QUIC_RX_BUFSZ);
+DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ);
DECLARE_STATIC_POOL(pool_head_quic_conn_ctx,
"quic_conn_ctx_pool", sizeof(struct ssl_sock_ctx));
DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
quic_conn_enc_level_uninit(&conn->els[i]);
if (conn->timer_task)
task_destroy(conn->timer_task);
+ pool_free(pool_head_quic_conn_rxbuf, conn->rx.buf.area);
pool_free(pool_head_quic_conn, conn);
}
struct quic_conn *qc;
/* Initial CID. */
struct quic_connection_id *icid;
+ char *buf_area;
TRACE_ENTER(QUIC_EV_CONN_INIT);
qc = pool_zalloc(pool_head_quic_conn);
goto err;
}
+ buf_area = pool_alloc(pool_head_quic_conn_rxbuf);
+ if (!buf_area) {
+ TRACE_PROTO("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT);
+ goto err;
+ }
+
qc->cids = EB_ROOT;
/* QUIC Server (or listener). */
if (server) {
/* RX part. */
qc->rx.bytes = 0;
qc->rx.nb_ack_eliciting = 0;
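+ /* The connection RX buffer stores the payloads of the received packets,
+ * tracked by <pkt_list> until they are no longer referenced.
+ */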
+ qc->rx.buf = b_make(buf_area, QUIC_CONN_RX_BUFSZ, 0, 0);
+ HA_RWLOCK_INIT(&qc->rx.buf_rwlock);
+ LIST_INIT(&qc->rx.pkt_list);
/* XXX TO DO: Only one path at this time. */
qc->path = &qc->paths[0];
return 0;
}
-/* Try to remove the header protecttion of <pkt> QUIC packet attached to <conn>
+/* Insert <pkt> RX packet in its <qel> RX packets tree */
+static void qc_pkt_insert(struct quic_rx_packet *pkt, struct quic_enc_level *qel)
+{
+ pkt->pn_node.key = pkt->pn;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
+ eb64_insert(&qel->rx.pkts, &pkt->pn_node);
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
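+ /* Account for the reference now held by the <qel> RX packets tree. */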
+ quic_rx_packet_refinc(pkt);
+}
+
+/* Try to remove the header protection of <pkt> QUIC packet attached to <qc>
* QUIC connection with <buf> as packet number field address, <end> a pointer to one
* byte past the end of the buffer containing this packet and <beg> the address of
* the packet first byte.
static inline int qc_try_rm_hp(struct quic_rx_packet *pkt,
unsigned char **buf, unsigned char *beg,
const unsigned char *end,
- struct quic_conn *qc, struct ssl_sock_ctx *ctx)
+ struct quic_conn *qc, struct quic_enc_level **el,
+ struct ssl_sock_ctx *ctx)
{
unsigned char *pn = NULL; /* Packet number field */
struct quic_enc_level *qel;
/* The AAD includes the packet number field found at <pn>. */
pkt->aad_len = pn - beg + pkt->pnl;
qpkt_trace = pkt;
- /* Store the packet */
- pkt->pn_node.key = pkt->pn;
- HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
- eb64_insert(&qel->rx.pkts, &pkt->pn_node);
- quic_rx_packet_refinc(pkt);
- HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.pkts_rwlock);
}
else if (qel) {
+ if (qel->tls_ctx.rx.flags & QUIC_FL_TLS_SECRETS_DCD) {
+ /* If the packet number space has been discarded, this packet
+ * will not be parsed.
+ */
+ TRACE_PROTO("Discarded pktns", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, pkt);
+ goto out;
+ }
+
TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, pkt);
pkt->pn_offset = pn - beg;
MT_LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
quic_rx_packet_refinc(pkt);
}
+ else {
+ TRACE_PROTO("Unknown packet type", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL);
+ goto err;
+ }
- memcpy(pkt->data, beg, pkt->len);
+ *el = qel;
+ /* No reference counter incrementation here: <pkt_list> does not hold a
+ * reference, the packet is removed from this list when it is released
+ * together with its payload in the RX buffer.
+ */
+ LIST_APPEND(&qc->rx.pkt_list, &pkt->qc_rx_pkt_list);
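+ /* Store the packet payload at the tail of the connection RX buffer. It will
+ * be released from there, in order, once the packet is no longer referenced.
+ */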
+ memcpy(b_tail(&qc->rx.buf), beg, pkt->len);
+ pkt->data = (unsigned char *)b_tail(&qc->rx.buf);
+ b_add(&qc->rx.buf, pkt->len);
+ out:
/* Update the offset of <*buf> for the next QUIC packet. */
*buf = beg + pkt->len;
struct connection *srv_conn;
struct ssl_sock_ctx *conn_ctx;
int long_header;
+ size_t b_cspace;
+ struct quic_enc_level *qel;
qc = NULL;
TRACE_ENTER(QUIC_EV_CONN_SPKT);
goto err;
}
- if (pkt->len > sizeof pkt->data) {
- TRACE_PROTO("Too big packet", QUIC_EV_CONN_SPKT, qc->conn, pkt, &pkt->len);
- goto err;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ b_cspace = b_contig_space(&qc->rx.buf);
+ if (b_cspace < pkt->len) {
+ /* Let us consume the remaining contiguous space. */
+ b_add(&qc->rx.buf, b_cspace);
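+ /* The buffer tail has wrapped to the beginning of the buffer area: check
+ * that there is enough room there to store this packet contiguously.
+ */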
+ if (b_contig_space(&qc->rx.buf) < pkt->len) {
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ TRACE_PROTO("Too big packet", QUIC_EV_CONN_SPKT, qc->conn, pkt, &pkt->len);
+ goto err;
+ }
}
- if (!qc_try_rm_hp(pkt, buf, beg, end, qc, conn_ctx))
+ if (!qc_try_rm_hp(pkt, buf, beg, end, qc, &qel, conn_ctx)) {
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ TRACE_PROTO("Packet dropped", QUIC_EV_CONN_SPKT, qc->conn);
goto err;
+ }
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ if (pkt->aad_len)
+ qc_pkt_insert(pkt, qel);
/* Wake the tasklet of the QUIC connection packet handler. */
if (conn_ctx)
tasklet_wakeup(conn_ctx->wait_event.tasklet);
struct listener *l;
struct ssl_sock_ctx *conn_ctx;
int long_header = 0;
+ size_t b_cspace;
+ struct quic_enc_level *qel;
qc = NULL;
conn_ctx = NULL;
pkt->len = end - *buf;
}
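+ /* Attach this packet to its connection so that quic_rx_packet_refdec() may
+ * release its payload from the connection RX buffer.
+ */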
+ pkt->qc = qc;
+
/* Store the DCID used for this packet to check that the packets which
 * come in this UDP datagram match it.
*/
}
/* Increase the total length of this packet by the header length. */
- pkt->len += *buf - beg;
+ pkt->raw_len = pkt->len += *buf - beg;
/* Do not check the DCID node before the length. */
if (dgram_ctx->dcid_node != node) {
TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc->conn);
goto err;
}
- if (pkt->len > sizeof pkt->data) {
- TRACE_PROTO("Too big packet", QUIC_EV_CONN_LPKT, qc->conn, pkt, &pkt->len);
- goto err;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ b_cspace = b_contig_space(&qc->rx.buf);
+ if (b_cspace < pkt->len) {
+ /* Let us consume the remaining contiguous space. */
+ b_add(&qc->rx.buf, b_cspace);
+ if (b_contig_space(&qc->rx.buf) < pkt->len) {
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
+ TRACE_PROTO("Too big packet", QUIC_EV_CONN_LPKT, qc->conn, pkt, &pkt->len);
+ goto err;
+ }
}
- if (!qc_try_rm_hp(pkt, buf, beg, end, qc, conn_ctx)) {
+ if (!qc_try_rm_hp(pkt, buf, beg, end, qc, &qel, conn_ctx)) {
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc->conn);
goto err;
}
-
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qc->rx.buf_rwlock);
TRACE_PROTO("New packet", QUIC_EV_CONN_LPKT, qc->conn, pkt);
+ if (pkt->aad_len)
+ qc_pkt_insert(pkt, qel);
/* Wake up the connection packet handler task from here only if all
* the contexts have been initialized, especially the mux context
* conn_ctx->conn->ctx. Note that this is ->start xprt callback which
BIO_meth_free(ha_quic_meth);
}
-/* Read all the QUIC packets found in <buf> with <len> as length (typically a UDP
- * datagram), <ctx> being the QUIC I/O handler context, from QUIC connections,
- * calling <func> function;
+/* Read all the QUIC packets found in <buf> with <len> as length (typically a UDP
+ * datagram), <owner> being the owner of the datagram context, calling <func> for
+ * each of them.
* Return the number of bytes read if succeeded, -1 if not.
*/
-static ssize_t quic_dgram_read(char *buf, size_t len, void *owner,
+static ssize_t quic_dgram_read(struct buffer *buf, size_t len, void *owner,
struct sockaddr_storage *saddr, qpkt_read_func *func)
{
unsigned char *pos;
.owner = owner,
};
- pos = (unsigned char *)buf;
+ pos = (unsigned char *)b_head(buf);
end = pos + len;
-
do {
int ret;
struct quic_rx_packet *pkt;
return -1;
}
-ssize_t quic_lstnr_dgram_read(char *buf, size_t len, void *owner,
+ssize_t quic_lstnr_dgram_read(struct buffer *buf, size_t len, void *owner,
struct sockaddr_storage *saddr)
{
return quic_dgram_read(buf, len, owner, saddr, qc_lstnr_pkt_rcv);