SSL_SERVER_LOCK,
SFT_LOCK, /* sink forward target */
IDLE_CONNS_LOCK,
+ QUIC_LOCK,
OTHER_LOCK,
/* WT: make sure never to use these ones outside of development,
* we need them for lock profiling!
case SSL_SERVER_LOCK: return "SSL_SERVER";
case SFT_LOCK: return "SFT";
case IDLE_CONNS_LOCK: return "IDLE_CONNS";
+ case QUIC_LOCK: return "QUIC";
case OTHER_LOCK: return "OTHER";
case DEBUG1_LOCK: return "DEBUG1";
case DEBUG2_LOCK: return "DEBUG2";
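/*
 * Standalone sketch (not HAProxy code) of why lock labels exist: every
 * HA_RWLOCK_* call names a label, and a DEBUG_THREAD build can attribute
 * per-label lock statistics through lock_label().  The names below
 * (my_lock_label, labeled_wrlock, nb_wrlock) are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

enum my_lock_label { QUIC_LBL = 0, OTHER_LBL, MY_LOCK_LABELS };

static const char *my_lock_label_str(enum my_lock_label lbl)
{
        switch (lbl) {
        case QUIC_LBL:  return "QUIC";
        case OTHER_LBL: return "OTHER";
        default:        return "UNKNOWN";
        }
}

/* per-label write-lock acquisition counters */
static unsigned long nb_wrlock[MY_LOCK_LABELS];

/* take <lock> for writing and account for it under label <lbl> */
static void labeled_wrlock(enum my_lock_label lbl, pthread_rwlock_t *lock)
{
        pthread_rwlock_wrlock(lock);
        nb_wrlock[lbl]++;
}

/* dump the counters, e.g. at exit, using the label strings */
static void dump_lock_stats(void)
{
        for (int lbl = 0; lbl < MY_LOCK_LABELS; lbl++)
                printf("%s: %lu write lock(s)\n",
                       my_lock_label_str(lbl), nb_wrlock[lbl]);
}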
pqpkt->aad_len = pqpkt->pn_offset + pqpkt->pnl;
/* Store the packet into the tree of packets to decrypt. */
pqpkt->pn_node.key = pqpkt->pn;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.rwlock);
quic_rx_packet_eb64_insert(&el->rx.pkts, &pqpkt->pn_node);
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.rwlock);
TRACE_PROTO("hp removed", QUIC_EV_CONN_ELRMHP, ctx->conn, pqpkt);
}
quic_rx_packet_list_del(pqpkt);
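/*
 * Standalone sketch of the insertion side shown above, using pthreads
 * instead of the HA_RWLOCK_* macros.  The packet number is the ordering
 * key and the write lock covers the whole container mutation.  The types
 * and names here (rx_pkt, rx_queue, rx_queue_insert) are illustrative,
 * not HAProxy's.
 */
#include <pthread.h>
#include <stdint.h>

struct rx_pkt {
        uint64_t pn;              /* packet number, the ordering key */
        struct rx_pkt *next;
};

struct rx_queue {
        struct rx_pkt *head;      /* kept sorted by ascending <pn> */
        pthread_rwlock_t rwlock;  /* plays the role of el->rx.rwlock */
};

/* insert <pkt> at its sorted position; only the mutation holds the lock */
static void rx_queue_insert(struct rx_queue *q, struct rx_pkt *pkt)
{
        struct rx_pkt **prev;

        pthread_rwlock_wrlock(&q->rwlock);
        for (prev = &q->head; *prev && (*prev)->pn < pkt->pn; prev = &(*prev)->next)
                ;
        pkt->next = *prev;
        *prev = pkt;
        pthread_rwlock_unlock(&q->rwlock);
}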
TRACE_ENTER(QUIC_EV_CONN_ELRXPKTS, ctx->conn);
tls_ctx = &el->tls_ctx;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &el->rx.rwlock);
node = eb64_first(&el->rx.pkts);
while (node) {
struct quic_rx_packet *pkt;
pkt = eb64_entry(&node->node, struct quic_rx_packet, pn_node);
/* the packet is decrypted and handled here */
node = eb64_next(node);
quic_rx_packet_eb64_delete(&pkt->pn_node);
}
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &el->rx.rwlock);
if (!qc_treat_rx_crypto_frms(el, ctx))
goto err;
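/*
 * Standalone sketch of the draining side, continuing the rx_queue example
 * above.  As in the hunk above, the successor is fetched before the
 * current element is detached, and the lock is only released once the
 * walk is done, before the heavier follow-up work (in HAProxy,
 * qc_treat_rx_crypto_frms()).
 */
#include <stdlib.h>

static void rx_queue_drain(struct rx_queue *q, void (*process)(struct rx_pkt *))
{
        struct rx_pkt *pkt, *next;

        pthread_rwlock_wrlock(&q->rwlock);
        pkt = q->head;
        while (pkt) {
                next = pkt->next;   /* grab the successor before unlinking */
                process(pkt);       /* decrypt/parse would happen here */
                free(pkt);          /* this sketch owns its packets outright */
                pkt = next;
        }
        q->head = NULL;
        pthread_rwlock_unlock(&q->rwlock);
}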
qel->tls_ctx.tx.flags = 0;
qel->rx.pkts = EB_ROOT;
+ HA_RWLOCK_INIT(&qel->rx.rwlock);
LIST_INIT(&qel->rx.pqpkts);
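/*
 * Standalone sketch of the matching setup step: the lock is initialized in
 * the same place as the container it protects, so no code path can reach
 * the RX packets before the lock exists.  rx_queue is the illustrative
 * type from the sketches above; pthread_rwlock_init() merely stands in for
 * HA_RWLOCK_INIT() here.
 */
static int rx_queue_init(struct rx_queue *q)
{
        q->head = NULL;                               /* empty container, like EB_ROOT */
        return pthread_rwlock_init(&q->rwlock, NULL); /* 0 on success */
}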
/* Allocate only one buffer. */
qpkt_trace = pkt;
/* Store the packet */
pkt->pn_node.key = pkt->pn;
+ HA_RWLOCK_WRLOCK(QUIC_LOCK, &qel->rx.rwlock);
quic_rx_packet_eb64_insert(&qel->rx.pkts, &pkt->pn_node);
+ HA_RWLOCK_WRUNLOCK(QUIC_LOCK, &qel->rx.rwlock);
}
else if (qel) {
TRACE_PROTO("hp not removed", QUIC_EV_CONN_TRMHP, ctx ? ctx->conn : NULL, pkt);