return _quic_cid_tree_idx(cid->data);
}
-/* Insert <conn_id> into global CID tree. Do not check if value is already
- * present in the tree. As such, it should not be used for the first DCID of a
- * connection instance.
- */
-static inline void _quic_cid_insert(struct quic_connection_id *conn_id)
-{
- const uchar idx = quic_cid_tree_idx(&conn_id->cid);
- struct quic_cid_tree *tree = &quic_cid_trees[idx];
-
- HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
- ebmb_insert(&tree->root, &conn_id->node, conn_id->cid.len);
- HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);
-}
-
/* Remove <conn_id> from global CID tree as a thread-safe operation. */
static inline void quic_cid_delete(struct quic_connection_id *conn_id)
{
const uchar idx = quic_cid_tree_idx(&conn_id->cid);
struct quic_cid_tree *tree = &quic_cid_trees[idx];

HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
ebmb_delete(&conn_id->node);
HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);
}
-/* Insert <conn_id> in global CID tree. It may fail if an identical value is
- * already stored. In this case, <new_tid> will be filled with the thread ID of
- * the already stored CID.
+/* Insert <conn_id> into the global CID tree. It may fail on collision, i.e.
+ * if an identical CID is already stored.
+ *
+ * If <new_tid> is non-NULL, it is used as an output parameter: it is
+ * initialized to -1 and, on collision, set to the thread ID of the already
+ * stored CID.
*
* Returns 0 on insert success else non-zero.
*/
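+/* Usage sketch (illustrative only; freeing the CID on collision mirrors the
+ * call sites below and is not mandated by this API):
+ *
+ *	int new_tid;
+ *
+ *	if (quic_cid_insert(conn_id, &new_tid)) {
+ *		// collision: <new_tid> holds the thread ID of the stored CID
+ *		pool_free(pool_head_quic_connection_id, conn_id);
+ *	}
+ */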
struct ebmb_node *node;
struct quic_cid_tree *tree;
int ret = 0;
- *new_tid = -1;
+ if (new_tid)
+ *new_tid = -1;
tree = &quic_cid_trees[quic_cid_tree_idx(&conn_id->cid)];
HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
node = ebmb_insert(&tree->root, &conn_id->node, conn_id->cid.len);
if (node != &conn_id->node) {
- /* Node already inserted, may happen on thread contention. */
- conn_id = ebmb_entry(node, struct quic_connection_id, node);
- *new_tid = HA_ATOMIC_LOAD(&conn_id->tid);
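+ /* Node already inserted, may happen on thread contention. */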
+ if (new_tid) {
+ conn_id = ebmb_entry(node, struct quic_connection_id, node);
+ *new_tid = HA_ATOMIC_LOAD(&conn_id->tid);
+ }
ret = -1;
}
else {
goto err;
}
- /* TODO To prevent CID tree locking, all CIDs created here
- * could be allocated at the same time as the first one.
- */
- _quic_cid_insert(conn_id);
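+ /* On collision, release this CID and its frame, then resume with the next frame to build. */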
+ if (quic_cid_insert(conn_id, NULL)) {
+ TRACE_STATE("collision during CID generation on post-handshake", QUIC_EV_CONN_IO_CB, qc);
+ pool_free(pool_head_quic_connection_id, conn_id);
+ qc_frm_free(qc, &frm);
+ continue;
+ }
quic_cid_register_seq_num(conn_id);
quic_connection_id_to_frm_cpy(frm, conn_id);
goto err;
}
- _quic_cid_insert(conn_cid);
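+ /* A collision here is fatal: release the CID and take the error path, so the owner thread ID is not needed (NULL <new_tid>). */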
+ if (quic_cid_insert(conn_cid, NULL)) {
+ pool_free(pool_head_quic_connection_id, conn_cid);
+ goto err;
+ }
+
quic_cid_register_seq_num(conn_cid);
dcid = &qc->dcid;
conn_id = conn_cid;
case QUIC_FT_RETIRE_CONNECTION_ID:
{
struct quic_connection_id *conn_id = NULL;
+ int retry_rand_cid = 3; /* Max number of attempts to generate a non-colliding random CID. */
if (!qc_handle_retire_connection_id_frm(qc, frm, &pkt->dcid, &conn_id))
goto err;
goto err;
}
- if (quic_cid_generate(conn_id)) {
- TRACE_ERROR("error on CID generation", QUIC_EV_CONN_PSTRM, qc);
+ while (retry_rand_cid--) {
+ if (quic_cid_generate(conn_id)) {
+ TRACE_ERROR("error on CID generation", QUIC_EV_CONN_PSTRM, qc);
+ quic_set_connection_close(qc, quic_err_transport(QC_ERR_INTERNAL_ERROR));
+ pool_free(pool_head_quic_connection_id, conn_id);
+ qc_notify_err(qc);
+ goto err;
+ }
+
+ if (quic_cid_insert(conn_id, NULL) == 0)
+ break;
+ }
+
+ if (retry_rand_cid < 0) {
+ /* Multiple collisions during random CID generation.
+ * Prefer to close the connection in error
+ * immediately.
+ */
+ TRACE_ERROR("CID pool exhausted", QUIC_EV_CONN_IO_CB, qc);
quic_set_connection_close(qc, quic_err_transport(QC_ERR_INTERNAL_ERROR));
pool_free(pool_head_quic_connection_id, conn_id);
qc_notify_err(qc);
goto err;
}
- _quic_cid_insert(conn_id);
quic_cid_register_seq_num(conn_id);