qc->wait_event.tasklet->process = quic_conn_io_cb;
qc->wait_event.tasklet->context = qc;
qc->wait_event.events = 0;
- /* Set tasklet tid based on the SCID selected by us for this
- * connection. The upper layer will also be binded on the same thread.
- */
- qc->tid = quic_get_cid_tid(qc->scid.data, &l->rx);
- qc->wait_event.tasklet->tid = qc->tid;
qc->subs = NULL;
if (qc_conn_alloc_ssl_ctx(qc) ||
/* Attach this task to the same thread ID used for the connection */
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
- qc->timer_task = task_new_on(qc->tid);
+ qc->timer_task = task_new_here();
if (!qc->timer_task) {
TRACE_ERROR("timer task allocation failed", QUIC_EV_CONN_NEW, qc);
goto leave;
}
/* CIDs */
- chunk_appendf(&trash, "* %p[%02u]: scid=", qc, qc->tid);
+ chunk_appendf(&trash, "* %p[%02u]: scid=", qc, ctx->thr);
for (cid_len = 0; cid_len < qc->scid.len; ++cid_len)
chunk_appendf(&trash, "%02x", qc->scid.data[cid_len]);
while (cid_len++ < 20)
*/
void quic_accept_push_qc(struct quic_conn *qc)
{
- struct quic_accept_queue *queue = &quic_accept_queues[qc->tid];
- struct li_per_thread *lthr = &qc->li->per_thr[qc->tid];
+ struct quic_accept_queue *queue = &quic_accept_queues[tid];
+ struct li_per_thread *lthr = &qc->li->per_thr[tid];
/* early return if accept is already in progress/done for this
 * connection
 */
MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);
/* 3. wake up the queue tasklet */
- tasklet_wakeup(quic_accept_queues[qc->tid].tasklet);
+ tasklet_wakeup(quic_accept_queues[tid].tasklet);
}
/* Tasklet handler to accept QUIC connections. Call listener_accept on every