const void *a1, const void *a2, const void *a3, const void *a4);
static const struct trace_event qmux_trace_events[] = {
+#define QMUX_EV_QCC_NEW (1ULL << 0)
+ { .mask = QMUX_EV_QCC_NEW, .name = "qcc_new", .desc = "new QUIC connection" },
#define QMUX_EV_QCC_RECV (1ULL << 1)
{ .mask = QMUX_EV_QCC_RECV, .name = "qcc_recv", .desc = "Rx on QUIC connection" },
#define QMUX_EV_QCC_SEND (1ULL << 2)
*/
static void qcc_emit_cc(struct qcc *qcc, int err)
{
+ TRACE_ENTER(QMUX_EV_QCC_END, qcc->conn);
+
+ TRACE_STATE("set CONNECTION_CLOSE on quic-conn", QMUX_EV_QCC_WAKE, qcc->conn);
quic_set_connection_close(qcc->conn->handle.qc, quic_err_transport(err));
qcc->flags |= QC_CF_CC_EMIT;
tasklet_wakeup(qcc->wait_event.tasklet);
+
+ TRACE_LEAVE(QMUX_EV_QCC_END, qcc->conn);
}
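+
+/* Trace argument convention used in this file: the first variadic argument
+ * is the connection and the second, when a stream is involved, the qcs
+ * instance, e.g.:
+ *   TRACE_PROTO("receiving STREAM", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+ */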
/* Allocate a new QUIC stream with id <id> and type <type>. */
*/
static void qcs_free(struct qcs *qcs)
{
+ TRACE_ENTER(QMUX_EV_QCS_END, qcs->qcc->conn, qcs);
+
qc_free_ncbuf(qcs, &qcs->rx.ncbuf);
b_free(&qcs->tx.buf);
eb64_delete(&qcs->by_id);
pool_free(pool_head_qcs, qcs);
+
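+ /* <qcs> was just freed: only the event mask may be passed to TRACE_LEAVE. */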
+ TRACE_LEAVE(QMUX_EV_QCS_END);
}
static forceinline struct stconn *qcs_sc(const struct qcs *qcs)
struct ncbuf *buf = &qcs->rx.ncbuf;
enum ncb_ret ret;
+ TRACE_ENTER(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+
ret = ncb_advance(buf, bytes);
if (ret) {
ABORT_NOW(); /* should not happen because removal is only done on data */
qcs->rx.offset += bytes;
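+ /* Refresh the peer's credit once the remaining stream credit falls
+ * below half of the initial window.
+ */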
if (qcs->rx.msd - qcs->rx.offset < qcs->rx.msd_init / 2) {
+ TRACE_DATA("increase stream credit via MAX_STREAM_DATA", QMUX_EV_QCS_RECV, qcc->conn, qcs);
frm = pool_zalloc(pool_head_quic_frame);
BUG_ON(!frm); /* TODO handle this properly */
qcc->lfctl.offsets_consume += bytes;
if (qcc->lfctl.md - qcc->lfctl.offsets_consume < qcc->lfctl.md_init / 2) {
+ TRACE_DATA("increase conn credit via MAX_DATA", QMUX_EV_QCS_RECV, qcc->conn, qcs);
frm = pool_zalloc(pool_head_quic_frame);
BUG_ON(!frm); /* TODO handle this properly */
LIST_APPEND(&qcs->qcc->lfctl.frms, &frm->list);
tasklet_wakeup(qcs->qcc->wait_event.tasklet);
}
+
+ TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);
}
/* Decode the content of STREAM frames already received on the stream instance
*/
void qcc_emit_cc_app(struct qcc *qcc, int err, int immediate)
{
+ TRACE_ENTER(QMUX_EV_QCC_END, qcc->conn);
+
if (immediate) {
quic_set_connection_close(qcc->conn->handle.qc, quic_err_app(err));
qcc->flags |= QC_CF_CC_EMIT;
/* Only register the error code for graceful shutdown. */
qcc->conn->handle.qc->err = quic_err_app(err);
}
+
+ TRACE_LEAVE(QMUX_EV_QCC_END, qcc->conn);
}
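+
+/* Usage sketch (hypothetical call sites; QC_ERR_NO_ERROR as used further
+ * below in this file):
+ *   qcc_emit_cc_app(qcc, QC_ERR_NO_ERROR, 1);   immediate CONNECTION_CLOSE
+ *   qcc_emit_cc_app(qcc, QC_ERR_NO_ERROR, 0);   graceful: error only recorded
+ */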
/* Prepare for the emission of RESET_STREAM on <qcs> with error code <err>. */
tasklet_wakeup(qcc->wait_event.tasklet);
}
+/* Install the <app_ops> applicative layer of a QUIC connection on mux <qcc>.
+ * Returns 0 on success else non-zero.
+ */
+int qcc_install_app_ops(struct qcc *qcc, const struct qcc_app_ops *app_ops)
+{
+ TRACE_ENTER(QMUX_EV_QCC_NEW, qcc->conn);
+
+ qcc->app_ops = app_ops;
+ if (qcc->app_ops->init && !qcc->app_ops->init(qcc)) {
+ TRACE_ERROR("app ops init error", QMUX_EV_QCC_NEW, qcc->conn);
+ goto err;
+ }
+
+ TRACE_PROTO("application layer initialized", QMUX_EV_QCC_NEW, qcc->conn);
+
+ if (qcc->app_ops->finalize)
+ qcc->app_ops->finalize(qcc->ctx);
+
+ TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
+ return 0;
+
+ err:
+ TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
+ return 1;
+}
+
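+/* Expected usage, assuming an <h3_ops> instance provided by the HTTP/3
+ * layer (hypothetical call site):
+ *
+ *   if (qcc_install_app_ops(qcc, &h3_ops)) {
+ *           TRACE_ERROR("app ops install failure", QMUX_EV_QCC_NEW, qcc->conn);
+ *           goto fail;
+ *   }
+ */
+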
/* Handle a new STREAM frame for stream with id <id>. Payload is pointed to by
 * <data> with length <len> and starts at offset <offset>. <fin> is set if
* the QUIC frame FIN bit is set.
goto out;
}
+ TRACE_PROTO("receiving STREAM", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
qcs_idle_open(qcs);
if (offset + len > qcs->rx.offset_max) {
{
TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+ TRACE_PROTO("receiving MAX_DATA", QMUX_EV_QCC_RECV, qcc->conn);
if (qcc->rfctl.md < max) {
qcc->rfctl.md = max;
TRACE_DEVEL("increase remote max-data", QMUX_EV_QCC_RECV, qcc->conn);
}
if (qcs) {
+ TRACE_PROTO("receiving MAX_STREAM_DATA", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
if (max > qcs->tx.msd) {
qcs->tx.msd = max;
TRACE_DEVEL("increase remote max-stream-data", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
{
struct quic_frame *frm;
+ TRACE_ENTER(QMUX_EV_QCS_END, qcc->conn);
+
if (quic_stream_is_bidi(id)) {
++qcc->lfctl.cl_bidi_r;
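+ /* Grant the client new bidirectional streams once more than half of
+ * the initial limit has been closed.
+ */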
if (qcc->lfctl.cl_bidi_r > qcc->lfctl.ms_bidi_init / 2) {
+ TRACE_DATA("increase max stream limit with MAX_STREAMS_BIDI", QMUX_EV_QCC_SEND, qcc->conn);
frm = pool_zalloc(pool_head_quic_frame);
BUG_ON(!frm); /* TODO handle this properly */
/* TODO */
}
+ TRACE_LEAVE(QMUX_EV_QCS_END, qcc->conn);
+
return 0;
}
BUG_ON(qcs->tx.sent_offset + total > qcs->tx.offset);
BUG_ON(qcc->tx.sent_offsets + total > qcc->rfctl.md);
+ TRACE_PROTO("sending STREAM frame", QMUX_EV_QCS_SEND, qcc->conn, qcs);
frm = pool_zalloc(pool_head_quic_frame);
- if (!frm)
+ if (!frm) {
+ TRACE_ERROR("frame alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
goto err;
+ }
LIST_INIT(&frm->reflist);
frm->type = QUIC_FT_STREAM_8;
struct qcc *qcc = qcs->qcc;
uint64_t diff;
+ TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
BUG_ON(offset > qcs->tx.sent_offset);
BUG_ON(offset + data > qcs->tx.offset);
/* Check if the STREAM frame has already been notified. It can happen
 * in case of retransmission.
*/
- if (offset + data < qcs->tx.sent_offset)
- return;
+ if (offset + data < qcs->tx.sent_offset) {
+ TRACE_DEVEL("offset already notified", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ goto out;
+ }
qcs_idle_open(qcs);
/* Reset flag to not emit multiple FIN STREAM frames. */
qcs->flags &= ~QC_SF_FIN_STREAM;
}
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
}
/* Wrapper for send on transport layer. Send a list of frames <frms> for the
else {
qcc_emit_cc_app(qcc, QC_ERR_NO_ERROR, 0);
}
+ TRACE_PROTO("application layer released", QMUX_EV_QCC_END, qcc->conn);
if (qcc->task) {
task_destroy(qcc->task);
if (qc_purge_streams(qcc)) {
if (qcc_is_dead(qcc)) {
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
qc_release(qcc);
goto end;
}
* shutdown should occur. For all other cases, an immediate close
* seems legitimate.
*/
- if (qcc_is_dead(qcc))
+ if (qcc_is_dead(qcc)) {
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
qc_release(qcc);
+ }
out:
TRACE_LEAVE(QMUX_EV_QCC_WAKE);
struct qcc *qcc;
struct quic_transport_params *lparams, *rparams;
+ TRACE_ENTER(QMUX_EV_QCC_NEW);
+
qcc = pool_alloc(pool_head_qcc);
- if (!qcc)
+ if (!qcc) {
+ TRACE_ERROR("alloc failure", QMUX_EV_QCC_NEW);
goto fail_no_qcc;
+ }
qcc->conn = conn;
conn->ctx = qcc;
}
qcc->wait_event.tasklet = tasklet_new();
- if (!qcc->wait_event.tasklet)
+ if (!qcc->wait_event.tasklet) {
+ TRACE_ERROR("taslket alloc failure", QMUX_EV_QCC_NEW);
goto fail_no_tasklet;
+ }
LIST_INIT(&qcc->send_retry_list);
prx->timeout.client;
if (tick_isset(qcc->timeout)) {
qcc->task = task_new_here();
- if (!qcc->task)
+ if (!qcc->task) {
+ TRACE_ERROR("timeout task alloc failure", QMUX_EV_QCC_NEW);
goto fail_no_timeout_task;
+ }
qcc->task->process = qc_timeout_task;
qcc->task->context = qcc;
qcc->task->expire = tick_add(now_ms, qcc->timeout);
/* init read cycle */
tasklet_wakeup(qcc->wait_event.tasklet);
+ TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
return 0;
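+
+ /* The fail_* labels below unwind in reverse order of allocation so that
+ * each one only releases what was successfully allocated before the
+ * corresponding failure.
+ */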
fail_no_timeout_task:
fail_no_tasklet:
pool_free(pool_head_qcc, qcc);
fail_no_qcc:
+ TRACE_LEAVE(QMUX_EV_QCC_NEW);
return -1;
}
return 0;
release:
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
qc_release(qcc);
TRACE_LEAVE(QMUX_EV_QCC_WAKE);
return 1;