void qcc_wakeup(struct qcc *qcc)
{
+ HA_ATOMIC_AND(&qcc->wait_event.tasklet->state, ~TASK_F_USR1);
+ tasklet_wakeup(qcc->wait_event.tasklet);
+}
+
+/* Similar to qcc_wakeup(), except that the tasklet is flagged with
+ * TASK_F_USR1: qcc_io_cb() will then only purge frames whose emission
+ * was delayed by pacing, instead of running a full I/O cycle.
+ */
+static void qcc_wakeup_pacing(struct qcc *qcc)
+{
+ HA_ATOMIC_OR(&qcc->wait_event.tasklet->state, TASK_F_USR1);
tasklet_wakeup(qcc->wait_event.tasklet);
}
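
The two helpers above multiplex two wakeup reasons onto a single tasklet: with TASK_F_USR1 set the callback only purges frames already prepared and delayed by pacing, with it cleared it runs a full I/O cycle (see qcc_io_cb() at the end of this patch). Below is a stand-alone sketch of that dispatch pattern using C11 atomics; every name in it is illustrative, none of it is HAProxy API.

#include <stdatomic.h>

#define FLAG_PACING (1u << 0)           /* stand-in for TASK_F_USR1 */

struct toy_tasklet {
    _Atomic unsigned int state;
};

/* Full wakeup: clear the pacing bit so the callback does a complete run. */
static void toy_wakeup(struct toy_tasklet *t)
{
    atomic_fetch_and(&t->state, ~FLAG_PACING);
    /* toy_schedule(t); -- queueing elided */
}

/* Pacing wakeup: set the bit so the callback only flushes paced frames. */
static void toy_wakeup_pacing(struct toy_tasklet *t)
{
    atomic_fetch_or(&t->state, FLAG_PACING);
    /* toy_schedule(t); */
}

/* Callback dispatch, mirroring the TASK_F_USR1 test in qcc_io_cb(). */
static void toy_io_cb(struct toy_tasklet *t, unsigned int status)
{
    if (status & FLAG_PACING) {
        /* purge paced frames only */
        return;
    }
    /* full send/receive processing */
}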
*
- * Returns 0 if all data sent with success else non-zero.
+ * Returns 0 if all data sent with success. A strictly positive value is
+ * returned if emission must be paced before retrying. A negative value is
+ * returned if the input list was empty or on fatal transport error.
*/
-static int qcc_send_frames(struct qcc *qcc, struct list *frms)
+static int qcc_send_frames(struct qcc *qcc, struct list *frms, int stream)
{
enum quic_tx_err ret;
+ struct quic_pacer *pacer = NULL;
TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);
if (LIST_ISEMPTY(frms)) {
TRACE_DEVEL("leaving on no frame to send", QMUX_EV_QCC_SEND, qcc->conn);
- return 1;
+ return -1;
}
- ret = qc_send_mux(qcc->conn->handle.qc, frms, NULL);
+ if (stream)
+ pacer = &qcc->tx.pacer;
+
+ ret = qc_send_mux(qcc->conn->handle.qc, frms, pacer);
if (ret == QUIC_TX_ERR_FATAL) {
TRACE_DEVEL("error on sending", QMUX_EV_QCC_SEND, qcc->conn);
qcc_subscribe_send(qcc);
goto err;
}

- /* If there is frames left at this stage, transport layer is blocked.
- * Subscribe on it to retry later.
- */
+ /* If there are frames left at this stage, the transport layer is
+ * blocked. Subscribe on it to retry later.
+ */
- if (!LIST_ISEMPTY(frms)) {
+ if (!LIST_ISEMPTY(frms) && ret != QUIC_TX_ERR_AGAIN) {
TRACE_DEVEL("remaining frames to send", QMUX_EV_QCC_SEND, qcc->conn);
qcc_subscribe_send(qcc);
goto err;
}
TRACE_LEAVE(QMUX_EV_QCC_SEND, qcc->conn);
- return 0;
+ return ret == QUIC_TX_ERR_AGAIN ? 1 : 0;
err:
TRACE_DEVEL("leaving on error", QMUX_EV_QCC_SEND, qcc->conn);
- return 1;
+ return -1;
}
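
With pacing, qcc_send_frames() now has a tri-state result. The sketch below condenses how a caller is expected to consume it; it is a simplified restatement of the qcc_io_send() logic further down, not an extra helper added by the patch.

/* Illustrative only: dispatch on the tri-state result of qcc_send_frames(). */
static int sketch_emit(struct qcc *qcc, struct list *frms)
{
    int ret = qcc_send_frames(qcc, frms, 1);

    if (ret < 0)
        return -1;              /* empty list or fatal error: abort cycle */
    if (ret == 1)
        qcc_wakeup_pacing(qcc); /* QUIC_TX_ERR_AGAIN: retry after pacing delay */
    return 0;                   /* otherwise everything sent, may continue */
}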
/* Emit a RESET_STREAM on <qcs>.
frm->reset_stream.final_size = qcs->tx.fc.off_real;
LIST_APPEND(&frms, &frm->list);
- if (qcc_send_frames(qcs->qcc, &frms)) {
+ if (qcc_send_frames(qcs->qcc, &frms, 0)) {
if (!LIST_ISEMPTY(&frms))
qc_frm_free(qcs->qcc->conn->handle.qc, &frm);
TRACE_DEVEL("cannot send RESET_STREAM", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
frm->stop_sending.app_error_code = qcs->err;
LIST_APPEND(&frms, &frm->list);
- if (qcc_send_frames(qcs->qcc, &frms)) {
+ if (qcc_send_frames(qcs->qcc, &frms, 0)) {
if (!LIST_ISEMPTY(&frms))
qc_frm_free(qcc->conn->handle.qc, &frm);
TRACE_DEVEL("cannot send STOP_SENDING", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
static int qcc_io_send(struct qcc *qcc)
{
- struct list *frms = quic_pacing_frms(&qcc->tx.pacer);
+ struct quic_pacer *pacer = &qcc->tx.pacer;
+ struct list *frms = quic_pacing_frms(pacer);
/* Temporary list for QCS on error. */
struct list qcs_failed = LIST_HEAD_INIT(qcs_failed);
struct qcs *qcs, *qcs_tmp, *first_qcs = NULL;
uint64_t window_conn = qfctl_rcap(&qcc->tx.fc);
- int ret, total = 0, resent;
+ int ret = 0, total = 0, resent;
TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);
* apply for STREAM frames.
*/
+ quic_pacing_reset(pacer);
+
/* Check for transport error. */
if (qcc->flags & QC_CF_ERR_CONN || qcc->conn->flags & CO_FL_ERROR) {
TRACE_DEVEL("connection on error", QMUX_EV_QCC_SEND, qcc->conn);
}
if (!LIST_ISEMPTY(&qcc->lfctl.frms)) {
- if (qcc_send_frames(qcc, &qcc->lfctl.frms)) {
+ if (qcc_send_frames(qcc, &qcc->lfctl.frms, 0)) {
TRACE_DEVEL("flow-control frames rejected by transport, aborting send", QMUX_EV_QCC_SEND, qcc->conn);
goto out;
}
}
}
+ if (!LIST_ISEMPTY(frms) && !quic_pacing_expired(pacer)) {
+ qcc_wakeup_pacing(qcc);
+ return 1;
+ }
+
/* Retry sending until no frame to send, data rejected or connection
* flow-control limit reached.
*/
- while ((ret = qcc_send_frames(qcc, frms)) == 0 && !qfctl_rblocked(&qcc->tx.fc)) {
+ while ((ret = qcc_send_frames(qcc, frms, 1)) == 0 && !qfctl_rblocked(&qcc->tx.fc)) {
window_conn = qfctl_rcap(&qcc->tx.fc);
resent = 0;
}
sent_done:
- /* Deallocate frames that the transport layer has rejected. */
- quic_pacing_reset(&qcc->tx.pacer);
+ if (ret == 1) {
+ qcc_wakeup_pacing(qcc);
+ }
+ else if (!LIST_ISEMPTY(quic_pacing_frms(pacer))) {
+ /* Deallocate frames that the transport layer has rejected. */
+ quic_pacing_reset(pacer);
+ }
/* Re-insert on-error QCS at the end of the send-list. */
if (!LIST_ISEMPTY(&qcs_failed)) {
TRACE_LEAVE(QMUX_EV_QCC_END);
}
+/* Send frames whose emission was previously delayed by pacing. Reschedules
+ * a pacing wakeup if the budget is exhausted with frames still queued, or
+ * subscribes on the transport layer on blocking or fatal error.
+ */
+static void qcc_purge_sending(struct qcc *qcc)
+{
+ struct quic_conn *qc = qcc->conn->handle.qc;
+ struct quic_pacer *pacer = &qcc->tx.pacer;
+ enum quic_tx_err ret;
+
+ ret = quic_pacing_send(pacer, qc);
+ if (ret == QUIC_TX_ERR_AGAIN) {
+ BUG_ON(LIST_ISEMPTY(quic_pacing_frms(pacer)));
+ qcc_wakeup_pacing(qcc);
+ }
+ else if (ret == QUIC_TX_ERR_FATAL) {
+ TRACE_DEVEL("error on sending", QMUX_EV_QCC_SEND, qcc->conn);
+ HA_ATOMIC_AND(&qcc->wait_event.tasklet->state, ~TASK_F_USR1);
+ qcc_subscribe_send(qcc);
+ }
+ else {
+ if (!LIST_ISEMPTY(quic_pacing_frms(pacer)))
+ qcc_subscribe_send(qcc);
+ }
+}
+
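
qcc_purge_sending() relies on the pacer primitives quic_pacing_frms(), quic_pacing_expired() and quic_pacing_send(), whose definitions live outside this patch. The mock below only states the contract these call sites assume; the field names and the simplified clock comparison are assumptions, not the real implementation.

/* Assumed pacer contract (illustrative, hypothetical field names). */
struct mock_pacer {
    struct list frms;   /* frames waiting for their release time */
    unsigned int next;  /* tick after which emission may resume, 0 = now */
};

/* quic_pacing_frms(): expose the list of queued frames. */
static struct list *mock_pacing_frms(struct mock_pacer *p)
{
    return &p->frms;
}

/* quic_pacing_expired(): true once the pacing delay has elapsed; wrap-around
 * handling of HAProxy ticks is elided here. */
static int mock_pacing_expired(const struct mock_pacer *p)
{
    return !p->next || p->next <= now_ms;
}

/* quic_pacing_send() is not mocked: as used above, it emits as many queued
 * frames as the pacing budget allows and returns QUIC_TX_ERR_AGAIN while
 * some remain, or QUIC_TX_ERR_FATAL on transport failure. */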
struct task *qcc_io_cb(struct task *t, void *ctx, unsigned int status)
{
struct qcc *qcc = ctx;
TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc->conn);
+ if (status & TASK_F_USR1) {
+ qcc_purge_sending(qcc);
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE, qcc->conn);
+ return NULL;
+ }
+
if (!(qcc->wait_event.events & SUB_RETRY_SEND))
qcc_io_send(qcc);