TRACE_ERROR("overlapping data rejected", QUIC_EV_CONN_PRSHPKT, qc);
quic_set_connection_close(qc, quic_err_transport(QC_ERR_PROTOCOL_VIOLATION));
qc_notify_err(qc);
+ goto err;
}
else if (ncb_ret == NCB_RET_GAP_SIZE) {
- TRACE_ERROR("cannot bufferize frame due to gap size limit",
- QUIC_EV_CONN_PRSHPKT, qc);
+ TRACE_DATA("cannot bufferize frame due to gap size limit",
+ QUIC_EV_CONN_PRSHPKT, qc);
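+ /* Not a fatal error: report AGAIN so that the caller keeps the frame
+  * aside and retries it once the other frames of the packet have been
+  * handled.
+  */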
+ ret = QUIC_RX_RET_FRM_AGAIN;
+ goto done;
}
- goto err;
}
/* Reschedule with TASK_HEAVY if CRYPTO data ready for decoding. */
static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
struct quic_enc_level *qel)
{
- struct quic_frame *frm = NULL;
+ struct list retry_frms = LIST_HEAD_INIT(retry_frms);
+ struct quic_frame *frm = NULL, *frm_tmp;
const unsigned char *pos, *end;
enum quic_rx_ret_frm ret;
int fast_retrans = 0;
+ /* Parsing may be rerun multiple times, but no more than <iter> passes. */
+ int iter = 3, parsing_stage = 0;
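+ /* <parsing_stage>: 0 = no frame to retry yet, 1 = at least one frame
+  * returned AGAIN, 2 = a frame was handled after an AGAIN, so a new
+  * parsing pass may succeed.
+  */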
TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
/* Skip the AAD */
switch (ret) {
case QUIC_RX_RET_FRM_FATAL:
goto err;
+
+ case QUIC_RX_RET_FRM_AGAIN:
+ if (parsing_stage == 0) {
+ TRACE_STATE("parsing stage set to 1 (AGAIN encountered)", QUIC_EV_CONN_PRSHPKT, qc);
+ ++parsing_stage;
+ }
+ /* Save the frame in a temporary list to reparse it later. A new
+  * instance must be used for the next packet frames.
+  */
+ LIST_APPEND(&retry_frms, &frm->list);
+ frm = NULL;
+ break;
+
case QUIC_RX_RET_FRM_DUP:
if (qc_is_listener(qc) && qel == qc->iel &&
!(qc->flags & QUIC_FL_CONN_HANDSHAKE_SPEED_UP)) {
fast_retrans = 1;
}
break;
+
case QUIC_RX_RET_FRM_DONE:
- /* nothing to do here */
+ if (parsing_stage == 1) {
+ TRACE_STATE("parsing stage set to 2 (DONE after AGAIN)", QUIC_EV_CONN_PRSHPKT, qc);
+ ++parsing_stage;
+ }
break;
}
+
break;
case QUIC_FT_NEW_TOKEN:
/* TODO */
if (frm)
qc_frm_free(qc, &frm);
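+ /* Reparse the frames which could not be buffered on the first pass.
+  * Stop as soon as a pass makes no progress or after <iter> passes.
+  */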
+ while (!LIST_ISEMPTY(&retry_frms)) {
+ if (--iter <= 0) {
+ TRACE_ERROR("interrupt parsing due to max iteration reached",
+ QUIC_EV_CONN_PRSHPKT, qc);
+ goto err;
+ }
+ else if (parsing_stage <= 1) {
+ TRACE_ERROR("interrupt parsing due to buffering blocked on gap size limit",
+ QUIC_EV_CONN_PRSHPKT, qc);
+ goto err;
+ }
+
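+ /* Reset the stage: it must reach 2 again during this pass for another pass to be attempted. */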
+ parsing_stage = 0;
+ list_for_each_entry_safe(frm, frm_tmp, &retry_frms, list) {
+ /* Only CRYPTO frames may be reparsed for now. */
+ BUG_ON(frm->type != QUIC_FT_CRYPTO);
+ ret = qc_handle_crypto_frm(qc, &frm->crypto, pkt, qel);
+ switch (ret) {
+ case QUIC_RX_RET_FRM_FATAL:
+ goto err;
+
+ case QUIC_RX_RET_FRM_AGAIN:
+ if (parsing_stage == 0) {
+ TRACE_STATE("parsing stage set to 1 (AGAIN encountered)", QUIC_EV_CONN_PRSHPKT, qc);
+ ++parsing_stage;
+ }
+ break;
+
+ case QUIC_RX_RET_FRM_DONE:
+ TRACE_PROTO("frame handled after a new parsing iteration",
+ QUIC_EV_CONN_PRSAFRM, qc, frm);
+ if (parsing_stage == 1) {
+ TRACE_STATE("parsing stage set to 2 (DONE after AGAIN)", QUIC_EV_CONN_PRSHPKT, qc);
+ ++parsing_stage;
+ }
+ __fallthrough;
+ case QUIC_RX_RET_FRM_DUP:
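+ /* Handled or duplicated: in both cases the frame can be released. */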
+ qc_frm_free(qc, &frm);
+ break;
+ }
+ }
+ }
+
+ /* An error must have been returned above if any frame could not be
+  * handled, hence the retry list must be empty at this point.
+  */
+ BUG_ON(!LIST_ISEMPTY(&retry_frms));
+
if (fast_retrans && qc->iel && qc->hel) {
struct quic_enc_level *iqel = qc->iel;
struct quic_enc_level *hqel = qc->hel;
err:
if (frm)
qc_frm_free(qc, &frm);
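+ /* Also release the frames which were still waiting to be reparsed. */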
+ list_for_each_entry_safe(frm, frm_tmp, &retry_frms, list) {
+ qc_frm_free(qc, &frm);
+ }
+
TRACE_DEVEL("leaving on error", QUIC_EV_CONN_PRSHPKT, qc);
return 0;
}