TRACE_LEAVE(QUIC_EV_CONN_PKTLOSS, qc);
}
+/* Handle the <pkts> list of packets detected as lost at <now_us>, processing
+ * their TX frames. Send a packet loss event to the congestion controller if
+ * in-flight packets have been lost. Also frees the packets in the <pkts> list.
+ *
+ * Returns 1 on success, or 0 if the loss limit has been exceeded, in which
+ * case a CONNECTION_CLOSE frame has been prepared to close the connection
+ * ASAP.
+ */
+int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
+ struct list *pkts, uint64_t now_us)
+{
+ struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
+ int close = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ if (LIST_ISEMPTY(pkts))
+ goto leave;
+
+ oldest_lost = newest_lost = NULL;
+ list_for_each_entry_safe(pkt, tmp, pkts, list) {
+ struct list tmp = LIST_HEAD_INIT(tmp);
+
+ pkt->pktns->tx.in_flight -= pkt->in_flight_len;
+ qc->path->prep_in_flight -= pkt->in_flight_len;
+ qc->path->in_flight -= pkt->in_flight_len;
+ if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
+ qc->path->ifae_pkts--;
+ /* Treat the frames of this lost packet. */
+ if (!qc_handle_frms_of_lost_pkt(qc, pkt, &pktns->tx.frms))
+ close = 1;
+ LIST_DELETE(&pkt->list);
+ if (!oldest_lost) {
+ oldest_lost = newest_lost = pkt;
+ }
+ else {
+ if (newest_lost != oldest_lost)
+ quic_tx_packet_refdec(newest_lost);
+ newest_lost = pkt;
+ }
+ }
+
+ if (!close) {
+ if (newest_lost) {
+			/* Send a congestion event to the controller */
+ struct quic_cc_event ev = { };
+
+ ev.type = QUIC_CC_EVT_LOSS;
+ ev.loss.time_sent = newest_lost->time_sent;
+
+ quic_cc_event(&qc->path->cc, &ev);
+ }
+
+		/* If an RTT has already been sampled, <rtt_min> has been set.
+		 * We must check if we are experiencing persistent congestion.
+		 * If this is the case, the congestion controller must re-enter
+		 * the slow start state.
+		 */
+ if (qc->path->loss.rtt_min && newest_lost != oldest_lost) {
+ unsigned int period = newest_lost->time_sent - oldest_lost->time_sent;
+
+ if (quic_loss_persistent_congestion(&qc->path->loss, period,
+ now_ms, qc->max_ack_delay))
+ qc->path->cc.algo->slow_start(&qc->path->cc);
+ }
+ }
+
+ /* <oldest_lost> cannot be NULL at this stage because we have ensured
+ * that <pkts> list is not empty. Without this, GCC 12.2.0 reports a
+ * possible overflow on a 0 byte region with O2 optimization.
+ */
+ ALREADY_CHECKED(oldest_lost);
+ quic_tx_packet_refdec(oldest_lost);
+ if (newest_lost != oldest_lost)
+ quic_tx_packet_refdec(newest_lost);
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+ return !close;
+}
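
Note on the persistent congestion branch above: quic_loss_persistent_congestion() applies the RFC 9002 section 7.6 test, where the time span covered by the lost packets is compared against a multiple of the current RTT estimate. A minimal self-contained sketch of that computation follows; the structure and field names are assumptions made for illustration, not HAProxy's actual types.

    /* Sketch only: persistent congestion duration test from RFC 9002 7.6,
     * with kGranularity taken as 1ms and kPersistentCongestionThreshold
     * as 3. All names below are illustrative.
     */
    struct rtt_sketch {
        unsigned int srtt;     /* smoothed RTT, in milliseconds */
        unsigned int rtt_var;  /* RTT variation, in milliseconds */
    };

    static inline int sketch_persistent_congestion(const struct rtt_sketch *rtt,
                                                   unsigned int period,
                                                   unsigned int max_ack_delay)
    {
        unsigned int var = 4 * rtt->rtt_var;
        unsigned int congestion_period;

        if (var < 1)
            var = 1; /* kGranularity */
        congestion_period = 3 * (rtt->srtt + var + max_ack_delay);

        /* <period> is newest_lost->time_sent - oldest_lost->time_sent */
        return period > congestion_period;
    }
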
return ret;
}
-/* Treat <frm> frame whose packet it is attached to has just been acknowledged. */
-static void qc_treat_acked_tx_frm(struct quic_conn *qc, struct quic_frame *frm)
+/* Handle <frm> frame, the packet it is attached to having just been
+ * acknowledged. The memory allocated for this frame is always released.
+ * Never fails.
+ */
+static void qc_handle_newly_acked_frm(struct quic_conn *qc, struct quic_frame *frm)
{
TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
TRACE_PROTO("RX ack TX frm", QUIC_EV_CONN_PRSAFRM, qc, frm);
TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
}
-/* Remove <largest> down to <smallest> node entries from <pkts> tree of TX packet,
- * deallocating them, and their TX frames.
- * May be NULL if <largest> node could not be found.
- */
-static void qc_ackrng_pkts(struct quic_conn *qc,
- unsigned int *pkt_flags, struct list *newly_acked_pkts)
+/* Handle the <newly_acked_pkts> list of newly acknowledged TX packets. */
+static void qc_handle_newly_acked_pkts(struct quic_conn *qc,
+ unsigned int *pkt_flags, struct list *newly_acked_pkts)
{
struct quic_tx_packet *pkt, *tmp;
*pkt_flags |= pkt->flags;
TRACE_DEVEL("Removing packet #", QUIC_EV_CONN_PRSAFRM, qc, NULL, &pkt->pn_node.key);
list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
- qc_treat_acked_tx_frm(qc, frm);
+ qc_handle_newly_acked_frm(qc, frm);
/* If there are others packet in the same datagram <pkt> is attached to,
* detach the previous one and the next one from <pkt>.
*/
TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
}
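
The comment about other packets in the same datagram refers to the chaining of coalesced packets. A small sketch of such a detach operation is given below for reference; the field names are assumptions of the sketch, not HAProxy's actual layout.

    /* Sketch only (assumed names): packets coalesced into the same UDP
     * datagram are chained through <prev>/<next> pointers; detaching an
     * acknowledged packet is a plain unlink of that chain.
     */
    #include <stddef.h>

    struct coalesced_pkt {
        struct coalesced_pkt *prev;
        struct coalesced_pkt *next;
    };

    static inline void sketch_dgram_detach(struct coalesced_pkt *pkt)
    {
        if (pkt->prev)
            pkt->prev->next = pkt->next;
        if (pkt->next)
            pkt->next->prev = pkt->prev;
        pkt->prev = pkt->next = NULL;
    }
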
-/* Remove all frames from <pkt_frm_list> and reinsert them in the same order
+/* Handle all frames sent from <pkt> packet and reinsert them in the same order
* they have been sent into <pktns_frm_list>. The loss counter of each frame is
* incremented and checked if it does not exceed retransmission limit.
*
* Returns 1 on success, 0 if a frame loss limit is exceeded. A
* CONNECTION_CLOSE is scheduled in this case.
*/
-static int qc_requeue_nacked_pkt_tx_frms(struct quic_conn *qc,
- struct quic_tx_packet *pkt,
- struct list *pktns_frm_list)
+int qc_handle_frms_of_lost_pkt(struct quic_conn *qc,
+ struct quic_tx_packet *pkt,
+ struct list *pktns_frm_list)
{
struct quic_frame *frm, *frmbak;
struct list *pkt_frm_list = &pkt->frms;
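
The doc comment above describes the per-frame loss accounting: each requeued frame carries a loss counter which is checked against a retransmission limit. A self-contained sketch of that check, with an assumed limit value and names, is shown here:

    /* Sketch only: frame loss accounting as described above. The limit
     * value and all names are assumptions for the illustration.
     */
    #define SKETCH_FRM_LOSS_LIMIT 10

    struct frm_sketch {
        unsigned int loss_count;
    };

    /* Returns 1 if every frame stays under the retransmission limit, 0 as
     * soon as one exceeds it, in which case the caller schedules a
     * CONNECTION_CLOSE.
     */
    static int sketch_check_frm_losses(struct frm_sketch *frms, unsigned int nb)
    {
        unsigned int i;

        for (i = 0; i < nb; i++) {
            if (++frms[i].loss_count > SKETCH_FRM_LOSS_LIMIT)
                return 0;
        }
        return 1;
    }
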
* <newly_acked_pkts> list and free them.
* Always succeeds.
*/
-static void qc_treat_newly_acked_pkts(struct quic_conn *qc,
- struct list *newly_acked_pkts)
+static void qc_notify_cc_of_newly_acked_pkts(struct quic_conn *qc,
+ struct list *newly_acked_pkts)
{
struct quic_tx_packet *pkt, *tmp;
struct quic_cc_event ev = { .type = QUIC_CC_EVT_ACK, };
}
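
qc_notify_cc_of_newly_acked_pkts() feeds one QUIC_CC_EVT_ACK event per newly acknowledged packet to the congestion controller. As a reminder of what such events typically drive, here is a minimal NewReno-style sketch, loosely following RFC 9002 appendix B.5 (recovery and app-limited checks omitted); the names and growth rule are illustrative, not HAProxy's controller code.

    /* Sketch only: NewReno-style window growth on acknowledged bytes.
     * Illustrative names throughout.
     */
    struct cc_sketch {
        unsigned long long cwnd;      /* congestion window, in bytes */
        unsigned long long ssthresh;  /* slow start threshold, in bytes */
        unsigned int mtu;             /* max datagram size, in bytes */
    };

    static void sketch_on_ack(struct cc_sketch *cc, unsigned long long acked)
    {
        if (cc->cwnd < cc->ssthresh)
            cc->cwnd += acked;  /* slow start: grow by the acked bytes */
        else
            cc->cwnd += (unsigned long long)cc->mtu * acked / cc->cwnd; /* congestion avoidance */
    }
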
-/* Handle <pkts> list of lost packets detected at <now_us> handling their TX
- * frames. Send a packet loss event to the congestion controller if in flight
- * packet have been lost. Also frees the packet in <pkts> list.
- *
- * Returns 1 on success else 0 if loss limit has been exceeded. A
- * CONNECTION_CLOSE was prepared to close the connection ASAP.
- */
-int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
- struct list *pkts, uint64_t now_us)
-{
- struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
- int close = 0;
-
- TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
-
- if (LIST_ISEMPTY(pkts))
- goto leave;
-
- oldest_lost = newest_lost = NULL;
- list_for_each_entry_safe(pkt, tmp, pkts, list) {
- struct list tmp = LIST_HEAD_INIT(tmp);
-
- pkt->pktns->tx.in_flight -= pkt->in_flight_len;
- qc->path->prep_in_flight -= pkt->in_flight_len;
- qc->path->in_flight -= pkt->in_flight_len;
- if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
- qc->path->ifae_pkts--;
- /* Treat the frames of this lost packet. */
- if (!qc_requeue_nacked_pkt_tx_frms(qc, pkt, &pktns->tx.frms))
- close = 1;
- LIST_DELETE(&pkt->list);
- if (!oldest_lost) {
- oldest_lost = newest_lost = pkt;
- }
- else {
- if (newest_lost != oldest_lost)
- quic_tx_packet_refdec(newest_lost);
- newest_lost = pkt;
- }
- }
-
- if (!close) {
- if (newest_lost) {
- /* Sent a congestion event to the controller */
- struct quic_cc_event ev = { };
-
- ev.type = QUIC_CC_EVT_LOSS;
- ev.loss.time_sent = newest_lost->time_sent;
-
- quic_cc_event(&qc->path->cc, &ev);
- }
-
- /* If an RTT have been already sampled, <rtt_min> has been set.
- * We must check if we are experiencing a persistent congestion.
- * If this is the case, the congestion controller must re-enter
- * slow start state.
- */
- if (qc->path->loss.rtt_min && newest_lost != oldest_lost) {
- unsigned int period = newest_lost->time_sent - oldest_lost->time_sent;
-
- if (quic_loss_persistent_congestion(&qc->path->loss, period,
- now_ms, qc->max_ack_delay))
- qc->path->cc.algo->slow_start(&qc->path->cc);
- }
- }
-
- /* <oldest_lost> cannot be NULL at this stage because we have ensured
- * that <pkts> list is not empty. Without this, GCC 12.2.0 reports a
- * possible overflow on a 0 byte region with O2 optimization.
- */
- ALREADY_CHECKED(oldest_lost);
- quic_tx_packet_refdec(oldest_lost);
- if (newest_lost != oldest_lost)
- quic_tx_packet_refdec(newest_lost);
-
- leave:
- TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
- return !close;
-}
-
/* Parse ACK frame into <frm> from a buffer at <buf> address with <end> being at
* one byte past the end of this buffer. Also update <rtt_sample> if needed, i.e.
* if the largest acked packet was newly acked and if there was at least one newly
} while (1);
if (!LIST_ISEMPTY(&newly_acked_pkts)) {
- qc_ackrng_pkts(qc, &pkt_flags, &newly_acked_pkts);
+ qc_handle_newly_acked_pkts(qc, &pkt_flags, &newly_acked_pkts);
if (new_largest_acked_pn && (pkt_flags & QUIC_FL_TX_PACKET_ACK_ELICITING)) {
*rtt_sample = tick_remain(time_sent, now_ms);
qel->pktns->rx.largest_acked_pn = ack_frm->largest_ack;
if (!qc_release_lost_pkts(qc, qel->pktns, &lost_pkts, now_ms))
goto leave;
}
- qc_treat_newly_acked_pkts(qc, &newly_acked_pkts);
+ qc_notify_cc_of_newly_acked_pkts(qc, &newly_acked_pkts);
if (quic_peer_validated_addr(qc))
qc->path->loss.pto_count = 0;
qc_set_timer(qc);