A packet is considered as reordered when it is detected as lost because its packet
number is above the largest acknowledged packet number by at least the
packet reordering threshold value.
Add a new ->nb_reordered_pkt member to the quic_loss struct, at the same
location as the lost packet counter, to count such packets.
Should be backported to 2.6.
/* Number of NACKed sent PTO. */
unsigned int pto_count;
unsigned long nb_lost_pkt;
+ unsigned long nb_reordered_pkt;
};
#endif /* USE_QUIC */
ql->rtt_min = 0;
ql->pto_count = 0;
ql->nb_lost_pkt = 0;
+ ql->nb_reordered_pkt = 0;
}
/* Return 1 if a persistent congestion is observed for a list of
}
chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u cwnd=%-6llu"
- " mcwnd=%-6llu sentpkts=%-6llu lostpkts=%-6llu\n",
+ " mcwnd=%-6llu sentpkts=%-6llu lostpkts=%-6llu reorderedpkts=%-6llu\n",
qc->path->loss.srtt, qc->path->loss.rtt_var,
qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd,
- (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt);
+ (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt);
if (qc->cntrs.dropped_pkt) {
chunk_appendf(&trash, " droppkts=%-6llu", qc->cntrs.dropped_pkt);
struct quic_tx_packet *pkt;
int64_t largest_acked_pn;
unsigned int loss_time_limit, time_sent;
+ int reordered;
pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
largest_acked_pn = pktns->rx.largest_acked_pn;
time_sent = pkt->time_sent;
loss_time_limit = tick_add(time_sent, loss_delay);
- if (tick_is_le(loss_time_limit, now_ms) ||
- (int64_t)largest_acked_pn >= pkt->pn_node.key + pktthresh) {
+
+ reordered = (int64_t)largest_acked_pn >= pkt->pn_node.key + pktthresh;
+ if (reordered)
+ ql->nb_reordered_pkt++;
+
+ if (tick_is_le(loss_time_limit, now_ms) || reordered) {
eb64_delete(&pkt->pn_node);
LIST_APPEND(lost_pkts, &pkt->list);
ql->nb_lost_pkt++;