Add the number of lost packets and the maximum congestion control window computed
by the algorithms to "show quic".
Do the same for the traces of the existing congestion control algorithms.
Must be backported to 2.7 and 2.6.
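For illustration, the per-path line of "show quic" extended by this patch would
look roughly as follows (layout taken from the format string below, values are
made up):
  srtt=25   rttvar=12   rttmin=20   ptoc=0    cwnd=48000  mcwnd=61440  lostpkts=3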
size_t mtu;
/* Congestion window. */
uint64_t cwnd;
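+ /* Maximum congestion window reached. */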
+ uint64_t mcwnd;
/* Minimum congestion window. */
uint64_t min_cwnd;
/* Prepared data to be sent (in bytes). */
quic_loss_init(&path->loss);
path->mtu = max_dgram_sz;
path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
+ path->mcwnd = path->cwnd;
path->min_cwnd = max_dgram_sz << 1;
path->prep_in_flight = 0;
path->in_flight = 0;
unsigned int rtt_min;
/* Number of NACKed sent PTO. */
unsigned int pto_count;
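+ /* Number of detected lost packets. */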
+ unsigned long nb_lost_pkt;
};
#endif /* USE_QUIC */
ql->rtt_var = (QUIC_LOSS_INITIAL_RTT >> 1) << 2;
ql->rtt_min = 0;
ql->pto_count = 0;
+ ql->nb_lost_pkt = 0;
}
/* Return 1 if a persistent congestion is observed for a list of
}
path->cwnd += inc;
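+ /* Keep track of the maximum congestion window reached. */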
+ path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
leave:
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
}
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd >= c->ssthresh)
c->state = QUIC_CC_ST_CA;
+ path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
break;
case QUIC_CC_EVT_LOSS:
struct cubic *c = quic_cc_priv(cc);
path = container_of(cc, struct quic_path, cc);
- chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%d rpst=%dms",
+ chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
quic_cc_state_str(c->state),
(unsigned long long)path->cwnd,
+ (unsigned long long)path->mcwnd,
(int)c->ssthresh,
!tick_isset(c->recovery_start_time) ? -1 :
TICKS_TO_MS(tick_remain(c->recovery_start_time, now_ms)));
switch (ev->type) {
case QUIC_CC_EVT_ACK:
path->cwnd += ev->ack.acked;
+ path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd > nr->ssthresh)
nr->state = QUIC_CC_ST_CA;
acked = ev->ack.acked * path->mtu + nr->remain_acked;
nr->remain_acked = acked % path->cwnd;
path->cwnd += acked / path->cwnd;
+ path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
break;
}
struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_path, cc);
- chunk_appendf(buf, " state=%s cwnd=%llu ssthresh=%ld recovery_start_time=%llu",
+ chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
quic_cc_state_str(nr->state),
(unsigned long long)path->cwnd,
+ (unsigned long long)path->mcwnd,
(long)nr->ssthresh,
- (unsigned long long)nr->recovery_start_time);
+ !tick_isset(nr->recovery_start_time) ? -1 :
+ TICKS_TO_MS(tick_remain(nr->recovery_start_time, now_ms)),
+ (unsigned long long)path->loss.nb_lost_pkt);
}
static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
chunk_appendf(&trash, " [01rtt] rx.ackrng=%-6zu tx.inflight=%-6zu\n",
pktns->rx.arngs.sz, pktns->tx.in_flight);
- chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u cwnd=%-6llu\n",
+ chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u cwnd=%-6llu"
+ " mcwnd=%-6llu lostpkts=%-6llu\n",
qc->path->loss.srtt >> 3, qc->path->loss.rtt_var >> 2,
- qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd);
+ qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd,
+ (ullong)qc->path->mcwnd, (ullong)qc->path->loss.nb_lost_pkt);
/* Streams */
(int64_t)largest_acked_pn >= pkt->pn_node.key + QUIC_LOSS_PACKET_THRESHOLD) {
eb64_delete(&pkt->pn_node);
LIST_APPEND(lost_pkts, &pkt->list);
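+ /* Count this packet as lost for "show quic" and the CC traces. */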
+ ql->nb_lost_pkt++;
HA_ATOMIC_INC(&qc->prx_counters->lost_pkt);
}
else {