/* Congestion window. */
uint64_t cwnd;
- /* The current maximum congestion window value reached. */
- uint64_t mcwnd;
- /* The maximum congestion window value which can be reached. */
- uint64_t max_cwnd;
- /* Minimum congestion window. */
- uint64_t min_cwnd;
+ /* The maximum congestion window value reached so far. */
+ uint64_t cwnd_last_max;
+ /* Maximum limit on the congestion window size. */
+ uint64_t limit_max;
+ /* Minimum limit on the congestion window size. */
+ uint64_t limit_min;
/* Prepared data to be sent (in bytes). */
uint64_t prep_in_flight;
/* Outstanding data (in bytes). */
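The rename makes the three roles explicit: limit_min and limit_max bound the window from below and above, while cwnd_last_max merely records the highest value the window has reached. A minimal sketch of the intended invariant, using only the fields above (the helper name is hypothetical, not part of the patch):

    /* Hypothetical helper illustrating the invariant the renamed
     * fields encode: limit_min <= cwnd <= limit_max, with
     * cwnd_last_max tracking the highest window seen so far.
     */
    static inline void quic_cc_path_clamp_cwnd(struct quic_cc_path *path)
    {
        if (path->cwnd > path->limit_max)
            path->cwnd = path->limit_max;
        if (path->cwnd < path->limit_min)
            path->cwnd = path->limit_min;
        if (path->cwnd > path->cwnd_last_max)
            path->cwnd_last_max = path->cwnd;
    }
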
*(size_t *)&path->mtu = max_dgram_sz;
path->initial_wnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
path->cwnd = path->initial_wnd;
- path->mcwnd = path->cwnd;
- path->max_cwnd = max_cwnd;
- path->min_cwnd = max_dgram_sz << 1;
+ path->cwnd_last_max = path->cwnd;
+ path->limit_max = max_cwnd;
+ path->limit_min = max_dgram_sz << 1;
path->prep_in_flight = 0;
path->in_flight = 0;
path->ifae_pkts = 0;
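The initial window above follows RFC 9002 section 7.2: min(10 * max_datagram_size, max(2 * max_datagram_size, 14720)). Worked numbers, assuming a max_dgram_sz of 1252 bytes (a common QUIC value; the number itself is an assumption, not taken from this patch):

    /* initial_wnd = QUIC_MIN(10 * 1252, QUIC_MAX(2 * 1252, 14720))
     *             = QUIC_MIN(12520, 14720) = 12520 bytes
     * limit_min   = 1252 << 1             =  2504 bytes
     */

So a fresh path starts with a ten-datagram window and can never be squeezed below two datagrams.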
bbr_bound_cwnd_for_probe_rtt(bbr, p);
bbr_bound_cwnd_for_model(bbr, p);
/* Limitation by configuration (not in BBR RFC). */
- p->cwnd = MIN(p->cwnd, p->max_cwnd);
+ p->cwnd = MIN(p->cwnd, p->limit_max);
}
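Note the ordering: BBR's own ProbeRTT and model bounds run first, and the configuration cap is applied last, so limit_max always wins. An illustrative restatement, not code from the patch:

    /* Effective window after all three clamps, whatever the model
     * computed (illustrative only):
     *   cwnd = MIN(limit_max, probe_rtt_bound, model_bound)
     */
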
static int bbr_init(struct quic_cc *cc)
if (quic_cwnd_may_increase(path)) {
path->cwnd += inc;
- path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
leave:
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
}
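quic_cwnd_may_increase() gates every growth path in this patch, but its body is outside the hunks shown. A plausible stand-in under an explicit assumption, namely that the window may only grow while it is actually being used; the condition below is a guess, not the real implementation:

    /* Hypothetical stand-in for the growth gate; the real condition
     * lives outside this diff and may differ. Assumed rule: only
     * grow a window the sender is close to filling.
     */
    static inline int quic_cwnd_may_increase_sketch(const struct quic_cc_path *path)
    {
        return path->prep_in_flight + path->in_flight >= path->cwnd / 2;
    }
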
c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
- path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->min_cwnd);
+ path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
c->state = QUIC_CC_ST_RP;
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
}
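This is CUBIC's multiplicative decrease on entering recovery. Worked numbers, assuming CUBIC_BETA_SCALED == 717 and CUBIC_SCALE_FACTOR_SHIFT == 10, i.e. beta_cubic = 717/1024 ~= 0.7 as in RFC 9438 (the exact constants are an assumption here):

    /* cwnd = 30000:
     *   ssthresh = (717 * 30000) >> 10        = 21005   (~0.7 * cwnd)
     *   cwnd     = QUIC_MAX(21005, limit_min) = 21005
     */
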
if (quic_cwnd_may_increase(path)) {
path->cwnd += acked;
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (ev->ack.pn >= h->wnd_end)
else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
if (quic_cwnd_may_increase(path)) {
path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+ path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
}
}
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd >= c->ssthresh)
c->state = QUIC_CC_ST_CA;
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
break;
case QUIC_CC_EVT_LOSS:
if (quic_cwnd_may_increase(path)) {
path->cwnd += acked;
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (quic_cc_hystart_may_reenter_ss(h)) {
struct cubic *c = quic_cc_priv(cc);
path = container_of(cc, struct quic_cc_path, cc);
- chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
+ chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%d rpst=%dms",
quic_cc_state_str(c->state),
(unsigned long long)path->cwnd,
- (unsigned long long)path->mcwnd,
+ (unsigned long long)path->cwnd_last_max,
(int)c->ssthresh,
!tick_isset(c->recovery_start_time) ? -1 :
TICKS_TO_MS(tick_remain(c->recovery_start_time, now_ms)));
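With the rename, a CUBIC trace line now reads along these lines (values illustrative; rpst is -1 while no recovery start time is set):

     state=ca cwnd=28960 cwnd_last_max=43440 ssthresh=20272 rpst=-1ms
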
struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_cc_path, cc);
- path->cwnd = path->min_cwnd;
+ path->cwnd = path->limit_min;
/* Re-entering slow start state. */
nr->state = QUIC_CC_ST_SS;
/* Recovery start time reset */
path = container_of(cc, struct quic_cc_path, cc);
nr->recovery_start_time = now_ms;
nr->ssthresh = path->cwnd >> 1;
- path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
+ path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
nr->state = QUIC_CC_ST_RP;
}
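NewReno's reaction to loss halves the window, floored at limit_min. Worked numbers, assuming limit_min == 2504 (two 1252-byte datagrams, as in the init hunk above):

    /* cwnd = 24000: ssthresh = 12000, cwnd = QUIC_MAX(12000, 2504) = 12000
     * cwnd = 4000:  ssthresh = 2000,  cwnd = QUIC_MAX(2000, 2504)  = 2504
     */
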
case QUIC_CC_EVT_ACK:
if (quic_cwnd_may_increase(path)) {
path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd > nr->ssthresh)
nr->remain_acked = acked % path->cwnd;
if (quic_cwnd_may_increase(path)) {
path->cwnd += acked / path->cwnd;
- path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
- path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
}
break;
}
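In congestion avoidance the integer division acked / cwnd drops a remainder on every ACK, so remain_acked carries it into the next round instead of losing it. An illustrative round, assuming the ACKed count has already been scaled upstream so that a full window of ACKs yields roughly one datagram of growth (that scaling happens before the lines shown and is an assumption here):

    /* cwnd = 12000, acked = 15000 (carry included):
     *   remain_acked = 15000 % 12000 = 3000 (carried to the next ACK)
     *   cwnd        += 15000 / 12000 = 1    (then clamped to limit_max)
     */
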
struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_cc_path, cc);
- chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
+ chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
quic_cc_state_str(nr->state),
(unsigned long long)path->cwnd,
- (unsigned long long)path->mcwnd,
+ (unsigned long long)path->cwnd_last_max,
(long)nr->ssthresh,
!tick_isset(nr->recovery_start_time) ? -1 :
TICKS_TO_MS(tick_remain(nr->recovery_start_time, now_ms)),
struct quic_cc_path *path;
path = container_of(cc, struct quic_cc_path, cc);
- path->cwnd = path->max_cwnd;
+ path->cwnd = path->limit_max;
return 1;
}
qc->path->cc.algo->state_cli(&trash, qc->path);
chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u\n"
- " cwnd=%-6llu mcwnd=%-6llu\n"
+ " cwnd=%-6llu cwnd_last_max=%-6llu\n"
" sentbytes=%-12llu sentbytesgso=%-12llu sentpkts=%-6llu\n"
" lostpkts=%-6llu reorderedpkts=%-6llu\n",
qc->path->loss.srtt, qc->path->loss.rtt_var,
qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd,
- (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_bytes, (ullong)qc->cntrs.sent_bytes_gso,
+ (ullong)qc->path->cwnd_last_max, (ullong)qc->cntrs.sent_bytes, (ullong)qc->cntrs.sent_bytes_gso,
(ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt);
}
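For reference, the CLI dump above would now render along these lines (all values illustrative):

     srtt=25   rttvar=12   rttmin=20   ptoc=0
     cwnd=28960 cwnd_last_max=43440
     sentbytes=1048576     sentbytesgso=0            sentpkts=812
     lostpkts=3      reorderedpkts=1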