return path->cwnd - path->prep_in_flight;
}
-int quic_cwnd_may_increase(const struct quic_cc_path *path);
+void quic_cc_path_reset(struct quic_cc_path *path);
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val);
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val);
#endif /* USE_QUIC */
#endif /* _PROTO_QUIC_CC_H */
}
/* Returns true if congestion window on path ought to be increased. */
-int quic_cwnd_may_increase(const struct quic_cc_path *path)
+static int quic_cwnd_may_increase(const struct quic_cc_path *path)
{
/* RFC 9002 7.8. Underutilizing the Congestion Window
 *
 * When bytes in flight is smaller than the congestion window and sending
 * is not pacing limited, the congestion window is underutilized. When
 * this occurs, the congestion window SHOULD NOT be increased in either
 * slow start or congestion avoidance.
 */
return 2 * path->in_flight >= path->cwnd || path->cwnd < 16384;
}
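A quick illustration of this check, with arbitrary values (not part of the patch):

/* With cwnd = 40000 and in_flight = 15000: 2 * 15000 = 30000 < 40000 and
 * cwnd >= 16384, so the window is under-utilized and may not grow. With
 * in_flight = 25000: 2 * 25000 = 50000 >= 40000, so growth is allowed.
 * Below the 16384-byte floor growth is always permitted, letting a small
 * window ramp up even when lightly used.
 */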
+
+/* Reset congestion window for <path> to its minimal value. */
+void quic_cc_path_reset(struct quic_cc_path *path)
+{
+ path->cwnd = path->limit_min;
+}
+
+/* Set congestion window for <path> to <val>. Min and max limits are enforced. */
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val)
+{
+ path->cwnd = QUIC_MIN(val, path->limit_max);
+ path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+}
+
+/* Increment congestion window for <path> by <val>. Min and max limits are
+ * enforced. Unlike quic_cc_path_set(), the increase is only applied when a
+ * minimal share of the window is already utilized, as checked by
+ * quic_cwnd_may_increase().
+ */
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val)
+{
+ if (quic_cwnd_may_increase(path)) {
+ path->cwnd = QUIC_MIN(path->cwnd + val, path->limit_max);
+ path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+ path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+ }
+}
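A minimal standalone sketch of the semantics the three helpers above provide. struct mock_path and all values are hypothetical stand-ins for struct quic_cc_path, not HAProxy code:

#include <stdint.h>
#include <stdio.h>

/* Stripped-down stand-in keeping only the fields the helpers touch. */
struct mock_path {
	uint64_t cwnd, cwnd_last_max, in_flight;
	uint64_t limit_min, limit_max;
};

/* Same clamping as quic_cc_path_set(): bound by limit_max, then limit_min,
 * and keep cwnd_last_max up to date.
 */
static void mock_set(struct mock_path *p, uint64_t val)
{
	p->cwnd = val < p->limit_max ? val : p->limit_max;
	if (p->cwnd < p->limit_min)
		p->cwnd = p->limit_min;
	if (p->cwnd > p->cwnd_last_max)
		p->cwnd_last_max = p->cwnd;
}

/* Same gating as quic_cc_path_inc(): grow only when the window is
 * sufficiently utilized or still below the 16384-byte floor.
 */
static void mock_inc(struct mock_path *p, uint64_t val)
{
	if (2 * p->in_flight >= p->cwnd || p->cwnd < 16384)
		mock_set(p, p->cwnd + val);
}

int main(void)
{
	struct mock_path p = { .cwnd = 30000, .cwnd_last_max = 30000,
	                       .in_flight = 5000,
	                       .limit_min = 2960, .limit_max = 480000 };

	mock_inc(&p, 1200);  /* 2 * 5000 < 30000: under-utilized, no growth */
	printf("cwnd=%llu\n", (unsigned long long)p.cwnd);  /* 30000 */

	mock_set(&p, 1000);  /* below limit_min: clamped up to 2960 */
	printf("cwnd=%llu\n", (unsigned long long)p.cwnd);  /* 2960 */

	mock_inc(&p, 1200);  /* cwnd < 16384: growth always allowed */
	printf("cwnd=%llu\n", (unsigned long long)p.cwnd);  /* 4160 */
	return 0;
}

A side effect visible in this patch: call sites converted to quic_cc_path_set(), such as bbr_restore_cwnd() below, now also get the configured min/max clamping that the raw assignments lacked, which is why the explicit QUIC_MAX() guards against limit_min could be dropped.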
static void bbr_restore_cwnd(struct bbr *bbr, struct quic_cc_path *p)
{
- p->cwnd = MAX(p->cwnd, bbr->prior_cwnd);
+ quic_cc_path_set(p, MAX(p->cwnd, bbr->prior_cwnd));
}
/* <gain> must be provided as a percentage. */
cwnd = bbr_bound_cwnd_for_probe_rtt(bbr, p, cwnd);
cwnd = bbr_bound_cwnd_for_model(bbr, p, cwnd);
/* Limitation by configuration (not in BBR RFC). */
- p->cwnd = MIN(cwnd, p->limit_max);
+ quic_cc_path_set(p, cwnd);
}
static int bbr_init(struct quic_cc *cc)
bbr->round_count_at_recovery =
bbr->round_start ? bbr->round_count : bbr->round_count + 1;
bbr_save_cwnd(bbr, p);
- p->cwnd = p->in_flight + MAX(acked, p->mtu);
+ quic_cc_path_set(p, p->in_flight + MAX(acked, p->mtu));
p->recovery_start_ts = bbr->recovery_start_ts;
bbr->recovery_start_ts = TICK_ETERNITY;
}
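On loss, BBR collapses the window to what is still in flight plus room for roughly one more datagram; an illustration with arbitrary values:

/* With in_flight = 20000, acked = 500 and mtu = 1200, the window becomes
 * 20000 + MAX(500, 1200) = 21200 bytes. The previous window was saved by
 * bbr_save_cwnd() and may later be restored via bbr_restore_cwnd().
 */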
inc = W_est_inc;
}
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += inc;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, inc);
leave:
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
}
}
c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
- path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
+ quic_cc_path_set(path, c->ssthresh);
c->state = QUIC_CC_ST_RP;
TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
}
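Assuming CUBIC's multiplicative-decrease factor beta = 0.7 in fixed point (e.g. CUBIC_BETA_SCALED = 717 with CUBIC_SCALE_FACTOR_SHIFT = 10; the macro values are not visible in this hunk), the reduction works out as:

/* Worked example: cwnd = 100000
 *   ssthresh = (717 * 100000) >> 10 = 70019, i.e. ~0.7 * cwnd
 * quic_cc_path_set() then re-applies the configured bounds, replacing the
 * explicit QUIC_MAX() against limit_min the old code needed.
 */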
if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
goto out;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked);
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (ev->ack.pn >= h->wnd_end)
h->wnd_end = UINT64_MAX;
}
}
else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- }
+ quic_cc_path_inc(path, ev->ack.acked);
}
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd >= c->ssthresh)
c->state = QUIC_CC_ST_CA;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
break;
case QUIC_CC_EVT_LOSS:
if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
goto out;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked;
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked);
quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
if (quic_cc_hystart_may_reenter_ss(h)) {
/* Exit to slow start */
struct nr *nr = quic_cc_priv(cc);
path = container_of(cc, struct quic_cc_path, cc);
- path->cwnd = path->limit_min;
+ quic_cc_path_reset(path);
/* Re-entering slow start state. */
nr->state = QUIC_CC_ST_SS;
/* Recovery start time reset */
path = container_of(cc, struct quic_cc_path, cc);
nr->recovery_start_time = now_ms;
nr->ssthresh = path->cwnd >> 1;
- path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
+ quic_cc_path_set(path, nr->ssthresh);
nr->state = QUIC_CC_ST_RP;
}
path = container_of(cc, struct quic_cc_path, cc);
switch (ev->type) {
case QUIC_CC_EVT_ACK:
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += ev->ack.acked;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, ev->ack.acked);
/* Exit to congestion avoidance if slow start threshold is reached. */
if (path->cwnd > nr->ssthresh)
nr->state = QUIC_CC_ST_CA;
*/
acked = ev->ack.acked * path->mtu + nr->remain_acked;
nr->remain_acked = acked % path->cwnd;
- if (quic_cwnd_may_increase(path)) {
- path->cwnd += acked / path->cwnd;
- path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
- path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
- }
+ quic_cc_path_inc(path, acked / path->cwnd);
break;
}
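The byte counting above yields NewReno's congestion-avoidance growth of roughly one MTU per RTT; a worked example with arbitrary values, taking ev->ack.acked as a byte count:

/* With mtu = 1200, cwnd = 30000, remain_acked = 0 and an ACK covering
 * 6000 bytes:
 *   acked     = 6000 * 1200 = 7200000
 *   increment = 7200000 / 30000 = 240 bytes, remainder 0
 * Acknowledging a full window (30000 bytes) thus grows cwnd by one MTU
 * (1200 bytes) per RTT, remain_acked carrying the rounding remainder into
 * the next ACK.
 */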
nr->state = QUIC_CC_ST_CA;
nr->recovery_start_time = TICK_ETERNITY;
- path->cwnd = nr->ssthresh;
+ quic_cc_path_set(path, nr->ssthresh);
break;
case QUIC_CC_EVT_LOSS:
/* Do nothing */