}
static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) {
- cstat->pacing_interval =
- (cstat->first_rtt_sample_ts == UINT64_MAX ? NGTCP2_MILLISECONDS
- : cstat->smoothed_rtt) *
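+ /* pacing_interval_m is expressed in 1/1024 of a nanosecond, hence
+ the << 10.  The initial interval amounts to roughly
+ RTT / (startup gain * initial cwnd), with the gain given in
+ hundredths. */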
+ cstat->pacing_interval_m =
+ ((cstat->first_rtt_sample_ts == UINT64_MAX ? NGTCP2_MILLISECONDS
+ : cstat->smoothed_rtt)
+ << 10) *
100 / NGTCP2_BBR_STARTUP_PACING_GAIN_H / bbr->initial_cwnd;
}
static void bbr_set_pacing_rate_with_gain(ngtcp2_cc_bbr *bbr,
ngtcp2_conn_stat *cstat,
uint64_t pacing_gain_h) {
- ngtcp2_duration interval;
+ uint64_t interval_m;
if (bbr->bw == 0) {
return;
}
- interval = NGTCP2_SECONDS * 100 * 100 / pacing_gain_h / bbr->bw /
- (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT);
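+ /* interval per byte = 1 second / (pacing_gain * bw * (100 -
+ NGTCP2_BBR_PACING_MARGIN_PERCENT) / 100), scaled to 1/1024 of a
+ nanosecond.  Clamp it to at least 1 so that it never becomes
+ zero. */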
+ interval_m = (NGTCP2_SECONDS << 10) * 100 * 100 / pacing_gain_h / bbr->bw /
+ (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT);
+ interval_m = ngtcp2_max_uint64(interval_m, 1);
- if (bbr->full_bw_reached || interval < cstat->pacing_interval) {
- cstat->pacing_interval = interval;
+ if (bbr->full_bw_reached || interval_m < cstat->pacing_interval_m) {
+ cstat->pacing_interval_m = interval_m;
}
}
size_t send_quantum = 64 * 1024;
(void)bbr;
- if (cstat->pacing_interval) {
- send_quantum = ngtcp2_min_size(
- send_quantum, (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval));
- }
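+ /* Cap send_quantum at the number of bytes that the current pacing
+ interval lets us send in roughly 1 millisecond. */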
+ send_quantum =
+ ngtcp2_min_size(send_quantum, (size_t)((NGTCP2_MILLISECONDS << 10) /
+ cstat->pacing_interval_m));
cstat->send_quantum =
ngtcp2_max_size(send_quantum, 2 * cstat->max_tx_udp_payload_size);
v->epoch_start = UINT64_MAX;
v->w_est = 0;
- v->state = NGTCP2_CUBIC_STATE_INITIAL;
v->app_limited_start_ts = UINT64_MAX;
v->app_limited_duration = 0;
v->pending_bytes_delivered = 0;
#define NGTCP2_HS_CSS_GROWTH_DIVISOR 4
#define NGTCP2_HS_CSS_ROUNDS 5
-static uint64_t cubic_cc_compute_w_cubic(ngtcp2_cc_cubic *cubic,
- const ngtcp2_conn_stat *cstat,
- ngtcp2_tstamp ts) {
+static int64_t cubic_cc_compute_w_cubic(ngtcp2_cc_cubic *cubic,
+ const ngtcp2_conn_stat *cstat,
+ ngtcp2_tstamp ts) {
ngtcp2_duration t = ts - cubic->current.epoch_start;
- uint64_t delta;
- uint64_t tx = (t << 10) / NGTCP2_SECONDS;
- uint64_t kx = (cubic->current.k << 10) / NGTCP2_SECONDS;
- uint64_t time_delta;
-
- if (tx < kx) {
- return UINT64_MAX;
- }
-
- time_delta = tx - kx;
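+ /* tx and k are kept in 1/1024 of a second.  time_delta is capped at
+ one hour so that the cubed intermediates stay within 64 bits; the
+ two shifts drop the 2^30 fixed-point scaling of the cube.  The
+ result may lie below w_max before t reaches K, hence the signed
+ return type. */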
+ int64_t tx = (int64_t)((t << 10) / NGTCP2_SECONDS);
+ int64_t time_delta = ngtcp2_min_int64(tx - cubic->current.k, 3600 << 10);
+ int64_t delta = ((((time_delta * time_delta) >> 10) * time_delta) >> 20) *
+ (int64_t)cstat->max_tx_udp_payload_size * 4 / 10;
- delta = cstat->max_tx_udp_payload_size *
- ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10;
-
- return cubic->current.w_max + (delta >> 10);
+ return (int64_t)cubic->current.w_max + delta;
}
void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat,
const ngtcp2_cc_ack *ack,
ngtcp2_tstamp ts) {
ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc);
- uint64_t w_cubic, w_cubic_next, target, m;
+ int64_t w_cubic, w_cubic_next;
+ uint64_t target, m;
ngtcp2_duration rtt_thresh;
int round_start;
int is_app_limited =
return;
}
- if (cubic->current.state == NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE) {
- if (is_app_limited) {
- if (cubic->current.app_limited_start_ts == UINT64_MAX) {
- cubic->current.app_limited_start_ts = ts;
- }
-
- return;
- }
+ if (cstat->cwnd < cstat->ssthresh) {
+ /* slow-start */
+ round_start = ack->pkt_delivered >= cubic->next_round_delivered;
+ if (round_start) {
+ cubic->next_round_delivered = cubic->rst->delivered;
- if (cubic->current.app_limited_start_ts != UINT64_MAX) {
- cubic->current.app_limited_duration +=
- ts - cubic->current.app_limited_start_ts;
- cubic->current.app_limited_start_ts = UINT64_MAX;
+ cubic->rst->is_cwnd_limited = 0;
}
- } else if (is_app_limited) {
- return;
- }
- round_start = ack->pkt_delivered >= cubic->next_round_delivered;
- if (round_start) {
- cubic->next_round_delivered = cubic->rst->delivered;
-
- cubic->rst->is_cwnd_limited = 0;
- }
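+ /* Only grow cwnd when the sender is actually limited by it; the
+ per-round bookkeeping above still runs while application
+ limited. */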
+ if (!is_app_limited) {
+ if (cubic->hs.css_round) {
+ cstat->cwnd += ack->bytes_delivered / NGTCP2_HS_CSS_GROWTH_DIVISOR;
+ } else {
+ cstat->cwnd += ack->bytes_delivered;
+ }
- if (cstat->cwnd < cstat->ssthresh) {
- /* slow-start */
- if (cubic->hs.css_round) {
- cstat->cwnd += ack->bytes_delivered / NGTCP2_HS_CSS_GROWTH_DIVISOR;
- } else {
- cstat->cwnd += ack->bytes_delivered;
+ ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA,
+ "%" PRIu64 " bytes acked, slow start cwnd=%" PRIu64,
+ ack->bytes_delivered, cstat->cwnd);
}
- ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA,
- "%" PRIu64 " bytes acked, slow start cwnd=%" PRIu64,
- ack->bytes_delivered, cstat->cwnd);
-
if (round_start) {
cubic->hs.last_round_min_rtt = cubic->hs.current_round_min_rtt;
cubic->hs.current_round_min_rtt = UINT64_MAX;
ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA,
"HyStart++ exit slow start");
+ cubic->current.epoch_start = ts;
+ cubic->current.w_max = cstat->cwnd;
cstat->ssthresh = cstat->cwnd;
+ cubic->current.cwnd_prior = cstat->cwnd;
+ cubic->current.w_est = cstat->cwnd;
}
return;
}
/* congestion avoidance */
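+ /* Application limited periods are excluded from the cubic time
+ axis: remember when such a period starts and add its length to
+ app_limited_duration once it ends. */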
+ if (is_app_limited) {
+ if (cubic->current.app_limited_start_ts == UINT64_MAX) {
+ cubic->current.app_limited_start_ts = ts;
+ }
- switch (cubic->current.state) {
- case NGTCP2_CUBIC_STATE_INITIAL:
- m = cstat->max_tx_udp_payload_size * ack->bytes_delivered +
- cubic->current.pending_bytes_delivered;
- cstat->cwnd += m / cstat->cwnd;
- cubic->current.pending_bytes_delivered = m % cstat->cwnd;
return;
- case NGTCP2_CUBIC_STATE_RECOVERY:
- cubic->current.state = NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE;
- cubic->current.epoch_start = ts;
- break;
- default:
- break;
+ }
+
+ if (cubic->current.app_limited_start_ts != UINT64_MAX) {
+ cubic->current.app_limited_duration +=
+ ts - cubic->current.app_limited_start_ts;
+ cubic->current.app_limited_start_ts = UINT64_MAX;
}
w_cubic = cubic_cc_compute_w_cubic(cubic, cstat,
ts - cubic->current.app_limited_duration);
w_cubic_next = cubic_cc_compute_w_cubic(
cubic, cstat,
ts - cubic->current.app_limited_duration + cstat->smoothed_rtt);
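+ /* Clamp the per-RTT target to the range [cwnd, 1.5 * cwnd] as
+ described in RFC 9438. */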
- if (w_cubic_next == UINT64_MAX || w_cubic_next < cstat->cwnd) {
+ if (w_cubic_next < (int64_t)cstat->cwnd) {
target = cstat->cwnd;
- } else if (2 * w_cubic_next > 3 * cstat->cwnd) {
+ } else if (2 * w_cubic_next > 3 * (int64_t)cstat->cwnd) {
target = cstat->cwnd * 3 / 2;
} else {
- target = w_cubic_next;
+ assert(w_cubic_next >= 0);
+ target = (uint64_t)w_cubic_next;
}
m = ack->bytes_delivered * cstat->max_tx_udp_payload_size +
cubic->current.w_est += m / cstat->cwnd;
}
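+ /* Reno-friendly region: follow w_est while it exceeds the cubic
+ window, otherwise grow cwnd towards target. */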
- if (w_cubic == UINT64_MAX || cubic->current.w_est > w_cubic) {
+ if ((int64_t)cubic->current.w_est > w_cubic) {
cstat->cwnd = cubic->current.w_est;
} else {
m = (target - cstat->cwnd) * cstat->max_tx_udp_payload_size +
ngtcp2_tstamp ts) {
ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc);
uint64_t flight_size;
+ uint64_t cwnd_delta;
if (in_congestion_recovery(cstat, sent_ts)) {
return;
cstat->congestion_recovery_start_ts = ts;
- cubic->current.state = NGTCP2_CUBIC_STATE_RECOVERY;
- cubic->current.epoch_start = UINT64_MAX;
+ cubic->current.epoch_start = ts;
cubic->current.app_limited_start_ts = UINT64_MAX;
cubic->current.app_limited_duration = 0;
cubic->current.pending_bytes_delivered = 0;
flight_size = cstat->bytes_in_flight + bytes_lost;
cstat->ssthresh = ngtcp2_min_uint64(
cstat->ssthresh,
- ngtcp2_max_uint64(cubic->rst->rs.delivered, flight_size) * 7 / 10);
+ ngtcp2_max_uint64(cubic->rst->rs.delivered, flight_size));
}
cstat->ssthresh =
cubic->current.w_est = cstat->cwnd;
if (cstat->cwnd < cubic->current.w_max) {
- cubic->current.k =
- ngtcp2_cbrt(((cubic->current.w_max - cstat->cwnd) << 10) * 10 / 4 /
- cstat->max_tx_udp_payload_size) *
- NGTCP2_SECONDS;
- cubic->current.k >>= 10;
+ cwnd_delta = cubic->current.w_max - cstat->cwnd;
} else {
- cubic->current.k = 0;
+ cwnd_delta = cstat->cwnd - cubic->current.w_max;
+ }
+
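+ /* K is kept in 1/1024 of a second, hence the << 30 inside the cube
+ root.  When cwnd is already at or above w_max, K is negated so
+ that W_cubic(epoch_start) equals the new cwnd and the window
+ immediately grows along the convex part of the curve. */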
+ cubic->current.k = (int64_t)ngtcp2_cbrt((cwnd_delta << 30) * 10 / 4 /
+ cstat->max_tx_udp_payload_size);
+ if (cstat->cwnd >= cubic->current.w_max) {
+ cubic->current.k = -cubic->current.k;
}
ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA,
void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat,
ngtcp2_tstamp ts);
-typedef enum ngtcp2_cubic_state {
- /* NGTCP2_CUBIC_STATE_INITIAL is the state where CUBIC is in slow
- start phase, or congestion avoidance phase before congestion
- events occur. */
- NGTCP2_CUBIC_STATE_INITIAL,
- /* NGTCP2_CUBIC_STATE_RECOVERY is the state that a connection is in
- recovery period. */
- NGTCP2_CUBIC_STATE_RECOVERY,
- /* NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE is the state where CUBIC
- is in congestion avoidance phase after recovery period ends. */
- NGTCP2_CUBIC_STATE_CONGESTION_AVOIDANCE,
-} ngtcp2_cubic_state;
-
typedef struct ngtcp2_cubic_vars {
uint64_t cwnd_prior;
uint64_t w_max;
- ngtcp2_duration k;
+ int64_t k;
ngtcp2_tstamp epoch_start;
uint64_t w_est;
- ngtcp2_cubic_state state;
/* app_limited_start_ts is the timestamp where app limited period
started. */
ngtcp2_tstamp app_limited_start_ts;
cstat->congestion_recovery_start_ts = UINT64_MAX;
cstat->bytes_in_flight = 0;
cstat->delivery_rate_sec = 0;
- cstat->pacing_interval = 0;
+ cstat->pacing_interval_m = 0;
cstat->send_quantum = 64 * 1024;
}
require_padding = require_padding || !conn->server ||
destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE;
/* We don't retransmit PATH_RESPONSE. */
+
+ /* Include PING to make a packet non-probing as per
+ https://datatracker.ietf.org/doc/html/rfc9000#section-9.3.3
+
+ An endpoint that receives a PATH_CHALLENGE on an active
+ path SHOULD send a non-probing packet in response. */
+ lfr.type = NGTCP2_FRAME_PING;
+ rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &lfr);
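+ /* Running out of buffer space is the only tolerated failure; the
+ PING is simply omitted in that case. */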
+ if (rv != 0) {
+ assert(NGTCP2_ERR_NOBUF == rv);
+ }
}
}
}
rtb_entry_flags |= conn->pkt.rtb_entry_flags;
pkt_empty = conn->pkt.pkt_empty;
hd_logged = conn->pkt.hd_logged;
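+ /* require_padding is saved and restored across a suspended packet
+ construction like the rest of the per-packet state. */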
+ require_padding = conn->pkt.require_padding;
}
left = ngtcp2_ppe_left(ppe);
conn->pkt.pkt_empty = pkt_empty;
conn->pkt.rtb_entry_flags = rtb_entry_flags;
conn->pkt.hd_logged = hd_logged;
+ conn->pkt.require_padding = require_padding;
conn->flags |= NGTCP2_CONN_FLAG_PPE_PENDING;
}
conn->pkt.pkt_empty = pkt_empty;
conn->pkt.rtb_entry_flags = rtb_entry_flags;
conn->pkt.hd_logged = hd_logged;
+ conn->pkt.require_padding = require_padding;
conn->flags |= NGTCP2_CONN_FLAG_PPE_PENDING;
assert(vmsg);
if (spktlen > 0 &&
ngtcp2_pkt_get_type_long(version, dest[0]) == NGTCP2_PKT_INITIAL) {
wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING;
- conn->pkt.require_padding = 1;
}
} else {
assert(!conn->pktns.crypto.rx.ckm);
assert(!conn->pktns.crypto.tx.ckm);
assert(conn->early.ckm);
- if (conn->pkt.require_padding) {
- wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING;
- }
spktlen = conn->pkt.hs_spktlen;
}
if (ppe_pending) {
res = conn->pkt.hs_spktlen;
- if (conn->pkt.require_padding) {
- wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING;
- }
/* dest and destlen have already been adjusted in ppe in the first
run. They are adjusted for probe packet later. */
nwrite = conn_write_pkt(conn, pi, dest, destlen, (size_t)res, vmsg,
}
void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, ngtcp2_tstamp ts) {
- ngtcp2_duration pacing_interval;
+ uint64_t pacing_interval_m;
ngtcp2_duration wait, d;
conn_update_timestamp(conn, ts);
return;
}
- if (conn->cstat.pacing_interval) {
- pacing_interval = conn->cstat.pacing_interval;
+ if (conn->cstat.pacing_interval_m) {
+ pacing_interval_m = conn->cstat.pacing_interval_m;
} else {
/* 1.25 is the under-utilization avoidance factor described in
https://datatracker.ietf.org/doc/html/rfc9002#section-7.7 */
- pacing_interval = (conn->cstat.first_rtt_sample_ts == UINT64_MAX
- ? NGTCP2_MILLISECONDS
- : conn->cstat.smoothed_rtt) *
- 100 / 125 / conn->cstat.cwnd;
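+ /* The fallback interval is smoothed_rtt / (1.25 * cwnd), kept in
+ 1/1024 of a nanosecond and clamped to at least 1. */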
+ pacing_interval_m = ((conn->cstat.first_rtt_sample_ts == UINT64_MAX
+ ? NGTCP2_MILLISECONDS
+ : conn->cstat.smoothed_rtt)
+ << 10) *
+ 100 / 125 / conn->cstat.cwnd;
+ pacing_interval_m = ngtcp2_max_uint64(pacing_interval_m, 1);
}
- wait = (ngtcp2_duration)(conn->tx.pacing.pktlen * pacing_interval);
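+ /* pktlen * pacing_interval_m is in 1/1024 of a nanosecond; shift it
+ back to nanoseconds. */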
+ wait = (ngtcp2_duration)((conn->tx.pacing.pktlen * pacing_interval_m) >> 10);
- if (conn->tx.pacing.compensation >= NGTCP2_MILLISECONDS) {
- d = ngtcp2_min_uint64(wait, conn->tx.pacing.compensation);
- wait -= d;
- conn->tx.pacing.compensation -= d;
- }
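+ /* Shorten the wait by up to half using the accumulated
+ compensation; at least half of the pacing delay always remains. */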
+ d = ngtcp2_min_uint64(wait / 2, conn->tx.pacing.compensation);
+ wait -= d;
+ conn->tx.pacing.compensation -= d;
conn->tx.pacing.next_ts = ts + wait;
conn->tx.pacing.pktlen = 0;
*/
uint64_t delivery_rate_sec;
/**
- * :member:`pacing_interval` is the inverse of pacing rate, which is
- * the current packet sending rate computed by a congestion
+ * :member:`pacing_interval_m` is the inverse of pacing rate, which
+ * is the current packet sending rate computed by a congestion
* controller. 0 if a congestion controller does not set pacing
* interval. Even if this value is set to 0, the library paces
- * packets.
+ * packets. The unit of this value is 1/1024 of a nanosecond.
*/
- ngtcp2_duration pacing_interval;
+ uint64_t pacing_interval_m;
/**
* :member:`send_quantum` is the maximum size of a data aggregate
* scheduled and transmitted together.
* @macro
*
* :macro:`NGTCP2_WRITE_STREAM_FLAG_PADDING` indicates that a
- * non-empty 0 RTT or 1 RTT packet is padded to the minimum length of
- * a sending path MTU or a given packet buffer when finalizing it.
- * ACK, PATH_CHALLENGE, PATH_RESPONSE, CONNECTION_CLOSE only packets
- * and PMTUD packets are excluded.
+ * non-empty 0 RTT or 1 RTT ack-eliciting packet is padded to the
+ * minimum length of a sending path MTU or a given packet buffer when
+ * finalizing it. PATH_CHALLENGE, PATH_RESPONSE, CONNECTION_CLOSE
+ * only packets and PMTUD packets are excluded.
*/
#define NGTCP2_WRITE_STREAM_FLAG_PADDING 0x04u
* coalescing and write a packet.
*
* If :macro:`NGTCP2_WRITE_STREAM_FLAG_PADDING` is set in |flags| when
- * finalizing a non-empty 0 RTT or 1 RTT packet, the packet is padded
- * to the minimum length of a sending path MTU or a given packet
- * buffer.
+ * finalizing a non-empty 0 RTT or 1 RTT ack-eliciting packet, the
+ * packet is padded to the minimum length of a sending path MTU or a
+ * given packet buffer.
*
* This function returns 0 if it cannot write any frame because buffer
* is too small, or packet is congestion limited. Application should
* @macro
*
* :macro:`NGTCP2_WRITE_DATAGRAM_FLAG_PADDING` indicates that a
- * non-empty 0 RTT or 1 RTT packet is padded to the minimum length of
- * a sending path MTU or a given packet buffer when finalizing it.
- * ACK, PATH_CHALLENGE, PATH_RESPONSE, CONNECTION_CLOSE only packets
- * and PMTUD packets are excluded.
+ * non-empty 0 RTT or 1 RTT ack-eliciting packet is padded to the
+ * minimum length of a sending path MTU or a given packet buffer when
+ * finalizing it. PATH_CHALLENGE, PATH_RESPONSE, CONNECTION_CLOSE
+ * only packets and PMTUD packets are excluded.
*/
#define NGTCP2_WRITE_DATAGRAM_FLAG_PADDING 0x02u
* (which indicates a complete packet is ready).
*
* If :macro:`NGTCP2_WRITE_DATAGRAM_FLAG_PADDING` is set in |flags|
- * when finalizing a non-empty 0 RTT or 1 RTT packet, the packet is
- * padded to the minimum length of a sending path MTU or a given
- * packet buffer.
+ * when finalizing a non-empty 0 RTT or 1 RTT ack-eliciting packet,
+ * the packet is padded to the minimum length of a sending path MTU or
+ * a given packet buffer.
*
* This function returns the number of bytes written in |dest| if it
* succeeds, or one of the following negative error codes:
*
* Version number of the ngtcp2 library release.
*/
-#define NGTCP2_VERSION "1.12.0"
+#define NGTCP2_VERSION "1.13.0"
/**
* @macro
* number, 8 bits for minor and 8 bits for patch. Version 1.2.3
* becomes 0x010203.
*/
-#define NGTCP2_VERSION_NUM 0x010c00
+#define NGTCP2_VERSION_NUM 0x010d00
#endif /* !defined(NGTCP2_VERSION_H) */