--- /dev/null
+From e0a851fe6b9b619527bd928aa93caaddd003f70c Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Tue, 12 May 2020 14:40:01 +0200
+Subject: serial: 8250: Avoid error message on reprobe
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit e0a851fe6b9b619527bd928aa93caaddd003f70c upstream.
+
+If the call to uart_add_one_port() in serial8250_register_8250_port()
+fails, a half-initialized entry in the serial_8250ports[] array is left
+behind.
+
+A subsequent reprobe of the same serial port causes that entry to be
+reused. Because uart->port.dev is set, uart_remove_one_port() is called
+for the half-initialized entry and bails out with an error message:
+
+bcm2835-aux-uart 3f215040.serial: Removing wrong port: (null) != (ptrval)
+
+The same happens on failure of mctrl_gpio_init() since commit
+4a96895f74c9 ("tty/serial/8250: use mctrl_gpio helpers").
+
+Fix by zeroing the uart->port.dev pointer in the probe error path.
+
+The bug was introduced in v2.6.10 by historical commit befff6f5bf5f
+("[SERIAL] Add new port registration/unregistration functions."):
+https://git.kernel.org/tglx/history/c/befff6f5bf5f
+
+The commit added an unconditional call to uart_remove_one_port() in
+serial8250_register_port(). In v3.7, commit 835d844d1a28 ("8250_pnp:
+do pnp probe before legacy probe") made that call conditional on
+uart->port.dev, which allows me to fix the issue by zeroing that pointer
+in the error path. Thus, the present commit fixes the problem as far
+back as v3.7, whereas still older versions also need to cherry-pick
+835d844d1a28.
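+
+The reuse path boils down to roughly the following (a paraphrased sketch,
+not the literal kernel source):
+
+	/* reprobe finds the existing entry; tear it down only if it
+	 * was fully registered, i.e. port.dev is still set
+	 */
+	if (uart->port.dev)
+		uart_remove_one_port(&serial8250_reg, &uart->port);
+
+so clearing uart->port.dev in the error path (see the hunk below) makes
+the reprobe skip the bogus uart_remove_one_port() call for the
+half-initialized entry.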
+
+Fixes: 835d844d1a28 ("8250_pnp: do pnp probe before legacy probe")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v2.6.10
+Cc: stable@vger.kernel.org # v2.6.10: 835d844d1a28: 8250_pnp: do pnp probe before legacy probe
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/b4a072013ee1a1d13ee06b4325afb19bda57ca1b.1589285873.git.lukas@wunner.de
+[iwamatsu: Backported to 4.14, 4.19: adjust context]
+Signed-off-by: Nobuhiro Iwamatsu (CIP) <nobuhiro1.iwamatsu@toshiba.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_core.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -1062,8 +1062,10 @@ int serial8250_register_8250_port(struct
+ serial8250_apply_quirks(uart);
+ ret = uart_add_one_port(&serial8250_reg,
+ &uart->port);
+- if (ret == 0)
+- ret = uart->port.line;
++ if (ret)
++ goto err;
++
++ ret = uart->port.line;
+ } else {
+ dev_info(uart->port.dev,
+ "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
+@@ -1088,6 +1090,11 @@ int serial8250_register_8250_port(struct
+ mutex_unlock(&serial_mutex);
+
+ return ret;
++
++err:
++ uart->port.dev = NULL;
++ mutex_unlock(&serial_mutex);
++ return ret;
+ }
+ EXPORT_SYMBOL(serial8250_register_8250_port);
+
--- /dev/null
+From 78dc70ebaa38aa303274e333be6c98eef87619e2 Mon Sep 17 00:00:00 2001
+From: Priyaranjan Jha <priyarjha@google.com>
+Date: Wed, 23 Jan 2019 12:04:54 -0800
+Subject: tcp_bbr: adapt cwnd based on ack aggregation estimation
+
+From: Priyaranjan Jha <priyarjha@google.com>
+
+commit 78dc70ebaa38aa303274e333be6c98eef87619e2 upstream.
+
+Aggregation effects are extremely common with wifi, cellular, and cable
+modem link technologies, ACK decimation in middleboxes, and LRO and GRO
+in receiving hosts. The aggregation can happen in either direction,
+data or ACKs, but in either case the aggregation effect is visible
+to the sender in the ACK stream.
+
+Previously BBR's sending was often limited by cwnd under severe ACK
+aggregation/decimation because BBR sized the cwnd at 2*BDP. If packets
+were acked in bursts after long delays (e.g. one ACK acking 5*BDP after
+5*RTT), BBR's sending was halted after sending 2*BDP over 2*RTT, leaving
+the bottleneck idle for potentially long periods. Note that loss-based
+congestion control does not have this issue because when facing
+aggregation it continues increasing cwnd after bursts of ACKs, growing
+cwnd until the buffer is full.
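+
+To make the arithmetic of that example explicit (an illustration derived
+from the numbers above, not a measurement): with cwnd capped at 2*BDP,
+the sender can put at most 2*BDP on the wire per 5*RTT aggregation cycle,
+so the bottleneck carries at most
+
+	2*BDP / 5*BDP = 40%
+
+of what it could have carried before the next stretched ACK reopens the
+window.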
+
+To achieve good throughput in the presence of aggregation effects, this
+algorithm allows the BBR sender to put extra data in flight to keep the
+bottleneck utilized during silences in the ACK stream that it has evidence
+to suggest were caused by aggregation.
+
+A summary of the algorithm: when a burst of packets is acked by a
+stretched ACK, a burst of ACKs, or both, BBR first estimates the expected
+amount of data that should have been acked, based on its estimated
+bandwidth. Then the surplus ("extra_acked") is recorded in a windowed-max
+filter to estimate the recent level of observed ACK aggregation. Then cwnd
+is increased by the ACK aggregation estimate. The larger cwnd avoids BBR
+being cwnd-limited in the face of ACK silences that recent history suggests
+were caused by aggregation. As a sanity check, the ACK aggregation degree
+is upper-bounded by the cwnd (at the time of measurement) and a global max
+of BW * 100ms. The algorithm is further described by the following
+presentation:
+https://datatracker.ietf.org/meeting/101/materials/slides-101-iccrg-an-update-on-bbr-work-at-google-00
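+
+A rough worked example (illustrative numbers, not taken from the patch):
+suppose the estimated bandwidth is 100 packets per 10 ms and a stretched
+ACK arrives 40 ms into the sampling epoch, bringing the total (S)ACKed in
+the epoch to 700 packets. Then
+
+	expected_acked = 100 pkts/10ms * 40 ms = 400 packets
+	extra_acked    = 700 - 400             = 300 packets
+
+and extra_acked, capped by the cwnd at measurement time and by
+bw * 100ms (1000 packets here), feeds the windowed-max filter whose
+output is added on top of the usual BDP-based cwnd target.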
+
+In our internal testing, we observed a significant increase in BBR
+throughput (measured using netperf), in a basic wifi setup.
+- Host1 (sender on ethernet) -> AP -> Host2 (receiver on wifi)
+- 2.4 GHz -> BBR before: ~73 Mbps; BBR after: ~102 Mbps; CUBIC: ~100 Mbps
+- 5.0 GHz -> BBR before: ~362 Mbps; BBR after: ~593 Mbps; CUBIC: ~601 Mbps
+
+Also, this code is running globally on YouTube TCP connections and produced
+significant bandwidth increases for YouTube traffic.
+
+This is based on Ian Swett's max_ack_height_ algorithm from the
+QUIC BBR implementation.
+
+Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/net/inet_connection_sock.h | 4 -
+ net/ipv4/tcp_bbr.c | 122 ++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 123 insertions(+), 3 deletions(-)
+
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -139,8 +139,8 @@ struct inet_connection_sock {
+ } icsk_mtup;
+ u32 icsk_user_timeout;
+
+- u64 icsk_ca_priv[88 / sizeof(u64)];
+-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
++ u64 icsk_ca_priv[104 / sizeof(u64)];
++#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
+ };
+
+ #define ICSK_TIME_RETRANS 1 /* Retransmit timer */
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -115,6 +115,14 @@ struct bbr {
+ unused_b:5;
+ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
+ u32 full_bw; /* recent bw, to estimate if pipe is full */
++
++ /* For tracking ACK aggregation: */
++ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
++ u16 extra_acked[2]; /* max excess data ACKed in epoch */
++ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
++ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
++ extra_acked_win_idx:1, /* current index in extra_acked array */
++ unused_c:6;
+ };
+
+ #define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
+@@ -174,6 +182,15 @@ static const u32 bbr_lt_bw_diff = 4000 /
+ /* If we estimate we're policed, use lt_bw for this many round trips: */
+ static const u32 bbr_lt_bw_max_rtts = 48;
+
++/* Gain factor for adding extra_acked to target cwnd: */
++static const int bbr_extra_acked_gain = BBR_UNIT;
++/* Window length of extra_acked window. */
++static const u32 bbr_extra_acked_win_rtts = 5;
++/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
++static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
++/* Time period for clamping cwnd increment due to ack aggregation */
++static const u32 bbr_extra_acked_max_us = 100 * 1000;
++
+ static void bbr_check_probe_rtt_done(struct sock *sk);
+
+ /* Do we estimate that STARTUP filled the pipe? */
+@@ -200,6 +217,16 @@ static u32 bbr_bw(const struct sock *sk)
+ return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
+ }
+
++/* Return maximum extra acked in past k-2k round trips,
++ * where k = bbr_extra_acked_win_rtts.
++ */
++static u16 bbr_extra_acked(const struct sock *sk)
++{
++ struct bbr *bbr = inet_csk_ca(sk);
++
++ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
++}
++
+ /* Return rate in bytes per second, optionally with a gain.
+ * The order here is chosen carefully to avoid overflow of u64. This should
+ * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
+@@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *
+
+ if (event == CA_EVENT_TX_START && tp->app_limited) {
+ bbr->idle_restart = 1;
++ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
++ bbr->ack_epoch_acked = 0;
+ /* Avoid pointless buffer overflows: pace at est. bw if we don't
+ * need more speed (we're restarting from idle and app-limited).
+ */
+@@ -385,6 +414,22 @@ static u32 bbr_inflight(struct sock *sk,
+ return inflight;
+ }
+
++/* Find the cwnd increment based on estimate of ack aggregation */
++static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
++{
++ u32 max_aggr_cwnd, aggr_cwnd = 0;
++
++ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
++ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
++ / BW_UNIT;
++ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
++ >> BBR_SCALE;
++ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
++ }
++
++ return aggr_cwnd;
++}
++
+ /* An optimization in BBR to reduce losses: On the first round of recovery, we
+ * follow the packet conservation principle: send P packets per P packets acked.
+ * After that, we slow-start and send at most 2*P packets per P packets acked.
+@@ -445,9 +490,15 @@ static void bbr_set_cwnd(struct sock *sk
+ if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
+ goto done;
+
+- /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+ target_cwnd = bbr_bdp(sk, bw, gain);
++
++ /* Increment the cwnd to account for excess ACKed data that seems
++ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
++ */
++ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
++
++ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
+ cwnd = min(cwnd + acked, target_cwnd);
+ else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
+@@ -717,6 +768,67 @@ static void bbr_update_bw(struct sock *s
+ }
+ }
+
++/* Estimates the windowed max degree of ack aggregation.
++ * This is used to provision extra in-flight data to keep sending during
++ * inter-ACK silences.
++ *
++ * Degree of ack aggregation is estimated as extra data acked beyond expected.
++ *
++ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
++ * cwnd += max_extra_acked
++ *
++ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
++ * Max filter is an approximate sliding window of 5-10 (packet timed) round
++ * trips.
++ */
++static void bbr_update_ack_aggregation(struct sock *sk,
++ const struct rate_sample *rs)
++{
++ u32 epoch_us, expected_acked, extra_acked;
++ struct bbr *bbr = inet_csk_ca(sk);
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
++ rs->delivered < 0 || rs->interval_us <= 0)
++ return;
++
++ if (bbr->round_start) {
++ bbr->extra_acked_win_rtts = min(0x1F,
++ bbr->extra_acked_win_rtts + 1);
++ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
++ bbr->extra_acked_win_rtts = 0;
++ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
++ 0 : 1;
++ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
++ }
++ }
++
++ /* Compute how many packets we expected to be delivered over epoch. */
++ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
++ bbr->ack_epoch_mstamp);
++ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
++
++ /* Reset the aggregation epoch if ACK rate is below expected rate or
++ * significantly large no. of ack received since epoch (potentially
++ * quite old epoch).
++ */
++ if (bbr->ack_epoch_acked <= expected_acked ||
++ (bbr->ack_epoch_acked + rs->acked_sacked >=
++ bbr_ack_epoch_acked_reset_thresh)) {
++ bbr->ack_epoch_acked = 0;
++ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
++ expected_acked = 0;
++ }
++
++ /* Compute excess data delivered, beyond what was expected. */
++ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
++ bbr->ack_epoch_acked + rs->acked_sacked);
++ extra_acked = bbr->ack_epoch_acked - expected_acked;
++ extra_acked = min(extra_acked, tp->snd_cwnd);
++ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
++ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
++}
++
+ /* Estimate when the pipe is full, using the change in delivery rate: BBR
+ * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
+ * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
+@@ -846,6 +958,7 @@ static void bbr_update_min_rtt(struct so
+ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+ {
+ bbr_update_bw(sk, rs);
++ bbr_update_ack_aggregation(sk, rs);
+ bbr_update_cycle_phase(sk, rs);
+ bbr_check_full_bw_reached(sk, rs);
+ bbr_check_drain(sk, rs);
+@@ -896,6 +1009,13 @@ static void bbr_init(struct sock *sk)
+ bbr_reset_lt_bw_sampling(sk);
+ bbr_reset_startup_mode(sk);
+
++ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
++ bbr->ack_epoch_acked = 0;
++ bbr->extra_acked_win_rtts = 0;
++ bbr->extra_acked_win_idx = 0;
++ bbr->extra_acked[0] = 0;
++ bbr->extra_acked[1] = 0;
++
+ cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
+ }
+
--- /dev/null
+From 232aa8ec3ed979d4716891540c03a806ecab0c37 Mon Sep 17 00:00:00 2001
+From: Priyaranjan Jha <priyarjha@google.com>
+Date: Wed, 23 Jan 2019 12:04:53 -0800
+Subject: tcp_bbr: refactor bbr_target_cwnd() for general inflight provisioning
+
+From: Priyaranjan Jha <priyarjha@google.com>
+
+commit 232aa8ec3ed979d4716891540c03a806ecab0c37 upstream.
+
+Because bbr_target_cwnd() is really a general-purpose BBR helper for
+computing some volume of inflight data as a function of the estimated
+BDP, refactor it into the following helper functions (composition
+sketched below):
+- bbr_bdp()
+- bbr_quantization_budget()
+- bbr_inflight()
+
+Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ net/ipv4/tcp_bbr.c | 60 ++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 39 insertions(+), 21 deletions(-)
+
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -315,30 +315,19 @@ static void bbr_cwnd_event(struct sock *
+ }
+ }
+
+-/* Find target cwnd. Right-size the cwnd based on min RTT and the
+- * estimated bottleneck bandwidth:
++/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
+ *
+- * cwnd = bw * min_rtt * gain = BDP * gain
++ * bdp = bw * min_rtt * gain
+ *
+ * The key factor, gain, controls the amount of queue. While a small gain
+ * builds a smaller queue, it becomes more vulnerable to noise in RTT
+ * measurements (e.g., delayed ACKs or other ACK compression effects). This
+ * noise may cause BBR to under-estimate the rate.
+- *
+- * To achieve full performance in high-speed paths, we budget enough cwnd to
+- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+- * - one skb in sending host Qdisc,
+- * - one skb in sending host TSO/GSO engine
+- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+- * full even with ACK-every-other-packet delayed ACKs.
+ */
+-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
++static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
+ {
+ struct bbr *bbr = inet_csk_ca(sk);
+- u32 cwnd;
++ u32 bdp;
+ u64 w;
+
+ /* If we've never had a valid RTT sample, cap cwnd at the initial
+@@ -353,7 +342,24 @@ static u32 bbr_target_cwnd(struct sock *
+ w = (u64)bw * bbr->min_rtt_us;
+
+ /* Apply a gain to the given value, then remove the BW_SCALE shift. */
+- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
++ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
++
++ return bdp;
++}
++
++/* To achieve full performance in high-speed paths, we budget enough cwnd to
++ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
++ * - one skb in sending host Qdisc,
++ * - one skb in sending host TSO/GSO engine
++ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
++ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
++ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
++ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
++ * full even with ACK-every-other-packet delayed ACKs.
++ */
++static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
++{
++ struct bbr *bbr = inet_csk_ca(sk);
+
+ /* Allow enough full-sized skbs in flight to utilize end systems. */
+ cwnd += 3 * bbr_tso_segs_goal(sk);
+@@ -368,6 +374,17 @@ static u32 bbr_target_cwnd(struct sock *
+ return cwnd;
+ }
+
++/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
++static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
++{
++ u32 inflight;
++
++ inflight = bbr_bdp(sk, bw, gain);
++ inflight = bbr_quantization_budget(sk, inflight, gain);
++
++ return inflight;
++}
++
+ /* An optimization in BBR to reduce losses: On the first round of recovery, we
+ * follow the packet conservation principle: send P packets per P packets acked.
+ * After that, we slow-start and send at most 2*P packets per P packets acked.
+@@ -429,7 +446,8 @@ static void bbr_set_cwnd(struct sock *sk
+ goto done;
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+- target_cwnd = bbr_target_cwnd(sk, bw, gain);
++ target_cwnd = bbr_bdp(sk, bw, gain);
++ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
+ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
+ cwnd = min(cwnd + acked, target_cwnd);
+ else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
+@@ -470,14 +488,14 @@ static bool bbr_is_next_cycle_phase(stru
+ if (bbr->pacing_gain > BBR_UNIT)
+ return is_full_length &&
+ (rs->losses || /* perhaps pacing_gain*BDP won't fit */
+- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
++ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
+
+ /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
+ * probing didn't find more bw. If inflight falls to match BDP then we
+ * estimate queue is drained; persisting would underutilize the pipe.
+ */
+ return is_full_length ||
+- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
++ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
+ }
+
+ static void bbr_advance_cycle_phase(struct sock *sk)
+@@ -736,11 +754,11 @@ static void bbr_check_drain(struct sock
+ bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
+ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
+ tcp_sk(sk)->snd_ssthresh =
+- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
++ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
+ } /* fall through to check if in-flight is already small: */
+ if (bbr->mode == BBR_DRAIN &&
+ tcp_packets_in_flight(tcp_sk(sk)) <=
+- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
++ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
+ }
+