git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 14 Dec 2018 19:07:23 +0000 (20:07 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 14 Dec 2018 19:07:23 +0000 (20:07 +0100)
added patches:
tcp-lack-of-available-data-can-also-cause-tso-defer.patch

queue-4.19/series
queue-4.19/tcp-lack-of-available-data-can-also-cause-tso-defer.patch [new file with mode: 0644]

diff --git a/queue-4.19/series b/queue-4.19/series
index 783c176e5f2f1c2ffba5a5b4bc4564c378bcea17..70645279d951b15f37a71a46235db755b3138322 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -140,3 +140,4 @@ alsa-hda-realtek-enable-audio-jacks-of-asus-ux433fn-ux333fa-with-alc294.patch
 alsa-hda-realtek-fix-the-mute-led-regresion-on-lenovo-x1-carbon.patch
 ib-hfi1-fix-an-out-of-bounds-access-in-get_hw_stats.patch
 bpf-fix-off-by-one-error-in-adjust_subprog_starts.patch
+tcp-lack-of-available-data-can-also-cause-tso-defer.patch
diff --git a/queue-4.19/tcp-lack-of-available-data-can-also-cause-tso-defer.patch b/queue-4.19/tcp-lack-of-available-data-can-also-cause-tso-defer.patch
new file mode 100644
index 0000000..de37796
--- /dev/null
+++ b/queue-4.19/tcp-lack-of-available-data-can-also-cause-tso-defer.patch
@@ -0,0 +1,112 @@
+From f9bfe4e6a9d08d405fe7b081ee9a13e649c97ecf Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 6 Dec 2018 09:58:24 -0800
+Subject: tcp: lack of available data can also cause TSO defer
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit f9bfe4e6a9d08d405fe7b081ee9a13e649c97ecf upstream.
+
+tcp_tso_should_defer() can return true in three different cases:
+
+ 1) We are cwnd-limited
+ 2) We are rwnd-limited
+ 3) We are application limited.
+
+Neal pointed out that my recent fix went too far, since it assumed
+that if we were not in case 1), we must be rwnd-limited.
+
+Fix this by properly populating the is_cwnd_limited and
+is_rwnd_limited booleans.
+
+After this change, we can finally move the silly FIN-flag check so
+that it applies only to the application-limited case.
+
+The same move for the EOR bit will be handled in net-next,
+since commit 1c09f7d073b1 ("tcp: do not try to defer skbs
+with eor mark (MSG_EOR)") is scheduled for linux-4.21.
+
+Tested by running 200 concurrent netperf -t TCP_RR -- -r 60000,100
+instances and checking that none of them was rwnd_limited in the
+chrono_stat output of the "ss -ti" command.
+
+Fixes: 41727549de3e ("tcp: Do not underestimate rwnd_limited")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Suggested-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Reviewed-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ipv4/tcp_output.c |   35 ++++++++++++++++++++++++-----------
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1902,7 +1902,9 @@ static int tso_fragment(struct sock *sk,
+  * This algorithm is from John Heffner.
+  */
+ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+-                               bool *is_cwnd_limited, u32 max_segs)
++                               bool *is_cwnd_limited,
++                               bool *is_rwnd_limited,
++                               u32 max_segs)
+ {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 age, send_win, cong_win, limit, in_flight;
+@@ -1910,9 +1912,6 @@ static bool tcp_tso_should_defer(struct
+       struct sk_buff *head;
+       int win_divisor;
+-      if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+-              goto send_now;
+-
+       if (icsk->icsk_ca_state >= TCP_CA_Recovery)
+               goto send_now;
+@@ -1971,10 +1970,27 @@ static bool tcp_tso_should_defer(struct
+       if (age < (tp->srtt_us >> 4))
+               goto send_now;
+-      /* Ok, it looks like it is advisable to defer. */
++      /* Ok, it looks like it is advisable to defer.
++       * Three cases are tracked :
++       * 1) We are cwnd-limited
++       * 2) We are rwnd-limited
++       * 3) We are application limited.
++       */
++      if (cong_win < send_win) {
++              if (cong_win <= skb->len) {
++                      *is_cwnd_limited = true;
++                      return true;
++              }
++      } else {
++              if (send_win <= skb->len) {
++                      *is_rwnd_limited = true;
++                      return true;
++              }
++      }
+-      if (cong_win < send_win && cong_win <= skb->len)
+-              *is_cwnd_limited = true;
++      /* If this packet won't get more data, do not wait. */
++      if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++              goto send_now;
+       return true;
+@@ -2338,11 +2354,8 @@ static bool tcp_write_xmit(struct sock *
+               } else {
+                       if (!push_one &&
+                           tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
+-                                               max_segs)) {
+-                              if (!is_cwnd_limited)
+-                                      is_rwnd_limited = true;
++                                               &is_rwnd_limited, max_segs))
+                               break;
+-                      }
+               }
+               limit = mss_now;
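
For readers following the logic change, below is a minimal, self-contained
user-space C sketch of the decision order the patched tcp_tso_should_defer()
follows once deferring looks advisable: classify the deferral as cwnd-limited
or rwnd-limited first, and only let a FIN force immediate sending in the
remaining, application-limited case. The names defer_ctx and should_defer()
and their fields are hypothetical stand-ins for kernel state, not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the state tcp_tso_should_defer() looks at
     * once it reaches the "advisable to defer" point.
     */
    struct defer_ctx {
        unsigned int cong_win;    /* bytes the congestion window still allows */
        unsigned int send_win;    /* bytes the peer's receive window still allows */
        unsigned int skb_len;     /* bytes queued in the skb under consideration */
        bool         skb_has_fin; /* TCPHDR_FIN set on that skb */
    };

    /* Returns true when sending should be deferred, mirroring the order the
     * patch introduces.
     */
    static bool should_defer(const struct defer_ctx *c,
                             bool *is_cwnd_limited, bool *is_rwnd_limited)
    {
        if (c->cong_win < c->send_win) {
            if (c->cong_win <= c->skb_len) {
                *is_cwnd_limited = true;   /* case 1: cwnd-limited */
                return true;
            }
        } else {
            if (c->send_win <= c->skb_len) {
                *is_rwnd_limited = true;   /* case 2: rwnd-limited */
                return true;
            }
        }

        /* case 3: application-limited; a FIN means no more payload will be
         * appended to this skb, so deferring cannot help.
         */
        if (c->skb_has_fin)
            return false;

        return true;
    }

    int main(void)
    {
        struct defer_ctx c = {
            .cong_win = 20000, .send_win = 60000,
            .skb_len = 30000, .skb_has_fin = false,
        };
        bool cwnd = false, rwnd = false;
        bool defer = should_defer(&c, &cwnd, &rwnd);

        printf("defer=%d cwnd_limited=%d rwnd_limited=%d\n", defer, cwnd, rwnd);
        return 0;
    }

With the example values the congestion window (20000 bytes) is smaller than
both the receive window and the queued data, so the program prints
"defer=1 cwnd_limited=1 rwnd_limited=0", i.e. case 1) above; before the patch
such a deferral would have been misreported as rwnd-limited whenever it was
not cwnd-limited.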