From: Sasha Levin
Date: Tue, 6 Aug 2019 23:27:07 +0000 (-0400)
Subject: fixes for 4.14
X-Git-Tag: v5.2.8~33
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=abaff0839f37297ada022a335dc8bbdea6df1d05;p=thirdparty%2Fkernel%2Fstable-queue.git

fixes for 4.14

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.14/series b/queue-4.14/series
index 3a919147696..941dc541cb6 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -1,3 +1,4 @@
 scsi-fcoe-embed-fc_rport_priv-in-fcoe_rport-structure.patch
 arm-dts-add-pinmuxing-for-i2c2-and-i2c3-for-logicpd-.patch
 arm-dts-add-pinmuxing-for-i2c2-and-i2c3-for-logicpd-.patch-16786
+tcp-be-more-careful-in-tcp_fragment.patch
diff --git a/queue-4.14/tcp-be-more-careful-in-tcp_fragment.patch b/queue-4.14/tcp-be-more-careful-in-tcp_fragment.patch
new file mode 100644
index 00000000000..cf62d0731ce
--- /dev/null
+++ b/queue-4.14/tcp-be-more-careful-in-tcp_fragment.patch
@@ -0,0 +1,110 @@
+From 1389aab57d94bc0190eb9ae948df8098260b5e7f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 6 Aug 2019 17:09:14 +0200
+Subject: tcp: be more careful in tcp_fragment()
+
+commit b617158dc096709d8600c53b6052144d12b89fab upstream.
+
+Some applications set tiny SO_SNDBUF values and expect
+TCP to just work. Recent patches to address CVE-2019-11478
+broke them in case of losses, since retransmits might
+be prevented.
+
+We should allow these flows to make progress.
+
+This patch allows the first and last skb in retransmit queue
+to be split even if memory limits are hit.
+
+It also adds the some room due to the fact that tcp_sendmsg()
+and tcp_sendpage() might overshoot sk_wmem_queued by about one full
+TSO skb (64KB size). Note this allowance was already present
+in stable backports for kernels < 4.15
+
+Note for < 4.15 backports :
+ tcp_rtx_queue_tail() will probably look like :
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+	struct sk_buff *skb = tcp_send_head(sk);
+
+	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
+Fixes: f070ef2ac667 ("tcp: tcp_fragment() should apply sane memory limits")
+Signed-off-by: Eric Dumazet
+Reported-by: Andrew Prout
+Tested-by: Andrew Prout
+Tested-by: Jonathan Lemon
+Tested-by: Michal Kubecek
+Acked-by: Neal Cardwell
+Acked-by: Yuchung Cheng
+Acked-by: Christoph Paasch
+Cc: Jonathan Looney
+Signed-off-by: David S. Miller
+Signed-off-by: Matthieu Baerts
+Signed-off-by: Sasha Levin
+---
+ include/net/tcp.h     | 17 +++++++++++++++++
+ net/ipv4/tcp_output.c | 11 ++++++++++-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0b477a1e11770..7994e569644e0 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1688,6 +1688,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ 	tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_write_queue_head(sk);
++
++	if (skb == tcp_send_head(sk))
++		skb = NULL;
++
++	return skb;
++}
++
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_send_head(sk);
++
++	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
++}
++
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ 	__skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a5960b9b6741c..a99086bf26eaf 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1264,6 +1264,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *buff;
+ 	int nsize, old_factor;
++	long limit;
+ 	int nlen;
+ 	u8 flags;
+ 
+@@ -1274,7 +1275,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	if (nsize < 0)
+ 		nsize = 0;
+ 
+-	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
++	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++	 * We need some allowance to not penalize applications setting small
++	 * SO_SNDBUF values.
++	 * Also allow first and last skb in retransmit queue to be split.
++	 */
++	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++		     skb != tcp_rtx_queue_head(sk) &&
++		     skb != tcp_rtx_queue_tail(sk))) {
+ 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ 	return -ENOMEM;
+ 	}
+-- 
+2.20.1
+