From: Sasha Levin
Date: Tue, 6 Aug 2019 23:27:07 +0000 (-0400)
Subject: fixes for 4.9
X-Git-Tag: v5.2.8~32
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=53ff2ec83a88a9fae34d853a719c9a4593b54dd2;p=thirdparty%2Fkernel%2Fstable-queue.git

fixes for 4.9

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.9/series b/queue-4.9/series
index e09a14ca37f..eeffc5ebd41 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -4,3 +4,4 @@ arm-dts-add-pinmuxing-for-i2c2-and-i2c3-for-logicpd-.patch-5864
 arm-dts-logicpd-som-lv-fix-audio-mute.patch
 arm64-cpufeature-fix-ctr_el0-field-definitions.patch
 arm64-cpufeature-fix-feature-comparison-for-ctr_el0..patch
+tcp-be-more-careful-in-tcp_fragment.patch
diff --git a/queue-4.9/tcp-be-more-careful-in-tcp_fragment.patch b/queue-4.9/tcp-be-more-careful-in-tcp_fragment.patch
new file mode 100644
index 00000000000..2d2fe0d0e24
--- /dev/null
+++ b/queue-4.9/tcp-be-more-careful-in-tcp_fragment.patch
@@ -0,0 +1,109 @@
+From b826c9f6580257900da2071c5e16980b66b21b4d Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 6 Aug 2019 17:09:14 +0200
+Subject: tcp: be more careful in tcp_fragment()
+
+[ Upstream commit b617158dc096709d8600c53b6052144d12b89fab ]
+
+Some applications set tiny SO_SNDBUF values and expect
+TCP to just work. Recent patches to address CVE-2019-11478
+broke them in case of losses, since retransmits might
+be prevented.
+
+We should allow these flows to make progress.
+
+This patch allows the first and last skb in retransmit queue
+to be split even if memory limits are hit.
+
+It also adds some room due to the fact that tcp_sendmsg()
+and tcp_sendpage() might overshoot sk_wmem_queued by about one full
+TSO skb (64KB size). Note this allowance was already present
+in stable backports for kernels < 4.15.
+
+Note for < 4.15 backports:
+ tcp_rtx_queue_tail() will probably look like:
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+	struct sk_buff *skb = tcp_send_head(sk);
+
+	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
+Fixes: f070ef2ac667 ("tcp: tcp_fragment() should apply sane memory limits")
+Signed-off-by: Eric Dumazet
+Reported-by: Andrew Prout
+Tested-by: Andrew Prout
+Tested-by: Jonathan Lemon
+Tested-by: Michal Kubecek
+Acked-by: Neal Cardwell
+Acked-by: Yuchung Cheng
+Acked-by: Christoph Paasch
+Cc: Jonathan Looney
+Signed-off-by: David S. Miller
+Signed-off-by: Sasha Levin
+---
+ include/net/tcp.h     | 17 +++++++++++++++++
+ net/ipv4/tcp_output.c | 11 ++++++++++-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 1eda31f7f013b..a474213ca015b 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1595,6 +1595,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ 	tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_write_queue_head(sk);
++
++	if (skb == tcp_send_head(sk))
++		skb = NULL;
++
++	return skb;
++}
++
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_send_head(sk);
++
++	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
++}
++
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ 	__skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0c195b0f42169..9ddb05b98312c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1175,6 +1175,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *buff;
+ 	int nsize, old_factor;
++	long limit;
+ 	int nlen;
+ 	u8 flags;
+ 
+@@ -1185,7 +1186,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	if (nsize < 0)
+ 		nsize = 0;
+ 
+-	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
++	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++	 * We need some allowance to not penalize applications setting small
++	 * SO_SNDBUF values.
++	 * Also allow first and last skb in retransmit queue to be split.
++	 */
++	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++		     skb != tcp_rtx_queue_head(sk) &&
++		     skb != tcp_rtx_queue_tail(sk))) {
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ 		return -ENOMEM;
+ 	}
+-- 
+2.20.1
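
To see why this change unblocks tiny-SO_SNDBUF flows, the new admission check can be replayed outside the kernel with stand-in values. The sketch below is not kernel code: toy_sock, fragment_allowed() and the SKB_TRUESIZE()/GSO_MAX_SIZE definitions are illustrative approximations chosen so the arithmetic compiles with plain gcc; only the shape of the test mirrors the patch.

/* Minimal userspace sketch of the new tcp_fragment() admission check.
 * NOT kernel code: toy_sock, fragment_allowed() and the two macros
 * below are stand-ins so the arithmetic can be compiled and run.
 */
#include <stdbool.h>
#include <stdio.h>

#define GSO_MAX_SIZE	65536		/* one full TSO skb, per the changelog */
#define SKB_TRUESIZE(x)	((x) + 256)	/* rough stand-in for the kernel macro */

struct toy_sock {
	long sk_wmem_queued;	/* bytes queued for transmit */
	long sk_sndbuf;		/* SO_SNDBUF value */
};

static bool fragment_allowed(const struct toy_sock *sk,
			     bool is_rtx_head, bool is_rtx_tail)
{
	/* Allowance for tcp_sendmsg()/tcp_sendpage() overshooting
	 * sk_wmem_queued by about one full TSO skb. */
	long limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

	/* Refuse only when over the limit AND the skb is neither the
	 * first nor the last skb of the retransmit queue; the kernel
	 * returns -ENOMEM and bumps LINUX_MIB_TCPWQUEUETOOBIG here. */
	if ((sk->sk_wmem_queued >> 1) > limit &&
	    !is_rtx_head && !is_rtx_tail)
		return false;

	return true;
}

int main(void)
{
	/* Tiny SO_SNDBUF with a heavily overcommitted queue: the
	 * pre-patch check refused every split, stalling retransmits;
	 * post-patch the retransmit queue head/tail may still split. */
	struct toy_sock sk = { .sk_wmem_queued = 600000, .sk_sndbuf = 4096 };

	printf("middle skb: %s\n",
	       fragment_allowed(&sk, false, false) ? "split" : "refuse");
	printf("rtx head:   %s\n",
	       fragment_allowed(&sk, true, false) ? "split" : "refuse");
	return 0;
}

With these numbers the middle skb is still refused (300000 > 135680), but the head of the retransmit queue may be split, which is the progress guarantee the changelog describes for flows hit by the CVE-2019-11478 limits.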