From: Sasha Levin
Date: Tue, 6 Aug 2019 23:27:08 +0000 (-0400)
Subject: fixes for 4.4
X-Git-Tag: v5.2.8~31
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=4e31bfdaeca879314fd218b37eef1bd19a3720fc;p=thirdparty%2Fkernel%2Fstable-queue.git

fixes for 4.4

Signed-off-by: Sasha Levin
---

diff --git a/queue-4.4/netfilter-nfnetlink_acct-validate-nfacct_quota-param.patch-161 b/queue-4.4/netfilter-nfnetlink_acct-validate-nfacct_quota-param.patch-161
new file mode 100644
index 00000000000..20dfa02c345
--- /dev/null
+++ b/queue-4.4/netfilter-nfnetlink_acct-validate-nfacct_quota-param.patch-161
@@ -0,0 +1,34 @@
+From d84be8b32a68fa491a016af0afe387391209a69e Mon Sep 17 00:00:00 2001
+From: Phil Turnbull
+Date: Tue, 3 May 2016 16:39:19 -0400
+Subject: netfilter: nfnetlink_acct: validate NFACCT_QUOTA parameter
+
+[ Upstream commit eda3fc50daa93b08774a18d51883c5a5d8d85e15 ]
+
+If a quota bit is set in NFACCT_FLAGS but the NFACCT_QUOTA parameter is
+missing, then a NULL pointer dereference is triggered. CAP_NET_ADMIN is
+required to trigger the bug.
+
+Signed-off-by: Phil Turnbull
+Signed-off-by: Pablo Neira Ayuso
+Signed-off-by: Sasha Levin
+---
+ net/netfilter/nfnetlink_acct.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
+index 088e8da06b00b..0f3cb410e42ee 100644
+--- a/net/netfilter/nfnetlink_acct.c
++++ b/net/netfilter/nfnetlink_acct.c
+@@ -97,6 +97,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
+ 			return -EINVAL;
+ 		if (flags & NFACCT_F_OVERQUOTA)
+ 			return -EINVAL;
++		if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
++			return -EINVAL;
+
+ 		size += sizeof(u64);
+ 	}
+--
+2.20.1
+
diff --git a/queue-4.4/series b/queue-4.4/series
index ad8550b32f6..ec0aa77cd92 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -1,3 +1,5 @@
 arm64-cpufeature-fix-ctr_el0-field-definitions.patch
 arm64-cpufeature-fix-feature-comparison-for-ctr_el0..patch
 netfilter-nfnetlink_acct-validate-nfacct_quota-param.patch
+netfilter-nfnetlink_acct-validate-nfacct_quota-param.patch-161
+tcp-be-more-careful-in-tcp_fragment.patch
diff --git a/queue-4.4/tcp-be-more-careful-in-tcp_fragment.patch b/queue-4.4/tcp-be-more-careful-in-tcp_fragment.patch
new file mode 100644
index 00000000000..4106cb311fb
--- /dev/null
+++ b/queue-4.4/tcp-be-more-careful-in-tcp_fragment.patch
@@ -0,0 +1,109 @@
+From 4bdc78226c2f03f9b2aed1829e55c50b7841529f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Tue, 6 Aug 2019 17:09:14 +0200
+Subject: tcp: be more careful in tcp_fragment()
+
+[ Upstream commit b617158dc096709d8600c53b6052144d12b89fab ]
+
+Some applications set tiny SO_SNDBUF values and expect
+TCP to just work. Recent patches to address CVE-2019-11478
+broke them in case of losses, since retransmits might
+be prevented.
+
+We should allow these flows to make progress.
+
+This patch allows the first and last skb in retransmit queue
+to be split even if memory limits are hit.
+
+It also adds some room due to the fact that tcp_sendmsg()
+and tcp_sendpage() might overshoot sk_wmem_queued by about one full
+TSO skb (64KB size). Note this allowance was already present
+in stable backports for kernels < 4.15.
+
+Note for < 4.15 backports :
+ tcp_rtx_queue_tail() will probably look like :
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+	struct sk_buff *skb = tcp_send_head(sk);
+
+	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
+Fixes: f070ef2ac667 ("tcp: tcp_fragment() should apply sane memory limits")
+Signed-off-by: Eric Dumazet
+Reported-by: Andrew Prout
+Tested-by: Andrew Prout
+Tested-by: Jonathan Lemon
+Tested-by: Michal Kubecek
+Acked-by: Neal Cardwell
+Acked-by: Yuchung Cheng
+Acked-by: Christoph Paasch
+Cc: Jonathan Looney
+Signed-off-by: David S. Miller
+Signed-off-by: Sasha Levin
+---
+ include/net/tcp.h     | 17 +++++++++++++++++
+ net/ipv4/tcp_output.c | 11 ++++++++++-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 77438a8406ecf..0410fd29d5695 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1526,6 +1526,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ 		tcp_sk(sk)->highest_sack = NULL;
+ }
+
++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_write_queue_head(sk);
++
++	if (skb == tcp_send_head(sk))
++		skb = NULL;
++
++	return skb;
++}
++
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_send_head(sk);
++
++	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
++}
++
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ 	__skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 53edd60fd3817..76ffce0c18aeb 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1151,6 +1151,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *buff;
+ 	int nsize, old_factor;
++	long limit;
+ 	int nlen;
+ 	u8 flags;
+
+@@ -1161,7 +1162,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	if (nsize < 0)
+ 		nsize = 0;
+
+-	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
++	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++	 * We need some allowance to not penalize applications setting small
++	 * SO_SNDBUF values.
++	 * Also allow first and last skb in retransmit queue to be split.
++	 */
++	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++		     skb != tcp_rtx_queue_head(sk) &&
++		     skb != tcp_rtx_queue_tail(sk))) {
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ 		return -ENOMEM;
+ 	}
+--
+2.20.1
+
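
As a quick sanity check for anyone reviewing or re-doing this backport, the
behavioral change in tcp_fragment() can be modeled in a few lines of userspace
C. The sketch below is illustrative only and is not part of the queued
patches: GSO_MAX_SIZE matches the kernel's 65536, but SKB_OVERHEAD, the
simplified SKB_TRUESIZE(), and the sample byte counts are invented for the
demonstration.

/*
 * Illustrative sketch only -- not part of the queued patches.
 * A userspace model of the tcp_fragment() limit check before and after
 * this backport. GSO_MAX_SIZE matches the kernel's 65536; SKB_OVERHEAD
 * and the sample byte counts below are invented stand-ins for the real
 * per-skb accounting done by the kernel's SKB_TRUESIZE().
 */
#include <stdbool.h>
#include <stdio.h>

#define GSO_MAX_SIZE	65536
#define SKB_OVERHEAD	2048			/* invented approximation */
#define SKB_TRUESIZE(x)	((x) + SKB_OVERHEAD)

/* Old 4.4 check: one fixed allowance, no exceptions. */
static bool old_check_rejects(long wmem_queued, long sndbuf)
{
	return (wmem_queued >> 1) > sndbuf + 0x20000;
}

/* New check: a slightly larger allowance, and the first and last skb of
 * the retransmit queue may always be split, so retransmits make progress.
 */
static bool new_check_rejects(long wmem_queued, long sndbuf,
			      bool rtx_head, bool rtx_tail)
{
	long limit = sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

	return (wmem_queued >> 1) > limit && !rtx_head && !rtx_tail;
}

int main(void)
{
	long sndbuf = 4096;	/* a tiny SO_SNDBUF, as in the report */
	long wmem = 300000;	/* sample queue size, chosen to trip both limits */

	printf("old, any skb:   reject=%d\n", old_check_rejects(wmem, sndbuf));
	printf("new, rtx head:  reject=%d\n",
	       new_check_rejects(wmem, sndbuf, true, false));
	printf("new, mid queue: reject=%d\n",
	       new_check_rejects(wmem, sndbuf, false, false));
	return 0;
}

With these sample numbers both the old and new limits are exceeded, yet the
new check still lets the first (or last) skb of the retransmit queue be
split, which is what allows tiny-SO_SNDBUF flows to keep retransmitting
after a loss.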