--- /dev/null
+From d84be8b32a68fa491a016af0afe387391209a69e Mon Sep 17 00:00:00 2001
+From: Phil Turnbull <phil.turnbull@oracle.com>
+Date: Tue, 3 May 2016 16:39:19 -0400
+Subject: netfilter: nfnetlink_acct: validate NFACCT_QUOTA parameter
+
+[ Upstream commit eda3fc50daa93b08774a18d51883c5a5d8d85e15 ]
+
+If a quota bit is set in NFACCT_FLAGS but the NFACCT_QUOTA parameter is
+missing then a NULL pointer dereference is triggered. CAP_NET_ADMIN is
+required to trigger the bug.
+
+Signed-off-by: Phil Turnbull <phil.turnbull@oracle.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink_acct.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
+index 088e8da06b00b..0f3cb410e42ee 100644
+--- a/net/netfilter/nfnetlink_acct.c
++++ b/net/netfilter/nfnetlink_acct.c
+@@ -97,6 +97,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
+ 			return -EINVAL;
+ 		if (flags & NFACCT_F_OVERQUOTA)
+ 			return -EINVAL;
++		if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
++			return -EINVAL;
+
+ 		size += sizeof(u64);
+ 	}
+--
+2.20.1
+
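The two added lines only reject the malformed request up front; the dereference the changelog describes would otherwise happen later in nfnl_acct_new(), when the quota value is read out of tb[NFACCT_QUOTA] on the assumption that a quota flag implies the attribute was supplied. The following is a minimal userspace sketch of that "flag implies attribute" guard, not the kernel code path itself; every identifier in it (struct attr, ATTR_FLAGS, ATTR_QUOTA, F_QUOTA, acct_new) is hypothetical.

/*
 * Standalone illustration only -- not kernel code.  All names here are
 * made up; the point is the guard: a flag that promises an optional
 * attribute is honoured only if that attribute is actually present.
 */
#include <stdint.h>
#include <stdio.h>

enum { ATTR_FLAGS, ATTR_QUOTA, ATTR_MAX };

#define F_QUOTA 0x1u

struct attr {
	uint64_t value;
};

static int acct_new(struct attr *tb[ATTR_MAX])
{
	unsigned int flags = 0;
	uint64_t quota = 0;

	if (tb[ATTR_FLAGS])
		flags = (unsigned int)tb[ATTR_FLAGS]->value;

	/* The guard the patch adds: flag set but attribute missing -> reject. */
	if ((flags & F_QUOTA) && !tb[ATTR_QUOTA])
		return -1;

	if (flags & F_QUOTA)
		quota = tb[ATTR_QUOTA]->value;	/* safe: checked above */

	printf("accepted: flags=%#x quota=%llu\n",
	       flags, (unsigned long long)quota);
	return 0;
}

int main(void)
{
	struct attr flags_attr = { .value = F_QUOTA };
	/* Quota flag present, ATTR_QUOTA deliberately left NULL. */
	struct attr *tb[ATTR_MAX] = { [ATTR_FLAGS] = &flags_attr };

	if (acct_new(tb) < 0)
		printf("rejected: quota flag set but ATTR_QUOTA missing\n");
	return 0;
}

With the guard in place the request is rejected instead of crashing, which is what the patch enforces for NFACCT_F_QUOTA_PKTS/NFACCT_F_QUOTA_BYTES requests that omit NFACCT_QUOTA.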
--- /dev/null
+From 4bdc78226c2f03f9b2aed1829e55c50b7841529f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 6 Aug 2019 17:09:14 +0200
+Subject: tcp: be more careful in tcp_fragment()
+
+[ Upstream commit b617158dc096709d8600c53b6052144d12b89fab ]
+
+Some applications set tiny SO_SNDBUF values and expect
+TCP to just work. Recent patches to address CVE-2019-11478
+broke them in case of losses, since retransmits might
+be prevented.
+
+We should allow these flows to make progress.
+
+This patch allows the first and last skb in retransmit queue
+to be split even if memory limits are hit.
+
+It also adds some room because tcp_sendmsg() and tcp_sendpage()
+might overshoot sk_wmem_queued by about one full TSO skb (64KB size).
+Note this allowance was already present in stable backports for
+kernels < 4.15.
+
+Note for < 4.15 backports:
+ tcp_rtx_queue_tail() will probably look like:
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+	struct sk_buff *skb = tcp_send_head(sk);
+
+	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
+}
+
+Fixes: f070ef2ac667 ("tcp: tcp_fragment() should apply sane memory limits")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Andrew Prout <aprout@ll.mit.edu>
+Tested-by: Andrew Prout <aprout@ll.mit.edu>
+Tested-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Tested-by: Michal Kubecek <mkubecek@suse.cz>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Acked-by: Christoph Paasch <cpaasch@apple.com>
+Cc: Jonathan Looney <jtl@netflix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tcp.h | 17 +++++++++++++++++
+ net/ipv4/tcp_output.c | 11 ++++++++++-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 77438a8406ecf..0410fd29d5695 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1526,6 +1526,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ 		tcp_sk(sk)->highest_sack = NULL;
+ }
+
++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_write_queue_head(sk);
++
++	if (skb == tcp_send_head(sk))
++		skb = NULL;
++
++	return skb;
++}
++
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++	struct sk_buff *skb = tcp_send_head(sk);
++
++	return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
++}
++
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ 	__skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 53edd60fd3817..76ffce0c18aeb 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1151,6 +1151,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *buff;
+ 	int nsize, old_factor;
++	long limit;
+ 	int nlen;
+ 	u8 flags;
+
+@@ -1161,7 +1162,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ 	if (nsize < 0)
+ 		nsize = 0;
+
+-	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
++	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++	 * We need some allowance to not penalize applications setting small
++	 * SO_SNDBUF values.
++	 * Also allow first and last skb in retransmit queue to be split.
++	 */
++	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++		     skb != tcp_rtx_queue_head(sk) &&
++		     skb != tcp_rtx_queue_tail(sk))) {
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ 		return -ENOMEM;
+ 	}
+--
+2.20.1
+
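Two details of the hunk above deserve a note. The dropped constant 0x20000 is 128 KiB; the new limit of sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE) adds two maximal TSO payloads (GSO_MAX_SIZE is 64 KiB) plus their per-skb metadata, i.e. roughly the old 128 KiB plus struct overhead, sized so the overshoot of about one full TSO skb described in the changelog fits comfortably. Independently of that limit, the first and last skb of the retransmit queue (found via the tcp_rtx_queue_head()/tcp_rtx_queue_tail() helpers added to tcp.h) may now always be split, so a socket with a tiny SO_SNDBUF still makes retransmit progress. The standalone sketch below mirrors the new condition; SKB_OVERHEAD is a rough assumption, not the kernel's exact SKB_TRUESIZE() accounting, and the numbers in main() are made up.

/*
 * Standalone illustration only -- not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define GSO_MAX_SIZE	65536L
#define SKB_OVERHEAD	512L	/* assumed per-skb metadata */
#define SKB_TRUESIZE(x)	((x) + SKB_OVERHEAD)

/* Mirrors the condition tcp_fragment() now uses to refuse a split. */
static bool split_refused(long wmem_queued, long sndbuf,
			  bool is_rtx_head, bool is_rtx_tail)
{
	long limit = sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);

	return (wmem_queued >> 1) > limit && !is_rtx_head && !is_rtx_tail;
}

int main(void)
{
	/* 4 KiB SO_SNDBUF with ~300 KiB queued: a middle skb is refused... */
	printf("middle skb refused: %d\n",
	       split_refused(300000, 4096, false, false));
	/* ...but the head of the retransmit queue may still be split, so
	 * retransmits keep making progress despite the tiny send buffer.
	 */
	printf("rtx head refused:   %d\n",
	       split_refused(300000, 4096, true, false));
	return 0;
}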