--- /dev/null
+From foo@baz Thu 19 Sep 2019 03:07:06 PM CEST
+From: Xin Long <lucien.xin@gmail.com>
+Date: Fri, 13 Sep 2019 17:45:47 +0800
+Subject: ip6_gre: fix a dst leak in ip6erspan_tunnel_xmit
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 28e486037747c2180470b77c290d4090ad42f259 ]
+
+In ip6erspan_tunnel_xmit(), if the skb will not be sent out, it has to
+be freed on the tx_err path. Otherwise, when a netns is deleted, the
+dst/dev is leaked and dmesg shows:
+
+ unregister_netdevice: waiting for lo to become free. Usage count = 1
+
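+For illustration, here is a minimal user-space sketch of the ownership
+rule the fix restores; the names (struct buf, xmit) are made up and it
+is not kernel code. Every failure path funnels through a single label
+that frees the buffer it owns, instead of returning early and leaking
+it the way the old "return -EINVAL" did:
+
+    #include <stdlib.h>
+
+    struct buf { char *data; };
+
+    static int xmit(struct buf *b, int bad_input)
+    {
+            if (bad_input)
+                    goto tx_err;    /* was: return -EINVAL, which leaked b */
+            /* the normal transmit path consumes and frees b */
+            free(b->data);
+            free(b);
+            return 0;
+
+    tx_err:
+            free(b->data);          /* the error path frees what it owns */
+            free(b);
+            return 0;
+    }
+
+    int main(void)
+    {
+            struct buf *b = malloc(sizeof(*b));
+
+            b->data = malloc(64);
+            return xmit(b, 1);      /* bad input: still no leak */
+    }
+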
+Fixes: ef7baf5e083c ("ip6_gre: add ip6 erspan collect_md mode")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: William Tu <u9012063@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_gre.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -988,7 +988,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ if (unlikely(!tun_info ||
+ !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+- return -EINVAL;
++ goto tx_err;
+
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
--- /dev/null
+From foo@baz Thu 19 Sep 2019 03:07:06 PM CEST
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Thu, 12 Sep 2019 10:22:30 -0700
+Subject: net_sched: let qdisc_put() accept NULL pointer
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 6efb971ba8edfbd80b666f29de12882852f095ae ]
+
+When tcf_block_get() fails in sfb_init(), q->qdisc is still a NULL
+pointer, which leads to a crash in sfb_destroy(). The same applies to
+sch_dsmark.
+
+Instead of fixing each caller separately, Linus suggested just
+accepting a NULL pointer in qdisc_put(), which makes things easier
+for callers.
+
+(For sch_dsmark, the bug probably exists long before commit
+6529eaba33f0.)
+
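+The convention being adopted is the one kfree() already follows; as a
+quick illustration, a minimal user-space sketch with made-up names of a
+destructor that tolerates NULL, so error-path callers need no guard:
+
+    #include <stdlib.h>
+
+    struct obj { int refcnt; };
+
+    static void obj_put(struct obj *o)
+    {
+            if (!o)                 /* accept NULL, as the qdisc destructor now does */
+                    return;
+            if (--o->refcnt == 0)
+                    free(o);
+    }
+
+    int main(void)
+    {
+            struct obj *o = NULL;   /* e.g. q->qdisc left NULL by a failed init */
+
+            obj_put(o);             /* safe: no crash on the error path */
+            return 0;
+    }
+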
+Fixes: 6529eaba33f0 ("net: sched: introduce tcf block infractructure")
+Reported-by: syzbot+d5870a903591faaca4ae@syzkaller.appspotmail.com
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_generic.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -950,6 +950,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
+ const struct Qdisc_ops *ops = qdisc->ops;
+ struct sk_buff *skb, *tmp;
+
++ if (!qdisc)
++ return;
++
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !refcount_dec_and_test(&qdisc->refcnt))
+ return;
--- /dev/null
+From foo@baz Thu 19 Sep 2019 03:07:06 PM CEST
+From: Willem de Bruijn <willemb@google.com>
+Date: Thu, 12 Sep 2019 21:16:39 -0400
+Subject: udp: correct reuseport selection with connected sockets
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit acdcecc61285faed359f1a3568c32089cc3a8329 ]
+
+UDP reuseport groups can hold a mix of unconnected and connected
+sockets. Ensure that connected sockets receive all traffic destined to
+their 4-tuple, and only that traffic.
+
+Fast reuseport returns on the first reuseport match, on the assumption
+that all matches are equal. Only when connections are present does the
+lookup fall back to the previous behavior of scoring all sockets.
+
+Record if connections are present and if so (1) treat such connected
+sockets as an independent match from the group, (2) only return
+2-tuple matches from reuseport and (3) do not return on the first
+2-tuple reuseport match to allow for a higher scoring match later.
+
+The new field has_conns is set without locks. No other fields in the
+bitmap are modified at runtime, and the field is only ever set
+unconditionally, so a read-modify-write cannot miss a change.
+
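+For illustration, here is a user-space sketch of the fallback selection
+loop this patch adds to reuseport_select_sock(); the struct, the state
+constant and the hash function are stand-ins, not kernel types. Start
+at the hashed index, skip connected sockets, and give up if the walk
+wraps around to where it started:
+
+    #include <stdio.h>
+
+    #define ESTABLISHED 1           /* stand-in for TCP_ESTABLISHED */
+
+    struct fake_sock { int state; };
+
+    static struct fake_sock *pick(struct fake_sock **socks, int num,
+                                  unsigned int hash)
+    {
+            int i, j;
+
+            i = j = hash % num;     /* stand-in for reciprocal_scale() */
+            while (socks[i]->state == ESTABLISHED) {
+                    if (++i >= num)
+                            i = 0;
+                    if (i == j)
+                            return NULL;    /* every socket is connected */
+            }
+            return socks[i];
+    }
+
+    int main(void)
+    {
+            struct fake_sock a = { ESTABLISHED }, b = { 0 };
+            struct fake_sock *socks[] = { &a, &b };
+
+            /* the hash lands on the connected socket; the loop moves on */
+            printf("picked the unconnected socket: %d\n",
+                   pick(socks, 2, 0) == &b);
+            return 0;
+    }
+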
+Fixes: e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
+Link: http://lkml.kernel.org/r/CA+FuTSfRP09aJNYRt04SS6qj22ViiOEWaWmLAwX0psk8-PGNxw@mail.gmail.com
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Craig Gallek <kraig@google.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock_reuseport.h | 21 ++++++++++++++++++++-
+ net/core/sock_reuseport.c | 15 +++++++++++++--
+ net/ipv4/datagram.c | 2 ++
+ net/ipv4/udp.c | 5 +++--
+ net/ipv6/datagram.c | 2 ++
+ net/ipv6/udp.c | 5 +++--
+ 6 files changed, 43 insertions(+), 7 deletions(-)
+
+--- a/include/net/sock_reuseport.h
++++ b/include/net/sock_reuseport.h
+@@ -21,7 +21,8 @@ struct sock_reuseport {
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+- bool bind_inany;
++ unsigned int bind_inany:1;
++ unsigned int has_conns:1;
+ struct bpf_prog __rcu *prog; /* optional BPF sock selector */
+ struct sock *socks[0]; /* array of sock pointers */
+ };
+@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_soc
+ struct sk_buff *skb,
+ int hdr_len);
+ extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
++
++static inline bool reuseport_has_conns(struct sock *sk, bool set)
++{
++ struct sock_reuseport *reuse;
++ bool ret = false;
++
++ rcu_read_lock();
++ reuse = rcu_dereference(sk->sk_reuseport_cb);
++ if (reuse) {
++ if (set)
++ reuse->has_conns = 1;
++ ret = reuse->has_conns;
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++
+ int reuseport_get_id(struct sock_reuseport *reuse);
+
+ #endif /* _SOCK_REUSEPORT_H */
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -292,8 +292,19 @@ struct sock *reuseport_select_sock(struc
+
+ select_by_hash:
+ /* no bpf or invalid bpf result: fall back to hash usage */
+- if (!sk2)
+- sk2 = reuse->socks[reciprocal_scale(hash, socks)];
++ if (!sk2) {
++ int i, j;
++
++ i = j = reciprocal_scale(hash, socks);
++ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
++ i++;
++ if (i >= reuse->num_socks)
++ i = 0;
++ if (i == j)
++ goto out;
++ }
++ sk2 = reuse->socks[i];
++ }
+ }
+
+ out:
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -19,6 +19,7 @@
+ #include <net/sock.h>
+ #include <net/route.h>
+ #include <net/tcp_states.h>
++#include <net/sock_reuseport.h>
+
+ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+@@ -73,6 +74,7 @@ int __ip4_datagram_connect(struct sock *
+ }
+ inet->inet_daddr = fl4->daddr;
+ inet->inet_dport = usin->sin_port;
++ reuseport_has_conns(sk, true);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ inet->inet_id = jiffies;
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -443,12 +443,13 @@ static struct sock *udp4_lib_lookup2(str
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
+- if (sk->sk_reuseport) {
++ if (sk->sk_reuseport &&
++ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
+ result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+- if (result)
++ if (result && !reuseport_has_conns(sk, false))
+ return result;
+ }
+ badness = score;
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -31,6 +31,7 @@
+ #include <net/ip6_route.h>
+ #include <net/tcp_states.h>
+ #include <net/dsfield.h>
++#include <net/sock_reuseport.h>
+
+ #include <linux/errqueue.h>
+ #include <linux/uaccess.h>
+@@ -258,6 +259,7 @@ ipv4_connected:
+ goto out;
+ }
+
++ reuseport_has_conns(sk, true);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ out:
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -177,13 +177,14 @@ static struct sock *udp6_lib_lookup2(str
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
+- if (sk->sk_reuseport) {
++ if (sk->sk_reuseport &&
++ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
+
+ result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+- if (result)
++ if (result && !reuseport_has_conns(sk, false))
+ return result;
+ }
+ result = sk;
--- /dev/null
+From foo@baz Thu 19 Sep 2019 03:07:06 PM CEST
+From: Dongli Zhang <dongli.zhang@oracle.com>
+Date: Mon, 16 Sep 2019 11:46:59 +0800
+Subject: xen-netfront: do not assume sk_buff_head list is empty in error handling
+
+From: Dongli Zhang <dongli.zhang@oracle.com>
+
+[ Upstream commit 00b368502d18f790ab715e055869fd4bb7484a9b ]
+
+When skb_shinfo(skb) cannot hold an extra fragment (that is,
+skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS), xennet_fill_frags() assumes
+the sk_buff_head list is already empty. As a result, cons is increased
+only by 1 and the function returns to the error handling path in
+xennet_poll().
+
+However, if the sk_buff_head list is not empty, queue->rx.rsp_cons may be
+set incorrectly. That is, queue->rx.rsp_cons would point to rx ring
+buffer entries whose queue->rx_skbs[i] and queue->grant_rx_ref[i] have
+already been cleared to NULL. This leads to a NULL pointer dereference
+when the next iteration processes those rx ring buffer entries.
+
+Below is how xennet_poll() does its error handling: all remaining entries
+in tmpq are accounted to queue->rx.rsp_cons without assuming how many
+outstanding skbs remain in the list.
+
+ 985 static int xennet_poll(struct napi_struct *napi, int budget)
+... ...
+1032 if (unlikely(xennet_set_skb_gso(skb, gso))) {
+1033 __skb_queue_head(&tmpq, skb);
+1034 queue->rx.rsp_cons += skb_queue_len(&tmpq);
+1035 goto err;
+1036 }
+
+It is better to always handle this error in the same way.
+
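+As a rough worked example of the accounting change (all numbers made
+up): with the ring consumer index at 10 and three skbs still queued on
+the temporary list when the fragment limit is hit, the old code set
+rsp_cons to 11 and later revisited three already-cleared slots, while
+the fixed code sets it to 14 and skips past them, matching what the
+generic error path in xennet_poll() does:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            unsigned int cons = 10;         /* ring consumer index (made up) */
+            unsigned int queued = 3;        /* skbs still on the temporary list */
+
+            unsigned int before = cons + 1;          /* old: rsp_cons = ++cons */
+            unsigned int after = cons + 1 + queued;  /* new: also skip queued entries */
+
+            printf("old rsp_cons=%u, fixed rsp_cons=%u\n", before, after);
+            return 0;
+    }
+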
+Fixes: ad4f15dc2c70 ("xen/netfront: don't bug in case of too many frags")
+Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -909,7 +909,7 @@ static RING_IDX xennet_fill_frags(struct
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+- queue->rx.rsp_cons = ++cons;
++ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ kfree_skb(nskb);
+ return ~0U;
+ }