--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Fri, 4 Oct 2019 04:06:15 +0530
+Subject: cxgb4:Fix out-of-bounds MSI-X info array access
+
+From: Vishal Kulkarni <vishal@chelsio.com>
+
+[ Upstream commit 6b517374f4ea5a3c6e307e1219ec5f35d42e6d00 ]
+
+When fetching free MSI-X vectors for ULDs, check for the error code
+before accessing MSI-X info array. Otherwise, an out-of-bounds access is
+attempted, which results in kernel panic.
+
+Fixes: 94cdb8bb993a ("cxgb4: Add support for dynamic allocation of resources for ULD")
+Signed-off-by: Shahjada Abul Husain <shahjada@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq
+ static int alloc_uld_rxqs(struct adapter *adap,
+ struct sge_uld_rxq_info *rxq_info, bool lro)
+ {
+- struct sge *s = &adap->sge;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
++ int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
+- unsigned int bmap_idx = 0;
++ struct sge *s = &adap->sge;
+ unsigned int per_chan;
+- int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
+
+@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter
+
+ if (msi_idx >= 0) {
+ bmap_idx = get_msix_idx_from_bmap(adap);
++ if (bmap_idx < 0) {
++ err = -ENOSPC;
++ goto freeout;
++ }
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+ }
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+Date: Fri, 27 Sep 2019 14:58:20 +0800
+Subject: erspan: remove the incorrect mtu limit for erspan
+
+From: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+
+[ Upstream commit 0e141f757b2c78c983df893e9993313e2dc21e38 ]
+
+erspan driver calls ether_setup(), after commit 61e84623ace3
+("net: centralize net_device min/max MTU checking"), the range
+of mtu is [min_mtu, max_mtu], which is [68, 1500] by default.
+
+It causes the dev mtu of the erspan device to not be greater
+than 1500, this limit value is not correct for ipgre tap device.
+
+Tested:
+Before patch:
+# ip link set erspan0 mtu 1600
+Error: mtu greater than device maximum.
+After patch:
+# ip link set erspan0 mtu 1600
+# ip -d link show erspan0
+21: erspan0@NONE: <BROADCAST,MULTICAST> mtu 1600 qdisc noop state DOWN
+mode DEFAULT group default qlen 1000
+ link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 0
+
+Fixes: 61e84623ace3 ("net: centralize net_device min/max MTU checking")
+Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_gre.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_devi
+ struct ip_tunnel *t = netdev_priv(dev);
+
+ ether_setup(dev);
++ dev->max_mtu = 0;
+ dev->netdev_ops = &erspan_netdev_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 30 Sep 2019 17:12:41 +0200
+Subject: hso: fix NULL-deref on tty open
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 8353da9fa69722b54cba82b2ec740afd3d438748 ]
+
+Fix NULL-pointer dereference on tty open due to a failure to handle a
+missing interrupt-in endpoint when probing modem ports:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000006
+ ...
+ RIP: 0010:tiocmget_submit_urb+0x1c/0xe0 [hso]
+ ...
+ Call Trace:
+ hso_start_serial_device+0xdc/0x140 [hso]
+ hso_serial_open+0x118/0x1b0 [hso]
+ tty_open+0xf1/0x490
+
+Fixes: 542f54823614 ("tty: Modem functions for the HSO driver")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/hso.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bul
+ */
+ if (serial->tiocmget) {
+ tiocmget = serial->tiocmget;
++ tiocmget->endp = hso_get_ep(interface,
++ USB_ENDPOINT_XFER_INT,
++ USB_DIR_IN);
++ if (!tiocmget->endp) {
++ dev_err(&interface->dev, "Failed to find INT IN ep\n");
++ goto exit;
++ }
++
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (tiocmget->urb) {
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+- tiocmget->endp = hso_get_ep(
+- interface,
+- USB_ENDPOINT_XFER_INT,
+- USB_DIR_IN);
+ } else
+ hso_free_tiomget(serial);
+ }
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Oct 2019 09:38:55 -0700
+Subject: ipv6: drop incoming packets having a v4mapped source address
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6af1799aaf3f1bc8defedddfa00df3192445bbf3 ]
+
+This began with a syzbot report. syzkaller was injecting
+IPv6 TCP SYN packets having a v4mapped source address.
+
+After an unsuccessful 4-tuple lookup, TCP creates a request
+socket (SYN_RECV) and calls reqsk_queue_hash_req()
+
+reqsk_queue_hash_req() calls sk_ehashfn(sk)
+
+At this point we have AF_INET6 sockets, and the heuristic
+used by sk_ehashfn() to either hash the IPv4 or IPv6 addresses
+is to use ipv6_addr_v4mapped(&sk->sk_v6_daddr)
+
+For the particular spoofed packet, we end up hashing V4 addresses
+which were not initialized by the TCP IPv6 stack, so KMSAN fired
+a warning.
+
+I first fixed sk_ehashfn() to test both source and destination addresses,
+but then faced various problems, including user-space programs
+like packetdrill that had similar assumptions.
+
+Instead of trying to fix the whole ecosystem, it is better
+to admit that we have a dual stack behavior, and that we
+can not build linux kernels without V4 stack anyway.
+
+The dual stack API automatically forces the traffic to be IPv4
+if v4mapped addresses are used at bind() or connect(), so it makes
+no sense to allow IPv6 traffic to use the same v4mapped class.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_input.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -221,6 +221,16 @@ static struct sk_buff *ip6_rcv_core(stru
+ if (ipv6_addr_is_multicast(&hdr->saddr))
+ goto err;
+
++ /* While RFC4291 is not explicit about v4mapped addresses
++ * in IPv6 headers, it seems clear linux dual-stack
++ * model can not deal properly with these.
++ * Security models could be fooled by ::ffff:127.0.0.1 for example.
++ *
++ * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
++ */
++ if (ipv6_addr_v4mapped(&hdr->saddr))
++ goto err;
++
+ skb->transport_header = skb->network_header + sizeof(*hdr);
+ IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: David Ahern <dsahern@gmail.com>
+Date: Fri, 4 Oct 2019 08:03:09 -0700
+Subject: ipv6: Handle missing host route in __ipv6_ifa_notify
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit 2d819d250a1393a3e725715425ab70a0e0772a71 ]
+
+Rajendra reported a kernel panic when a link was taken down:
+
+ [ 6870.263084] BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8
+ [ 6870.271856] IP: [<ffffffff8efc5764>] __ipv6_ifa_notify+0x154/0x290
+
+ <snip>
+
+ [ 6870.570501] Call Trace:
+ [ 6870.573238] [<ffffffff8efc58c6>] ? ipv6_ifa_notify+0x26/0x40
+ [ 6870.579665] [<ffffffff8efc98ec>] ? addrconf_dad_completed+0x4c/0x2c0
+ [ 6870.586869] [<ffffffff8efe70c6>] ? ipv6_dev_mc_inc+0x196/0x260
+ [ 6870.593491] [<ffffffff8efc9c6a>] ? addrconf_dad_work+0x10a/0x430
+ [ 6870.600305] [<ffffffff8f01ade4>] ? __switch_to_asm+0x34/0x70
+ [ 6870.606732] [<ffffffff8ea93a7a>] ? process_one_work+0x18a/0x430
+ [ 6870.613449] [<ffffffff8ea93d6d>] ? worker_thread+0x4d/0x490
+ [ 6870.619778] [<ffffffff8ea93d20>] ? process_one_work+0x430/0x430
+ [ 6870.626495] [<ffffffff8ea99dd9>] ? kthread+0xd9/0xf0
+ [ 6870.632145] [<ffffffff8f01ade4>] ? __switch_to_asm+0x34/0x70
+ [ 6870.638573] [<ffffffff8ea99d00>] ? kthread_park+0x60/0x60
+ [ 6870.644707] [<ffffffff8f01ae77>] ? ret_from_fork+0x57/0x70
+ [ 6870.650936] Code: 31 c0 31 d2 41 b9 20 00 08 02 b9 09 00 00 0
+
+addrconf_dad_work is kicked to be scheduled when a device is brought
+up. There is a race between addrcond_dad_work getting scheduled and
+taking the rtnl lock and a process taking the link down (under rtnl).
+The latter removes the host route from the inet6_addr as part of
+addrconf_ifdown which is run for NETDEV_DOWN. The former attempts
+to use the host route in __ipv6_ifa_notify. If the down event removes
+the host route due to the race to the rtnl, then the BUG listed above
+occurs.
+
+Since the DAD sequence can not be aborted, add a check for the missing
+host route in __ipv6_ifa_notify. The only way this should happen is due
+to the previously mentioned race. The host route is created when the
+address is added to an interface; it is only removed on a down event
+where the address is kept. Add a warning if the host route is missing
+AND the device is up; this is a situation that should never happen.
+
+Fixes: f1705ec197e7 ("net: ipv6: Make address flushing on ifdown optional")
+Reported-by: Rajendra Dendukuri <rajendra.dendukuri@broadcom.com>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5962,13 +5962,20 @@ static void __ipv6_ifa_notify(int event,
+ switch (event) {
+ case RTM_NEWADDR:
+ /*
+- * If the address was optimistic
+- * we inserted the route at the start of
+- * our DAD process, so we don't need
+- * to do it again
++ * If the address was optimistic we inserted the route at the
++ * start of our DAD process, so we don't need to do it again.
++ * If the device was taken down in the middle of the DAD
++ * cycle there is a race where we could get here without a
++ * host route, so nothing to insert. That will be fixed when
++ * the device is brought up.
+ */
+- if (!rcu_access_pointer(ifp->rt->fib6_node))
++ if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
+ ip6_ins_rt(net, ifp->rt);
++ } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
++ pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
++ &ifp->addr, ifp->idev->dev->name);
++ }
++
+ if (ifp->idev->cnf.forwarding)
+ addrconf_join_anycast(ifp);
+ if (!ipv6_addr_any(&ifp->peer_addr))
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Tue, 1 Oct 2019 16:28:43 +0200
+Subject: net: dsa: rtl8366: Check VLAN ID and not ports
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit e8521e53cca584ddf8ec4584d3c550a6c65f88c4 ]
+
+There has been some confusion between the port number and
+the VLAN ID in this driver. What we need to check for
+validity is the VLAN ID, nothing else.
+
+The current confusion came from assigning a few default
+VLANs for default routing and we need to rewrite that
+properly.
+
+Instead of checking if the port number is a valid VLAN
+ID, check the actual VLAN IDs passed in to the callback
+one by one as expected.
+
+Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver")
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/rtl8366.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_swit
+ const struct switchdev_obj_port_vlan *vlan)
+ {
+ struct realtek_smi *smi = ds->priv;
++ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return -EINVAL;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return -EINVAL;
+
+ dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+ vlan->vid_begin, vlan->vid_end);
+@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch
+ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return;
+
+ dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ port,
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Fri, 4 Oct 2019 15:11:17 +0200
+Subject: net: ipv4: avoid mixed n_redirects and rate_tokens usage
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit b406472b5ad79ede8d10077f0c8f05505ace8b6d ]
+
+Since commit c09551c6ff7f ("net: ipv4: use a dedicated counter
+for icmp_v4 redirect packets") we use 'n_redirects' to account
+for redirect packets, but we still use 'rate_tokens' to compute
+the redirect packets exponential backoff.
+
+If the device sent to the relevant peer any ICMP error packet
+after sending a redirect, it will also update 'rate_token' according
+to the leaking bucket schema; typically 'rate_token' will raise
+above BITS_PER_LONG and the redirect packets backoff algorithm
+will produce undefined behavior.
+
+Fix the issue using 'n_redirects' to compute the exponential backoff
+in ip_rt_send_redirect().
+
+Note that we still clear rate_tokens after a redirect silence period,
+to avoid changing an established behaviour.
+
+The root cause predates git history; before the mentioned commit in
+the critical scenario, the kernel stopped sending redirects; after
+the mentioned commit the behavior is more random.
+
+Reported-by: Xiumei Mu <xmu@redhat.com>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Fixes: c09551c6ff7f ("net: ipv4: use a dedicated counter for icmp_v4 redirect packets")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -915,16 +915,15 @@ void ip_rt_send_redirect(struct sk_buff
+ if (peer->rate_tokens == 0 ||
+ time_after(jiffies,
+ (peer->rate_last +
+- (ip_rt_redirect_load << peer->rate_tokens)))) {
++ (ip_rt_redirect_load << peer->n_redirects)))) {
+ __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
+
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+- ++peer->rate_tokens;
+ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+- peer->rate_tokens == ip_rt_redirect_number)
++ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 4 Oct 2019 15:24:39 -0500
+Subject: net: qlogic: Fix memory leak in ql_alloc_large_buffers
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+[ Upstream commit 1acb8f2a7a9f10543868ddd737e37424d5c36cf4 ]
+
+In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb.
+This skb should be released if pci_dma_mapping_error fails.
+
+Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qla3xxx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
++ dev_kfree_skb_irq(skb);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Dotan Barak <dotanb@dev.mellanox.co.il>
+Date: Tue, 1 Oct 2019 10:21:02 -0700
+Subject: net/rds: Fix error handling in rds_ib_add_one()
+
+From: Dotan Barak <dotanb@dev.mellanox.co.il>
+
+[ Upstream commit d64bf89a75b65f83f06be9fb8f978e60d53752db ]
+
+rds_ibdev:ipaddr_list and rds_ibdev:conn_list are initialized
+after allocating some resources such as the protection domain.
+If allocation of such resources fails, then these uninitialized
+variables are accessed in rds_ib_dev_free() in the failure path. This
+can potentially crash the system. The code has been updated to
+initialize these variables very early in the function.
+
+Signed-off-by: Dotan Barak <dotanb@dev.mellanox.co.il>
+Signed-off-by: Sudhakar Dindukurti <sudhakar.dindukurti@oracle.com>
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/ib.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_dev
+ refcount_set(&rds_ibdev->refcount, 1);
+ INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+
++ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
++ INIT_LIST_HEAD(&rds_ibdev->conn_list);
++
+ rds_ibdev->max_wrs = device->attrs.max_qp_wr;
+ rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
+
+@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_dev
+ device->name,
+ rds_ibdev->use_fastreg ? "FRMR" : "FMR");
+
+- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+- INIT_LIST_HEAD(&rds_ibdev->conn_list);
+-
+ down_write(&rds_ib_devices_lock);
+ list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+ up_write(&rds_ib_devices_lock);
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Vladimir Oltean <olteanv@gmail.com>
+Date: Sun, 29 Sep 2019 02:39:48 +0300
+Subject: net: sched: cbs: Avoid division by zero when calculating the port rate
+
+From: Vladimir Oltean <olteanv@gmail.com>
+
+[ Upstream commit 83c8c3cf45163f0c823db37be6ab04dfcf8ac751 ]
+
+As explained in the "net: sched: taprio: Avoid division by zero on
+invalid link speed" commit, it is legal for the ethtool API to return
+zero as a link speed. So guard against it to ensure we don't perform a
+division by zero in kernel.
+
+Fixes: e0a7683d30e9 ("net/sched: cbs: fix port_rate miscalculation")
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Vladimir Oltean <olteanv@gmail.com>
+Date: Sun, 29 Sep 2019 02:37:22 +0300
+Subject: net: sched: taprio: Avoid division by zero on invalid link speed
+
+From: Vladimir Oltean <olteanv@gmail.com>
+
+[ Upstream commit 9a9251a3534745d08a92abfeca0ca467b912b5f6 ]
+
+The check in taprio_set_picos_per_byte is currently not robust enough
+and will trigger this division by zero, due to e.g. PHYLINK not setting
+kset->base.speed when there is no PHY connected:
+
+[ 27.109992] Division by zero in kernel.
+[ 27.113842] CPU: 1 PID: 198 Comm: tc Not tainted 5.3.0-rc5-01246-gc4006b8c2637-dirty #212
+[ 27.121974] Hardware name: Freescale LS1021A
+[ 27.126234] [<c03132e0>] (unwind_backtrace) from [<c030d8b8>] (show_stack+0x10/0x14)
+[ 27.133938] [<c030d8b8>] (show_stack) from [<c10b21b0>] (dump_stack+0xb0/0xc4)
+[ 27.141124] [<c10b21b0>] (dump_stack) from [<c10af97c>] (Ldiv0_64+0x8/0x18)
+[ 27.148052] [<c10af97c>] (Ldiv0_64) from [<c0700260>] (div64_u64+0xcc/0xf0)
+[ 27.154978] [<c0700260>] (div64_u64) from [<c07002d0>] (div64_s64+0x4c/0x68)
+[ 27.161993] [<c07002d0>] (div64_s64) from [<c0f3d890>] (taprio_set_picos_per_byte+0xe8/0xf4)
+[ 27.170388] [<c0f3d890>] (taprio_set_picos_per_byte) from [<c0f3f614>] (taprio_change+0x668/0xcec)
+[ 27.179302] [<c0f3f614>] (taprio_change) from [<c0f2bc24>] (qdisc_create+0x1fc/0x4f4)
+[ 27.187091] [<c0f2bc24>] (qdisc_create) from [<c0f2c0c8>] (tc_modify_qdisc+0x1ac/0x6f8)
+[ 27.195055] [<c0f2c0c8>] (tc_modify_qdisc) from [<c0ee9604>] (rtnetlink_rcv_msg+0x268/0x2dc)
+[ 27.203449] [<c0ee9604>] (rtnetlink_rcv_msg) from [<c0f4fef0>] (netlink_rcv_skb+0xe0/0x114)
+[ 27.211756] [<c0f4fef0>] (netlink_rcv_skb) from [<c0f4f6cc>] (netlink_unicast+0x1b4/0x22c)
+[ 27.219977] [<c0f4f6cc>] (netlink_unicast) from [<c0f4fa84>] (netlink_sendmsg+0x284/0x340)
+[ 27.228198] [<c0f4fa84>] (netlink_sendmsg) from [<c0eae5fc>] (sock_sendmsg+0x14/0x24)
+[ 27.235988] [<c0eae5fc>] (sock_sendmsg) from [<c0eaedf8>] (___sys_sendmsg+0x214/0x228)
+[ 27.243863] [<c0eaedf8>] (___sys_sendmsg) from [<c0eb015c>] (__sys_sendmsg+0x50/0x8c)
+[ 27.251652] [<c0eb015c>] (__sys_sendmsg) from [<c0301000>] (ret_fast_syscall+0x0/0x54)
+[ 27.259524] Exception stack(0xe8045fa8 to 0xe8045ff0)
+[ 27.264546] 5fa0: b6f608c8 000000f8 00000003 bed7e2f0 00000000 00000000
+[ 27.272681] 5fc0: b6f608c8 000000f8 004ce54c 00000128 5d3ce8c7 00000000 00000026 00505c9c
+[ 27.280812] 5fe0: 00000070 bed7e298 004ddd64 b6dd1e64
+
+Russell King points out that the ethtool API says zero is a valid return
+value of __ethtool_get_link_ksettings:
+
+ * If it is enabled then they are read-only; if the link
+ * is up they represent the negotiated link mode; if the link is down,
+ * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and
+ * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode.
+
+ So, it seems that taprio is not following the API... I'd suggest either
+ fixing taprio, or getting agreement to change the ethtool API.
+
+The chosen path was to fix taprio.
+
+Fixes: 7b9eba7ba0c1 ("net/sched: taprio: fix picos_per_byte miscalculation")
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_taprio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -668,7 +668,7 @@ static void taprio_set_picos_per_byte(st
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Vladimir Oltean <olteanv@gmail.com>
+Date: Sun, 29 Sep 2019 02:01:39 +0300
+Subject: net: sched: taprio: Fix potential integer overflow in taprio_set_picos_per_byte
+
+From: Vladimir Oltean <olteanv@gmail.com>
+
+[ Upstream commit 68ce6688a5baefde30914fc07fc27292dbbe8320 ]
+
+The speed divisor is used in a context expecting an s64, but it is
+evaluated using 32-bit arithmetic.
+
+To avoid that happening, instead of multiplying by 1,000,000 in the
+first place, simplify the fraction and do a standard 32 bit division
+instead.
+
+Fixes: f04b514c0ce2 ("taprio: Set default link speed to 10 Mbps in taprio_set_picos_per_byte")
+Reported-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Acked-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_taprio.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -672,8 +672,7 @@ static void taprio_set_picos_per_byte(st
+ speed = ecmd.base.speed;
+
+ skip:
+- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+- speed * 1000 * 1000);
++ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Martin KaFai Lau <kafai@fb.com>
+Date: Fri, 27 Sep 2019 16:00:31 -0700
+Subject: net: Unpublish sk from sk_reuseport_cb before call_rcu
+
+From: Martin KaFai Lau <kafai@fb.com>
+
+[ Upstream commit 8c7138b33e5c690c308b2a7085f6313fdcb3f616 ]
+
+The "reuse->sock[]" array is shared by multiple sockets. The going away
+sk must unpublish itself from "reuse->sock[]" before making call_rcu()
+call. However, this unpublish-action is currently done after a grace
+period and it may cause use-after-free.
+
+The fix is to move reuseport_detach_sock() to sk_destruct().
+Due to the above reason, any socket with sk_reuseport_cb has
+to go through the rcu grace period before freeing it.
+
+It is a rather old bug (~3 yrs). The Fixes tag is not necessary
+the right commit but it is the one that introduced the SOCK_RCU_FREE
+logic and this fix is depending on it.
+
+Fixes: a4298e4522d6 ("net: add SOCK_RCU_FREE socket flag")
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1696,8 +1696,6 @@ static void __sk_destruct(struct rcu_hea
+ sk_filter_uncharge(sk, filter);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
+ }
+- if (rcu_access_pointer(sk->sk_reuseport_cb))
+- reuseport_detach_sock(sk);
+
+ sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
+
+@@ -1724,7 +1722,14 @@ static void __sk_destruct(struct rcu_hea
+
+ void sk_destruct(struct sock *sk)
+ {
+- if (sock_flag(sk, SOCK_RCU_FREE))
++ bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
++
++ if (rcu_access_pointer(sk->sk_reuseport_cb)) {
++ reuseport_detach_sock(sk);
++ use_call_rcu = true;
++ }
++
++ if (use_call_rcu)
+ call_rcu(&sk->sk_rcu, __sk_destruct);
+ else
+ __sk_destruct(&sk->sk_rcu);
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 4 Oct 2019 11:08:34 -0700
+Subject: nfc: fix memory leak in llcp_sock_bind()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a0c2dc1fe63e2869b74c1c7f6a81d1745c8a695d ]
+
+sysbot reported a memory leak after a bind() has failed.
+
+While we are at it, abort the operation if kmemdup() has failed.
+
+BUG: memory leak
+unreferenced object 0xffff888105d83ec0 (size 32):
+ comm "syz-executor067", pid 7207, jiffies 4294956228 (age 19.430s)
+ hex dump (first 32 bytes):
+ 00 69 6c 65 20 72 65 61 64 00 6e 65 74 3a 5b 34 .ile read.net:[4
+ 30 32 36 35 33 33 30 39 37 5d 00 00 00 00 00 00 026533097]......
+ backtrace:
+ [<0000000036bac473>] kmemleak_alloc_recursive /./include/linux/kmemleak.h:43 [inline]
+ [<0000000036bac473>] slab_post_alloc_hook /mm/slab.h:522 [inline]
+ [<0000000036bac473>] slab_alloc /mm/slab.c:3319 [inline]
+ [<0000000036bac473>] __do_kmalloc /mm/slab.c:3653 [inline]
+ [<0000000036bac473>] __kmalloc_track_caller+0x169/0x2d0 /mm/slab.c:3670
+ [<000000000cd39d07>] kmemdup+0x27/0x60 /mm/util.c:120
+ [<000000008e57e5fc>] kmemdup /./include/linux/string.h:432 [inline]
+ [<000000008e57e5fc>] llcp_sock_bind+0x1b3/0x230 /net/nfc/llcp_sock.c:107
+ [<000000009cb0b5d3>] __sys_bind+0x11c/0x140 /net/socket.c:1647
+ [<00000000492c3bbc>] __do_sys_bind /net/socket.c:1658 [inline]
+ [<00000000492c3bbc>] __se_sys_bind /net/socket.c:1656 [inline]
+ [<00000000492c3bbc>] __x64_sys_bind+0x1e/0x30 /net/socket.c:1656
+ [<0000000008704b2a>] do_syscall_64+0x76/0x1a0 /arch/x86/entry/common.c:296
+ [<000000009f4c57a4>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: 30cc4587659e ("NFC: Move LLCP code to the NFC top level diirectory")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/llcp_sock.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket
+ llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+ llcp_sock->service_name_len,
+ GFP_KERNEL);
+-
++ if (!llcp_sock->service_name) {
++ ret = -ENOMEM;
++ goto put_dev;
++ }
+ llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
++ kfree(llcp_sock->service_name);
++ llcp_sock->service_name = NULL;
+ ret = -EADDRINUSE;
+ goto put_dev;
+ }
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Vladimir Oltean <olteanv@gmail.com>
+Date: Tue, 1 Oct 2019 22:07:01 +0300
+Subject: ptp_qoriq: Initialize the registers' spinlock before calling ptp_qoriq_settime
+
+From: Vladimir Oltean <olteanv@gmail.com>
+
+[ Upstream commit db34a4714c013b644eec2de0ec81b1f0373b8b93 ]
+
+Because ptp_qoriq_settime is being called prior to spin_lock_init, the
+following stack trace can be seen at driver probe time:
+
+[ 2.269117] the code is fine but needs lockdep annotation.
+[ 2.274569] turning off the locking correctness validator.
+[ 2.280027] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.3.0-rc7-01478-g01eaa67a4797 #263
+[ 2.288073] Hardware name: Freescale LS1021A
+[ 2.292337] [<c0313cb4>] (unwind_backtrace) from [<c030e11c>] (show_stack+0x10/0x14)
+[ 2.300045] [<c030e11c>] (show_stack) from [<c1219440>] (dump_stack+0xcc/0xf8)
+[ 2.307235] [<c1219440>] (dump_stack) from [<c03b9b44>] (register_lock_class+0x730/0x73c)
+[ 2.315372] [<c03b9b44>] (register_lock_class) from [<c03b6190>] (__lock_acquire+0x78/0x270c)
+[ 2.323856] [<c03b6190>] (__lock_acquire) from [<c03b90cc>] (lock_acquire+0xe0/0x22c)
+[ 2.331649] [<c03b90cc>] (lock_acquire) from [<c123c310>] (_raw_spin_lock_irqsave+0x54/0x68)
+[ 2.340048] [<c123c310>] (_raw_spin_lock_irqsave) from [<c0e73fe4>] (ptp_qoriq_settime+0x38/0x80)
+[ 2.348878] [<c0e73fe4>] (ptp_qoriq_settime) from [<c0e746d4>] (ptp_qoriq_init+0x1f8/0x484)
+[ 2.357189] [<c0e746d4>] (ptp_qoriq_init) from [<c0e74aac>] (ptp_qoriq_probe+0xd0/0x184)
+[ 2.365243] [<c0e74aac>] (ptp_qoriq_probe) from [<c0b0a07c>] (platform_drv_probe+0x48/0x9c)
+[ 2.373555] [<c0b0a07c>] (platform_drv_probe) from [<c0b07a14>] (really_probe+0x1c4/0x400)
+[ 2.381779] [<c0b07a14>] (really_probe) from [<c0b07e28>] (driver_probe_device+0x78/0x1b8)
+[ 2.390003] [<c0b07e28>] (driver_probe_device) from [<c0b081d0>] (device_driver_attach+0x58/0x60)
+[ 2.398832] [<c0b081d0>] (device_driver_attach) from [<c0b082d4>] (__driver_attach+0xfc/0x160)
+[ 2.407402] [<c0b082d4>] (__driver_attach) from [<c0b05a84>] (bus_for_each_dev+0x68/0xb4)
+[ 2.415539] [<c0b05a84>] (bus_for_each_dev) from [<c0b06b68>] (bus_add_driver+0x104/0x20c)
+[ 2.423763] [<c0b06b68>] (bus_add_driver) from [<c0b0909c>] (driver_register+0x78/0x10c)
+[ 2.431815] [<c0b0909c>] (driver_register) from [<c030313c>] (do_one_initcall+0x8c/0x3ac)
+[ 2.439954] [<c030313c>] (do_one_initcall) from [<c1f013f4>] (kernel_init_freeable+0x468/0x548)
+[ 2.448610] [<c1f013f4>] (kernel_init_freeable) from [<c12344d8>] (kernel_init+0x8/0x10c)
+[ 2.456745] [<c12344d8>] (kernel_init) from [<c03010b4>] (ret_from_fork+0x14/0x20)
+[ 2.464273] Exception stack(0xea89ffb0 to 0xea89fff8)
+[ 2.469297] ffa0: 00000000 00000000 00000000 00000000
+[ 2.477432] ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+[ 2.485566] ffe0: 00000000 00000000 00000000 00000000 00000013 00000000
+
+Fixes: ff54571a747b ("ptp_qoriq: convert to use ptp_qoriq_init/free")
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/ptp/ptp_qoriq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp
+ ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
++ spin_lock_init(&ptp_qoriq->lock);
++
+ ktime_get_real_ts64(&now);
+ ptp_qoriq_settime(&ptp_qoriq->caps, &now);
+
+@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp
+ (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+- spin_lock_init(&ptp_qoriq->lock);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ regs = &ptp_qoriq->regs;
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Reinhard Speyerer <rspmn@arcor.de>
+Date: Thu, 3 Oct 2019 18:34:39 +0200
+Subject: qmi_wwan: add support for Cinterion CLS8 devices
+
+From: Reinhard Speyerer <rspmn@arcor.de>
+
+[ Upstream commit cf74ac6db25d4002089e85cc623ad149ecc25614 ]
+
+Add support for Cinterion CLS8 devices.
+Use QMI_QUIRK_SET_DTR as required for Qualcomm MDM9x07 chipsets.
+
+T: Bus=01 Lev=03 Prnt=05 Port=01 Cnt=02 Dev#= 25 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=1e2d ProdID=00b0 Rev= 3.18
+S: Manufacturer=GEMALTO
+S: Product=USB Modem
+C:* #Ifs= 5 Cfg#= 1 Atr=80 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E: Ad=89(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: Reinhard Speyerer <rspmn@arcor.de>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1349,6 +1349,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: David Howells <dhowells@redhat.com>
+Date: Thu, 3 Oct 2019 17:44:44 +0100
+Subject: rxrpc: Fix rxrpc_recvmsg tracepoint
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit db9b2e0af605e7c994784527abfd9276cabd718a ]
+
+Fix the rxrpc_recvmsg tracepoint to handle being called with a NULL call
+parameter.
+
+Fixes: a25e21f0bcd2 ("rxrpc, afs: Use debug_ids rather than pointers in traces")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/trace/events/rxrpc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -1071,7 +1071,7 @@ TRACE_EVENT(rxrpc_recvmsg,
+ ),
+
+ TP_fast_assign(
+- __entry->call = call->debug_id;
++ __entry->call = call ? call->debug_id : 0;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 26 Sep 2019 18:24:43 -0700
+Subject: sch_cbq: validate TCA_CBQ_WRROPT to avoid crash
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e9789c7cc182484fc031fd88097eb14cb26c4596 ]
+
+syzbot reported a crash in cbq_normalize_quanta() caused
+by an out of range cl->priority.
+
+iproute2 enforces this check, but malicious users do not.
+
+kasan: CONFIG_KASAN_INLINE enabled
+kasan: GPF could be caused by NULL-ptr deref or user memory access
+general protection fault: 0000 [#1] SMP KASAN PTI
+Modules linked in:
+CPU: 1 PID: 26447 Comm: syz-executor.1 Not tainted 5.3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:cbq_normalize_quanta.part.0+0x1fd/0x430 net/sched/sch_cbq.c:902
+RSP: 0018:ffff8801a5c333b0 EFLAGS: 00010206
+RAX: 0000000020000003 RBX: 00000000fffffff8 RCX: ffffc9000712f000
+RDX: 00000000000043bf RSI: ffffffff83be8962 RDI: 0000000100000018
+RBP: ffff8801a5c33420 R08: 000000000000003a R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000000 R12: 00000000000002ef
+R13: ffff88018da95188 R14: dffffc0000000000 R15: 0000000000000015
+FS: 00007f37d26b1700(0000) GS:ffff8801dad00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00000000004c7cec CR3: 00000001bcd0a006 CR4: 00000000001626f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ [<ffffffff83be9d57>] cbq_normalize_quanta include/net/pkt_sched.h:27 [inline]
+ [<ffffffff83be9d57>] cbq_addprio net/sched/sch_cbq.c:1097 [inline]
+ [<ffffffff83be9d57>] cbq_set_wrr+0x2d7/0x450 net/sched/sch_cbq.c:1115
+ [<ffffffff83bee8a7>] cbq_change_class+0x987/0x225b net/sched/sch_cbq.c:1537
+ [<ffffffff83b96985>] tc_ctl_tclass+0x555/0xcd0 net/sched/sch_api.c:2329
+ [<ffffffff83a84655>] rtnetlink_rcv_msg+0x485/0xc10 net/core/rtnetlink.c:5248
+ [<ffffffff83cadf0a>] netlink_rcv_skb+0x17a/0x460 net/netlink/af_netlink.c:2510
+ [<ffffffff83a7db6d>] rtnetlink_rcv+0x1d/0x30 net/core/rtnetlink.c:5266
+ [<ffffffff83cac2c6>] netlink_unicast_kernel net/netlink/af_netlink.c:1324 [inline]
+ [<ffffffff83cac2c6>] netlink_unicast+0x536/0x720 net/netlink/af_netlink.c:1350
+ [<ffffffff83cacd4a>] netlink_sendmsg+0x89a/0xd50 net/netlink/af_netlink.c:1939
+ [<ffffffff8399d46e>] sock_sendmsg_nosec net/socket.c:673 [inline]
+ [<ffffffff8399d46e>] sock_sendmsg+0x12e/0x170 net/socket.c:684
+ [<ffffffff8399f1fd>] ___sys_sendmsg+0x81d/0x960 net/socket.c:2359
+ [<ffffffff839a2d05>] __sys_sendmsg+0x105/0x1d0 net/socket.c:2397
+ [<ffffffff839a2df9>] SYSC_sendmsg net/socket.c:2406 [inline]
+ [<ffffffff839a2df9>] SyS_sendmsg+0x29/0x30 net/socket.c:2404
+ [<ffffffff8101ccc8>] do_syscall_64+0x528/0x770 arch/x86/entry/common.c:305
+ [<ffffffff84400091>] entry_SYSCALL_64_after_hwframe+0x42/0xb7
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cbq.c | 43 +++++++++++++++++++++++++++++--------------
+ 1 file changed, 29 insertions(+), 14 deletions(-)
+
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_polic
+ [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
+ };
+
++static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
++ struct nlattr *opt,
++ struct netlink_ext_ack *extack)
++{
++ int err;
++
++ if (!opt) {
++ NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
++ cbq_policy, extack);
++ if (err < 0)
++ return err;
++
++ if (tb[TCA_CBQ_WRROPT]) {
++ const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
++
++ if (wrr->priority > TC_CBQ_MAXPRIO) {
++ NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
++ err = -EINVAL;
++ }
++ }
++ return err;
++}
++
+ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, s
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ q->delay_timer.function = cbq_undelay;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
+@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32
+ struct cbq_class *parent;
+ struct qdisc_rate_table *rtab = NULL;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 4 Oct 2019 10:34:45 -0700
+Subject: sch_dsmark: fix potential NULL deref in dsmark_init()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 474f0813a3002cb299bb73a5a93aa1f537a80ca8 ]
+
+Make sure TCA_DSMARK_INDICES was provided by the user.
+
+syzbot reported :
+
+kasan: CONFIG_KASAN_INLINE enabled
+kasan: GPF could be caused by NULL-ptr deref or user memory access
+general protection fault: 0000 [#1] PREEMPT SMP KASAN
+CPU: 1 PID: 8799 Comm: syz-executor235 Not tainted 5.3.0+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:nla_get_u16 include/net/netlink.h:1501 [inline]
+RIP: 0010:dsmark_init net/sched/sch_dsmark.c:364 [inline]
+RIP: 0010:dsmark_init+0x193/0x640 net/sched/sch_dsmark.c:339
+Code: 85 db 58 0f 88 7d 03 00 00 e8 e9 1a ac fb 48 8b 9d 70 ff ff ff 48 b8 00 00 00 00 00 fc ff df 48 8d 7b 04 48 89 fa 48 c1 ea 03 <0f> b6 14 02 48 89 f8 83 e0 07 83 c0 01 38 d0 7c 08 84 d2 0f 85 ca
+RSP: 0018:ffff88809426f3b8 EFLAGS: 00010247
+RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff85c6eb09
+RDX: 0000000000000000 RSI: ffffffff85c6eb17 RDI: 0000000000000004
+RBP: ffff88809426f4b0 R08: ffff88808c4085c0 R09: ffffed1015d26159
+R10: ffffed1015d26158 R11: ffff8880ae930ac7 R12: ffff8880a7e96940
+R13: dffffc0000000000 R14: ffff88809426f8c0 R15: 0000000000000000
+FS: 0000000001292880(0000) GS:ffff8880ae900000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000020000080 CR3: 000000008ca1b000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ qdisc_create+0x4ee/0x1210 net/sched/sch_api.c:1237
+ tc_modify_qdisc+0x524/0x1c50 net/sched/sch_api.c:1653
+ rtnetlink_rcv_msg+0x463/0xb00 net/core/rtnetlink.c:5223
+ netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477
+ rtnetlink_rcv+0x1d/0x30 net/core/rtnetlink.c:5241
+ netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline]
+ netlink_unicast+0x531/0x710 net/netlink/af_netlink.c:1328
+ netlink_sendmsg+0x8a5/0xd60 net/netlink/af_netlink.c:1917
+ sock_sendmsg_nosec net/socket.c:637 [inline]
+ sock_sendmsg+0xd7/0x130 net/socket.c:657
+ ___sys_sendmsg+0x803/0x920 net/socket.c:2311
+ __sys_sendmsg+0x105/0x1d0 net/socket.c:2356
+ __do_sys_sendmsg net/socket.c:2365 [inline]
+ __se_sys_sendmsg net/socket.c:2363 [inline]
+ __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2363
+ do_syscall_64+0xfa/0x760 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x440369
+
+Fixes: 758cc43c6d73 ("[PKT_SCHED]: Fix dsmark to apply changes consistent")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_dsmark.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch
+ goto errout;
+
+ err = -EINVAL;
++ if (!tb[TCA_DSMARK_INDICES])
++ goto errout;
+ indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
+
+ if (hweight32(indices) != 1)
+cxgb4-fix-out-of-bounds-msi-x-info-array-access.patch
+erspan-remove-the-incorrect-mtu-limit-for-erspan.patch
+hso-fix-null-deref-on-tty-open.patch
+ipv6-drop-incoming-packets-having-a-v4mapped-source-address.patch
+ipv6-handle-missing-host-route-in-__ipv6_ifa_notify.patch
+net-ipv4-avoid-mixed-n_redirects-and-rate_tokens-usage.patch
+net-qlogic-fix-memory-leak-in-ql_alloc_large_buffers.patch
+net-sched-taprio-fix-potential-integer-overflow-in-taprio_set_picos_per_byte.patch
+net-unpublish-sk-from-sk_reuseport_cb-before-call_rcu.patch
+nfc-fix-memory-leak-in-llcp_sock_bind.patch
+qmi_wwan-add-support-for-cinterion-cls8-devices.patch
+rxrpc-fix-rxrpc_recvmsg-tracepoint.patch
+sch_cbq-validate-tca_cbq_wrropt-to-avoid-crash.patch
+sch_dsmark-fix-potential-null-deref-in-dsmark_init.patch
+tipc-fix-unlimited-bundling-of-small-messages.patch
+udp-fix-gso_segs-calculations.patch
+vsock-fix-a-lockdep-warning-in-__vsock_release.patch
+net-dsa-rtl8366-check-vlan-id-and-not-ports.patch
+tcp-adjust-rto_base-in-retransmits_timed_out.patch
+udp-only-do-gso-if-of-segs-1.patch
+net-rds-fix-error-handling-in-rds_ib_add_one.patch
+xen-netfront-do-not-use-0u-as-error-return-value-for-xennet_fill_frags.patch
+ptp_qoriq-initialize-the-registers-spinlock-before-calling-ptp_qoriq_settime.patch
+net-sched-cbs-avoid-division-by-zero-when-calculating-the-port-rate.patch
+net-sched-taprio-avoid-division-by-zero-on-invalid-link-speed.patch
drm-vkms-fix-crc-worker-races.patch
drm-bridge-tc358767-increase-aux-transfer-length-lim.patch
drm-vkms-avoid-assigning-0-for-possible_crtc.patch
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 30 Sep 2019 15:44:44 -0700
+Subject: tcp: adjust rto_base in retransmits_timed_out()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 3256a2d6ab1f71f9a1bd2d7f6f18eb8108c48d17 ]
+
+The cited commit exposed an old retransmits_timed_out() bug
+which assumed it could call tcp_model_timeout() with
+TCP_RTO_MIN as rto_base for all states.
+
+But flows in SYN_SENT or SYN_RECV state use a different
+RTO base (1 sec instead of 200 ms, unless BPF chooses
+another value)
+
+This caused a reduction of SYN retransmits from 6 to 4 with
+the default /proc/sys/net/ipv4/tcp_syn_retries value.
+
+Fixes: a41e8a88b06e ("tcp: better handle TCP_USER_TIMEOUT in SYN_SENT state")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Marek Majkowski <marek@cloudflare.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_timer.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct
+ return false;
+
+ start_ts = tcp_sk(sk)->retrans_stamp;
+- if (likely(timeout == 0))
+- timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
++ if (likely(timeout == 0)) {
++ unsigned int rto_base = TCP_RTO_MIN;
++
++ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
++ rto_base = tcp_timeout_init(sk);
++ timeout = tcp_model_timeout(sk, boundary, rto_base);
++ }
+
+ return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
+ }
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Tuong Lien <tuong.t.lien@dektech.com.au>
+Date: Wed, 2 Oct 2019 18:49:43 +0700
+Subject: tipc: fix unlimited bundling of small messages
+
+From: Tuong Lien <tuong.t.lien@dektech.com.au>
+
+[ Upstream commit e95584a889e1902fdf1ded9712e2c3c3083baf96 ]
+
+We have identified a problem with the "oversubscription" policy in the
+link transmission code.
+
+When small messages are transmitted, and the sending link has reached
+the transmit window limit, those messages will be bundled and put into
+the link backlog queue. However, bundles of data messages are counted
+at the 'CRITICAL' level, so that the counter for that level, instead of
+the counter for the real, bundled message's level is the one being
+increased.
+Subsequent, to-be-bundled data messages at non-CRITICAL levels continue
+to be tested against the unchanged counter for their own level, while
+contributing to an unrestrained increase at the CRITICAL backlog level.
+
+This leaves a gap in congestion control algorithm for small messages
+that can result in starvation for other users or a "real" CRITICAL
+user. Even that eventually can lead to buffer exhaustion & link reset.
+
+We fix this by keeping a 'target_bskb' buffer pointer at each level,
+then when bundling, we only bundle messages at the same importance
+level only. This way, we know exactly how many slots a certain level
+has occupied in the queue, so can manage level congestion accurately.
+
+By bundling messages at the same level, we even have more benefits. Let's
+consider this:
+- One socket sends 64-byte messages at the 'CRITICAL' level;
+- Another sends 4096-byte messages at the 'LOW' level;
+
+When a 64-byte message comes and is bundled the first time, we put the
+overhead of message bundle to it (+ 40-byte header, data copy, etc.)
+for later use, but the next message can be a 4096-byte one that cannot
+be bundled to the previous one. This means the last bundle carries only
+one payload message which is totally inefficient, as for the receiver
+also! Later on, another 64-byte message comes, now we make a new bundle
+and the same story repeats...
+
+With the new bundling algorithm, this will not happen, the 64-byte
+messages will be bundled together even when the 4096-byte message(s)
+comes in between. However, if the 4096-byte messages are sent at the
+same level i.e. 'CRITICAL', the bundling algorithm will again cause the
+same overhead.
+
+Also, the same will happen even with only one socket sending small
+messages at a rate close to the link transmit's one, so that, when one
+message is bundled, it's transmitted shortly. Then, another message
+comes, a new bundle is created and so on...
+
+We will solve this issue radically by another patch.
+
+Fixes: 365ad353c256 ("tipc: reduce risk of user starvation during link congestion")
+Reported-by: Hoang Le <hoang.h.le@dektech.com.au>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Tuong Lien <tuong.t.lien@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/link.c | 29 ++++++++++++++++++-----------
+ net/tipc/msg.c | 5 +----
+ 2 files changed, 19 insertions(+), 15 deletions(-)
+
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -163,6 +163,7 @@ struct tipc_link {
+ struct {
+ u16 len;
+ u16 limit;
++ struct sk_buff *target_bskb;
+ } backlog[5];
+ u16 snd_nxt;
+ u16 prev_from;
+@@ -872,6 +873,7 @@ static void link_prepare_wakeup(struct t
+ void tipc_link_reset(struct tipc_link *l)
+ {
+ struct sk_buff_head list;
++ u32 imp;
+
+ __skb_queue_head_init(&list);
+
+@@ -893,11 +895,10 @@ void tipc_link_reset(struct tipc_link *l
+ __skb_queue_purge(&l->deferdq);
+ __skb_queue_purge(&l->backlogq);
+ __skb_queue_purge(&l->failover_deferdq);
+- l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
++ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
++ l->backlog[imp].len = 0;
++ l->backlog[imp].target_bskb = NULL;
++ }
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+@@ -938,7 +939,7 @@ int tipc_link_xmit(struct tipc_link *l,
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff_head *backlogq = &l->backlogq;
+- struct sk_buff *skb, *_skb, *bskb;
++ struct sk_buff *skb, *_skb, **tskb;
+ int pkt_cnt = skb_queue_len(list);
+ int rc = 0;
+
+@@ -988,19 +989,21 @@ int tipc_link_xmit(struct tipc_link *l,
+ seqno++;
+ continue;
+ }
+- if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
++ tskb = &l->backlog[imp].target_bskb;
++ if (tipc_msg_bundle(*tskb, hdr, mtu)) {
+ kfree_skb(__skb_dequeue(list));
+ l->stats.sent_bundled++;
+ continue;
+ }
+- if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
++ if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
+ kfree_skb(__skb_dequeue(list));
+- __skb_queue_tail(backlogq, bskb);
+- l->backlog[msg_importance(buf_msg(bskb))].len++;
++ __skb_queue_tail(backlogq, *tskb);
++ l->backlog[imp].len++;
+ l->stats.sent_bundled++;
+ l->stats.sent_bundles++;
+ continue;
+ }
++ l->backlog[imp].target_bskb = NULL;
+ l->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+@@ -1016,6 +1019,7 @@ static void tipc_link_advance_backlog(st
+ u16 seqno = l->snd_nxt;
+ u16 ack = l->rcv_nxt - 1;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
++ u32 imp;
+
+ while (skb_queue_len(&l->transmq) < l->window) {
+ skb = skb_peek(&l->backlogq);
+@@ -1026,7 +1030,10 @@ static void tipc_link_advance_backlog(st
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+- l->backlog[msg_importance(hdr)].len--;
++ imp = msg_importance(hdr);
++ l->backlog[imp].len--;
++ if (unlikely(skb == l->backlog[imp].target_bskb))
++ l->backlog[imp].target_bskb = NULL;
+ __skb_queue_tail(&l->transmq, skb);
+ /* next retransmit attempt */
+ if (link_is_bc_sndlink(l))
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff
+ bmsg = buf_msg(_skb);
+ tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+ INT_H_SIZE, dnode);
+- if (msg_isdata(msg))
+- msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+- else
+- msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
++ msg_set_importance(bmsg, msg_importance(msg));
+ msg_set_seqno(bmsg, msg_seqno(msg));
+ msg_set_ack(bmsg, msg_ack(msg));
+ msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Josh Hunt <johunt@akamai.com>
+Date: Wed, 2 Oct 2019 13:29:22 -0400
+Subject: udp: fix gso_segs calculations
+
+From: Josh Hunt <johunt@akamai.com>
+
+[ Upstream commit 44b321e5020d782ad6e8ae8183f09b163be6e6e2 ]
+
+Commit dfec0ee22c0a ("udp: Record gso_segs when supporting UDP segmentation offload")
+added gso_segs calculation, but incorrectly got sizeof() the pointer and
+not the underlying data type. In addition let's fix the v6 case.
+
+Fixes: bec1f6f69736 ("udp: generate gso with UDP_SEGMENT")
+Fixes: dfec0ee22c0a ("udp: Record gso_segs when supporting UDP segmentation offload")
+Signed-off-by: Josh Hunt <johunt@akamai.com>
+Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 2 +-
+ net/ipv6/udp.c | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -868,7 +868,7 @@ static int udp_send_skb(struct sk_buff *
+
+ skb_shinfo(skb)->gso_size = cork->gso_size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(*uh),
+ cork->gso_size);
+ goto csum_partial;
+ }
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1156,6 +1156,8 @@ static int udp_v6_send_skb(struct sk_buf
+
+ skb_shinfo(skb)->gso_size = cork->gso_size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(*uh),
++ cork->gso_size);
+ goto csum_partial;
+ }
+
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Josh Hunt <johunt@akamai.com>
+Date: Wed, 2 Oct 2019 13:29:23 -0400
+Subject: udp: only do GSO if # of segs > 1
+
+From: Josh Hunt <johunt@akamai.com>
+
+[ Upstream commit 4094871db1d65810acab3d57f6089aa39ef7f648 ]
+
+Prior to this change an application sending <= 1MSS worth of data and
+enabling UDP GSO would fail if the system had SW GSO enabled, but the
+same send would succeed if HW GSO offload is enabled. In addition to this
+inconsistency the error in the SW GSO case does not get back to the
+application if sending out of a real device so the user is unaware of this
+failure.
+
+With this change we only perform GSO if the # of segments is > 1 even
+if the application has enabled segmentation. I've also updated the
+relevant udpgso selftests.
+
+Fixes: bec1f6f69736 ("udp: generate gso with UDP_SEGMENT")
+Signed-off-by: Josh Hunt <johunt@akamai.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 11 +++++++----
+ net/ipv6/udp.c | 11 +++++++----
+ tools/testing/selftests/net/udpgso.c | 16 ++++------------
+ 3 files changed, 18 insertions(+), 20 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -833,6 +833,7 @@ static int udp_send_skb(struct sk_buff *
+ int is_udplite = IS_UDPLITE(sk);
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+ __wsum csum = 0;
+
+ /*
+@@ -866,10 +867,12 @@ static int udp_send_skb(struct sk_buff *
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(*uh),
+- cork->gso_size);
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1122,6 +1122,7 @@ static int udp_v6_send_skb(struct sk_buf
+ __wsum csum = 0;
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+
+ /*
+ * Create a UDP header
+@@ -1154,10 +1155,12 @@ static int udp_v6_send_skb(struct sk_buf
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(*uh),
+- cork->gso_size);
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V4,
+ .gso_len = CONST_MSS_V4,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V6,
+ .gso_len = CONST_MSS_V6,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Dexuan Cui <decui@microsoft.com>
+Date: Mon, 30 Sep 2019 18:43:50 +0000
+Subject: vsock: Fix a lockdep warning in __vsock_release()
+
+From: Dexuan Cui <decui@microsoft.com>
+
+[ Upstream commit 0d9138ffac24cf8b75366ede3a68c951e6dcc575 ]
+
+Lockdep is unhappy if two locks from the same class are held.
+
+Fix the below warning for hyperv and virtio sockets (vmci socket code
+doesn't have the issue) by using lock_sock_nested() when __vsock_release()
+is called recursively:
+
+============================================
+WARNING: possible recursive locking detected
+5.3.0+ #1 Not tainted
+--------------------------------------------
+server/1795 is trying to acquire lock:
+ffff8880c5158990 (sk_lock-AF_VSOCK){+.+.}, at: hvs_release+0x10/0x120 [hv_sock]
+
+but task is already holding lock:
+ffff8880c5158150 (sk_lock-AF_VSOCK){+.+.}, at: __vsock_release+0x2e/0xf0 [vsock]
+
+other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(sk_lock-AF_VSOCK);
+ lock(sk_lock-AF_VSOCK);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+2 locks held by server/1795:
+ #0: ffff8880c5d05ff8 (&sb->s_type->i_mutex_key#10){+.+.}, at: __sock_release+0x2d/0xa0
+ #1: ffff8880c5158150 (sk_lock-AF_VSOCK){+.+.}, at: __vsock_release+0x2e/0xf0 [vsock]
+
+stack backtrace:
+CPU: 5 PID: 1795 Comm: server Not tainted 5.3.0+ #1
+Call Trace:
+ dump_stack+0x67/0x90
+ __lock_acquire.cold.67+0xd2/0x20b
+ lock_acquire+0xb5/0x1c0
+ lock_sock_nested+0x6d/0x90
+ hvs_release+0x10/0x120 [hv_sock]
+ __vsock_release+0x24/0xf0 [vsock]
+ __vsock_release+0xa0/0xf0 [vsock]
+ vsock_release+0x12/0x30 [vsock]
+ __sock_release+0x37/0xa0
+ sock_close+0x14/0x20
+ __fput+0xc1/0x250
+ task_work_run+0x98/0xc0
+ do_exit+0x344/0xc60
+ do_group_exit+0x47/0xb0
+ get_signal+0x15c/0xc50
+ do_signal+0x30/0x720
+ exit_to_usermode_loop+0x50/0xa0
+ do_syscall_64+0x24e/0x270
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7f4184e85f31
+
+Tested-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 16 ++++++++++++----
+ net/vmw_vsock/hyperv_transport.c | 2 +-
+ net/vmw_vsock/virtio_transport_common.c | 2 +-
+ 3 files changed, 14 insertions(+), 6 deletions(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *
+ }
+ EXPORT_SYMBOL_GPL(__vsock_create);
+
+-static void __vsock_release(struct sock *sk)
++static void __vsock_release(struct sock *sk, int level)
+ {
+ if (sk) {
+ struct sk_buff *skb;
+@@ -648,9 +648,17 @@ static void __vsock_release(struct sock
+ vsk = vsock_sk(sk);
+ pending = NULL; /* Compiler warning. */
+
++ /* The release call is supposed to use lock_sock_nested()
++ * rather than lock_sock(), if a sock lock should be acquired.
++ */
+ transport->release(vsk);
+
+- lock_sock(sk);
++ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
++ * version to avoid the warning "possible recursive locking
++ * detected". When "level" is 0, lock_sock_nested(sk, level)
++ * is the same as lock_sock(sk).
++ */
++ lock_sock_nested(sk, level);
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+@@ -659,7 +667,7 @@ static void __vsock_release(struct sock
+
+ /* Clean up any sockets that never were accepted. */
+ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+- __vsock_release(pending);
++ __vsock_release(pending, SINGLE_DEPTH_NESTING);
+ sock_put(pending);
+ }
+
+@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space
+
+ static int vsock_release(struct socket *sock)
+ {
+- __vsock_release(sock->sk);
++ __vsock_release(sock->sk, 0);
+ sock->sk = NULL;
+ sock->state = SS_FREE;
+
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -528,7 +528,7 @@ static void hvs_release(struct vsock_soc
+ struct sock *sk = sk_vsock(vsk);
+ bool remove_sock;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ remove_sock = hvs_close_lock_held(vsk);
+ release_sock(sk);
+ if (remove_sock)
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -790,7 +790,7 @@ void virtio_transport_release(struct vso
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
+
--- /dev/null
+From foo@baz Sun 06 Oct 2019 09:49:37 AM CEST
+From: Dongli Zhang <dongli.zhang@oracle.com>
+Date: Tue, 1 Oct 2019 21:56:41 +0800
+Subject: xen-netfront: do not use ~0U as error return value for xennet_fill_frags()
+
+From: Dongli Zhang <dongli.zhang@oracle.com>
+
+[ Upstream commit a761129e3625688310aecf26e1be9e98e85f8eb5 ]
+
+xennet_fill_frags() uses ~0U as return value when the sk_buff is not able
+to cache extra fragments. This is incorrect because the return type of
+xennet_fill_frags() is RING_IDX and 0xffffffff is an expected value for
+ring buffer index.
+
+In the situation when the rsp_cons is approaching 0xffffffff, the return
+value of xennet_fill_frags() may become 0xffffffff which xennet_poll() (the
+caller) would regard as error. As a result, queue->rx.rsp_cons is set
+incorrectly because it is updated only when there is error. If there is no
+error, xennet_poll() would be responsible to update queue->rx.rsp_cons.
+Finally, queue->rx.rsp_cons would point to the rx ring buffer entries whose
+queue->rx_skbs[i] and queue->grant_rx_ref[i] are already cleared to NULL.
+This leads to NULL pointer access in the next iteration to process rx ring
+buffer entries.
+
+The symptom is similar to the one fixed in
+commit 00b368502d18 ("xen-netfront: do not assume sk_buff_head list is
+empty in error handling").
+
+This patch changes the return type of xennet_fill_frags() to indicate
+whether it is successful or failed. The queue->rx.rsp_cons will be
+always updated inside this function.
+
+Fixes: ad4f15dc2c70 ("xen/netfront: don't bug in case of too many frags")
+Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_
+ return 0;
+ }
+
+-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+- struct sk_buff *skb,
+- struct sk_buff_head *list)
++static int xennet_fill_frags(struct netfront_queue *queue,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
+ {
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *nskb;
+@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ kfree_skb(nskb);
+- return ~0U;
++ return -ENOENT;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct
+ kfree_skb(nskb);
+ }
+
+- return cons;
++ queue->rx.rsp_cons = cons;
++
++ return 0;
+ }
+
+ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
+@@ -1045,8 +1047,7 @@ err:
+ skb->data_len = rx->status;
+ skb->len += rx->status;
+
+- i = xennet_fill_frags(queue, skb, &tmpq);
+- if (unlikely(i == ~0U))
++ if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
+ goto err;
+
+ if (rx->flags & XEN_NETRXF_csum_blank)
+@@ -1056,7 +1057,7 @@ err:
+
+ __skb_queue_tail(&rxq, skb);
+
+- queue->rx.rsp_cons = ++i;
++ i = ++queue->rx.rsp_cons;
+ work_done++;
+ }
+