--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sat, 29 Dec 2018 13:56:36 -0800
+Subject: ax25: fix a use-after-free in ax25_fillin_cb()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit c433570458e49bccea5c551df628d058b3526289 ]
+
+There are multiple issues here:
+
+1. After freeing dev->ax25_ptr, we need to set it to NULL otherwise
+ we may use a dangling pointer.
+
+2. There is a race between ax25_setsockopt() and device notifier as
+ reported by syzbot. Close it by holding RTNL lock.
+
+3. We need to test if dev->ax25_ptr is NULL before using it.
+
+Reported-and-tested-by: syzbot+ae6bb869cbed29b29040@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ax25/af_ax25.c | 11 +++++++++--
+ net/ax25/ax25_dev.c | 2 ++
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket
+ break;
+ }
+
+- dev = dev_get_by_name(&init_net, devname);
++ rtnl_lock();
++ dev = __dev_get_by_name(&init_net, devname);
+ if (!dev) {
++ rtnl_unlock();
+ res = -ENODEV;
+ break;
+ }
+
+ ax25->ax25_dev = ax25_dev_ax25dev(dev);
++ if (!ax25->ax25_dev) {
++ rtnl_unlock();
++ res = -ENODEV;
++ break;
++ }
+ ax25_fillin_cb(ax25, ax25->ax25_dev);
+- dev_put(dev);
++ rtnl_unlock();
+ break;
+
+ default:
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_dev
+ if ((s = ax25_dev_list) == ax25_dev) {
+ ax25_dev_list = s->next;
+ spin_unlock_bh(&ax25_dev_lock);
++ dev->ax25_ptr = NULL;
+ dev_put(dev);
+ kfree(ax25_dev);
+ return;
+@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_dev
+ if (s->next == ax25_dev) {
+ s->next = ax25_dev->next;
+ spin_unlock_bh(&ax25_dev_lock);
++ dev->ax25_ptr = NULL;
+ dev_put(dev);
+ kfree(ax25_dev);
+ return;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Date: Wed, 19 Dec 2018 23:23:00 +0100
+Subject: gro_cell: add napi_disable in gro_cells_destroy
+
+From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+
+[ Upstream commit 8e1da73acded4751a93d4166458a7e640f37d26c ]
+
+Add a napi_disable call in gro_cells_destroy since, starting from
+commit c42858eaf492 ("gro_cells: remove spinlock protecting receive
+queues"), gro_cell_poll and gro_cells_destroy can run concurrently on
+the napi_skbs list, producing a kernel Oops if the tunnel interface is
+removed while gro_cell_poll is running. The following Oops has been
+triggered by removing a vxlan device while the interface was receiving
+traffic:
+
+[ 5628.948853] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
+[ 5628.949981] PGD 0 P4D 0
+[ 5628.950308] Oops: 0002 [#1] SMP PTI
+[ 5628.950748] CPU: 0 PID: 9 Comm: ksoftirqd/0 Not tainted 4.20.0-rc6+ #41
+[ 5628.952940] RIP: 0010:gro_cell_poll+0x49/0x80
+[ 5628.955615] RSP: 0018:ffffc9000004fdd8 EFLAGS: 00010202
+[ 5628.956250] RAX: 0000000000000000 RBX: ffffe8ffffc08150 RCX: 0000000000000000
+[ 5628.957102] RDX: 0000000000000000 RSI: ffff88802356bf00 RDI: ffffe8ffffc08150
+[ 5628.957940] RBP: 0000000000000026 R08: 0000000000000000 R09: 0000000000000000
+[ 5628.958803] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000040
+[ 5628.959661] R13: ffffe8ffffc08100 R14: 0000000000000000 R15: 0000000000000040
+[ 5628.960682] FS: 0000000000000000(0000) GS:ffff88803ea00000(0000) knlGS:0000000000000000
+[ 5628.961616] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 5628.962359] CR2: 0000000000000008 CR3: 000000000221c000 CR4: 00000000000006b0
+[ 5628.963188] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 5628.964034] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 5628.964871] Call Trace:
+[ 5628.965179] net_rx_action+0xf0/0x380
+[ 5628.965637] __do_softirq+0xc7/0x431
+[ 5628.966510] run_ksoftirqd+0x24/0x30
+[ 5628.966957] smpboot_thread_fn+0xc5/0x160
+[ 5628.967436] kthread+0x113/0x130
+[ 5628.968283] ret_from_fork+0x3a/0x50
+[ 5628.968721] Modules linked in:
+[ 5628.969099] CR2: 0000000000000008
+[ 5628.969510] ---[ end trace 9d9dedc7181661fe ]---
+[ 5628.970073] RIP: 0010:gro_cell_poll+0x49/0x80
+[ 5628.972965] RSP: 0018:ffffc9000004fdd8 EFLAGS: 00010202
+[ 5628.973611] RAX: 0000000000000000 RBX: ffffe8ffffc08150 RCX: 0000000000000000
+[ 5628.974504] RDX: 0000000000000000 RSI: ffff88802356bf00 RDI: ffffe8ffffc08150
+[ 5628.975462] RBP: 0000000000000026 R08: 0000000000000000 R09: 0000000000000000
+[ 5628.976413] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000040
+[ 5628.977375] R13: ffffe8ffffc08100 R14: 0000000000000000 R15: 0000000000000040
+[ 5628.978296] FS: 0000000000000000(0000) GS:ffff88803ea00000(0000) knlGS:0000000000000000
+[ 5628.979327] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 5628.980044] CR2: 0000000000000008 CR3: 000000000221c000 CR4: 00000000000006b0
+[ 5628.980929] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 5628.981736] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 5628.982409] Kernel panic - not syncing: Fatal exception in interrupt
+[ 5628.983307] Kernel Offset: disabled
+
+Fixes: c42858eaf492 ("gro_cells: remove spinlock protecting receive queues")
+Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/gro_cells.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/gro_cells.c
++++ b/net/core/gro_cells.c
+@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells
+ for_each_possible_cpu(i) {
+ struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
++ napi_disable(&cell->napi);
+ netif_napi_del(&cell->napi);
+ __skb_queue_purge(&cell->napi_skbs);
+ }
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+Date: Mon, 31 Dec 2018 15:43:01 -0600
+Subject: ibmveth: fix DMA unmap error in ibmveth_xmit_start error path
+
+From: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+
+[ Upstream commit 756af9c642329d54f048bac2a62f829b391f6944 ]
+
+Commit 33a48ab105a7 ("ibmveth: Fix DMA unmap error") fixed an issue in the
+normal code path of ibmveth_xmit_start() that was originally introduced by
+Commit 6e8ab30ec677 ("ibmveth: Add scatter-gather support"). This original
+fix missed the error path where dma_unmap_page is wrongly called on the
+header portion in descs[0] which was mapped with dma_map_single. As a
+result a failure to DMA map any of the frags results in a dmesg warning
+when CONFIG_DMA_API_DEBUG is enabled.
+
+------------[ cut here ]------------
+DMA-API: ibmveth 30000002: device driver frees DMA memory with wrong function
+ [device address=0x000000000a430000] [size=172 bytes] [mapped as page] [unmapped as single]
+WARNING: CPU: 1 PID: 8426 at kernel/dma/debug.c:1085 check_unmap+0x4fc/0xe10
+...
+<snip>
+...
+DMA-API: Mapped at:
+ibmveth_start_xmit+0x30c/0xb60
+dev_hard_start_xmit+0x100/0x450
+sch_direct_xmit+0x224/0x490
+__qdisc_run+0x20c/0x980
+__dev_queue_xmit+0x1bc/0xf20
+
+This fixes the API misuse by unmapping descs[0] with dma_unmap_single.
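+
+For reference, the DMA-API rule being enforced here is that memory mapped
+with dma_map_single() must be released with dma_unmap_single(), and memory
+mapped with dma_map_page() with dma_unmap_page(); CONFIG_DMA_API_DEBUG warns
+on any mismatch. A minimal sketch (variable names illustrative only):
+
+	/* header mapped as "single", frags mapped as pages */
+	dma_addr_t hdr = dma_map_single(dev, skb->data, hdr_len, DMA_TO_DEVICE);
+	dma_addr_t frg = dma_map_page(dev, page, offset, frag_len, DMA_TO_DEVICE);
+	...
+	dma_unmap_single(dev, hdr, hdr_len, DMA_TO_DEVICE);	/* not dma_unmap_page() */
+	dma_unmap_page(dev, frg, frag_len, DMA_TO_DEVICE);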
+
+Fixes: 6e8ab30ec677 ("ibmveth: Add scatter-gather support")
+Signed-off-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ibm/ibmveth.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1172,11 +1172,15 @@ out:
+
+ map_failed_frags:
+ last = i+1;
+- for (i = 0; i < last; i++)
++ for (i = 1; i < last; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
++ dma_unmap_single(&adapter->vdev->dev,
++ descs[0].fields.address,
++ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
++ DMA_TO_DEVICE);
+ map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Willem de Bruijn <willemb@google.com>
+Date: Sun, 23 Dec 2018 12:52:18 -0500
+Subject: ieee802154: lowpan_header_create check must check daddr
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 40c3ff6d5e0809505a067dd423c110c5658c478c ]
+
+Packet sockets may call dev_header_parse with NULL daddr. Make
+lowpan_header_ops.create fail.
+
+Fixes: 87a93e4eceb4 ("ieee802154: change needed headroom/tailroom")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Alexander Aring <aring@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ieee802154/6lowpan/tx.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/ieee802154/6lowpan/tx.c
++++ b/net/ieee802154/6lowpan/tx.c
+@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct neighbour *n;
+
++ if (!daddr)
++ return -EINVAL;
++
+ /* TODO:
+ * if this package isn't ipv6 one, where should it be routed?
+ */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Willem de Bruijn <willemb@google.com>
+Date: Sun, 30 Dec 2018 17:24:36 -0500
+Subject: ip: validate header length on virtual device xmit
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit cb9f1b783850b14cbd7f87d061d784a666dfba1f ]
+
+KMSAN detected read beyond end of buffer in vti and sit devices when
+passing truncated packets with PF_PACKET. The issue affects additional
+ip tunnel devices.
+
+Extend commit 76c0ddd8c3a6 ("ip6_tunnel: be careful when accessing the
+inner header") and commit ccfec9e5cb2d ("ip_tunnel: be careful when
+accessing the inner header").
+
+Move the check to a separate helper and call at the start of each
+ndo_start_xmit function in net/ipv4 and net/ipv6.
+
+Minor changes:
+- convert dev_kfree_skb to kfree_skb on error path,
+ as dev_kfree_skb calls consume_skb which is not for error paths.
+- use pskb_network_may_pull even though that is pedantic here,
+ as the same as pskb_may_pull for devices without llheaders.
+- do not cache ipv6 hdrs if used only once
+ (unsafe across pskb_may_pull, was more relevant to earlier patch)
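+
+With the helper in place, each affected ndo_start_xmit then begins with the
+same pattern, sketched here with a hypothetical handler name for illustration:
+
+	static netdev_tx_t foo_tunnel_xmit(struct sk_buff *skb,
+					   struct net_device *dev)
+	{
+		if (!pskb_inet_may_pull(skb))
+			goto tx_err;
+		...
+	tx_err:
+		dev->stats.tx_errors++;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}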
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip_tunnels.h | 20 ++++++++++++++++++++
+ net/ipv4/ip_gre.c | 9 +++++++++
+ net/ipv4/ip_tunnel.c | 9 ---------
+ net/ipv4/ip_vti.c | 12 +++++++++---
+ net/ipv6/ip6_gre.c | 10 +++++++---
+ net/ipv6/ip6_tunnel.c | 10 +++-------
+ net/ipv6/ip6_vti.c | 8 ++++----
+ net/ipv6/ip6mr.c | 17 +++++++++++------
+ net/ipv6/sit.c | 3 +++
+ 9 files changed, 66 insertions(+), 32 deletions(-)
+
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -326,6 +326,26 @@ int ip_tunnel_encap_del_ops(const struct
+ int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap);
+
++static inline bool pskb_inet_may_pull(struct sk_buff *skb)
++{
++ int nhlen;
++
++ switch (skb->protocol) {
++#if IS_ENABLED(CONFIG_IPV6)
++ case htons(ETH_P_IPV6):
++ nhlen = sizeof(struct ipv6hdr);
++ break;
++#endif
++ case htons(ETH_P_IP):
++ nhlen = sizeof(struct iphdr);
++ break;
++ default:
++ nhlen = 0;
++ }
++
++ return pskb_network_may_pull(skb, nhlen);
++}
++
+ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+ {
+ const struct ip_tunnel_encap_ops *ops;
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -677,6 +677,9 @@ static netdev_tx_t ipgre_xmit(struct sk_
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct iphdr *tnl_params;
+
++ if (!pskb_inet_may_pull(skb))
++ goto free_skb;
++
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev, skb->protocol);
+ return NETDEV_TX_OK;
+@@ -720,6 +723,9 @@ static netdev_tx_t erspan_xmit(struct sk
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ bool truncate = false;
+
++ if (!pskb_inet_may_pull(skb))
++ goto free_skb;
++
+ if (tunnel->collect_md) {
+ erspan_fb_xmit(skb, dev, skb->protocol);
+ return NETDEV_TX_OK;
+@@ -763,6 +769,9 @@ static netdev_tx_t gre_tap_xmit(struct s
+ {
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
++ if (!pskb_inet_may_pull(skb))
++ goto free_skb;
++
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
+ return NETDEV_TX_OK;
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb,
+ const struct iphdr *tnl_params, u8 protocol)
+ {
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+- unsigned int inner_nhdr_len = 0;
+ const struct iphdr *inner_iph;
+ struct flowi4 fl4;
+ u8 tos, ttl;
+@@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb,
+ __be32 dst;
+ bool connected;
+
+- /* ensure we can access the inner net header, for several users below */
+- if (skb->protocol == htons(ETH_P_IP))
+- inner_nhdr_len = sizeof(struct iphdr);
+- else if (skb->protocol == htons(ETH_P_IPV6))
+- inner_nhdr_len = sizeof(struct ipv6hdr);
+- if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+- goto tx_error;
+-
+ inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ connected = (tunnel->parms.iph.daddr != 0);
+
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struc
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct flowi fl;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ memset(&fl, 0, sizeof(fl));
+
+ switch (skb->protocol) {
+@@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struc
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ break;
+ default:
+- dev->stats.tx_errors++;
+- dev_kfree_skb(skb);
+- return NETDEV_TX_OK;
++ goto tx_err;
+ }
+
+ /* override mark with tunnel output key */
+ fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
+ return vti_xmit(skb, dev, &fl);
++
++tx_err:
++ dev->stats.tx_errors++;
++ kfree_skb(skb);
++ return NETDEV_TX_OK;
+ }
+
+ static int vti4_err(struct sk_buff *skb, u32 info)
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -897,6 +897,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(st
+ struct net_device_stats *stats = &t->dev->stats;
+ int ret;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
+ goto tx_err;
+
+@@ -939,6 +942,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ int nhoff;
+ int thoff;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
+ goto tx_err;
+
+@@ -1011,8 +1017,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ goto tx_err;
+ }
+ } else {
+- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+-
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+@@ -1020,7 +1024,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ &dsfield, &encap_limit);
+ break;
+ case htons(ETH_P_IPV6):
+- if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
++ if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
+ goto tx_err;
+ if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
+ &dsfield, &encap_limit))
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, str
+ u8 tproto;
+ int err;
+
+- /* ensure we can access the full inner ip header */
+- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+- return -1;
+-
+ iph = ip_hdr(skb);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
+@@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
+ u8 tproto;
+ int err;
+
+- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+- return -1;
+-
+ ipv6h = ipv6_hdr(skb);
+ tproto = READ_ONCE(t->parms.proto);
+ if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
+@@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb,
+ struct net_device_stats *stats = &t->dev->stats;
+ int ret;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ret = ip4ip6_tnl_xmit(skb, dev);
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struc
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+ struct net_device_stats *stats = &t->dev->stats;
+- struct ipv6hdr *ipv6h;
+ struct flowi fl;
+ int ret;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ memset(&fl, 0, sizeof(fl));
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IPV6):
+- ipv6h = ipv6_hdr(skb);
+-
+ if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+- vti6_addr_conflict(t, ipv6h))
++ vti6_addr_conflict(t, ipv6_hdr(skb)))
+ goto tx_err;
+
+ xfrm_decode_session(skb, &fl, AF_INET6);
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -51,6 +51,7 @@
+ #include <linux/export.h>
+ #include <net/ip6_checksum.h>
+ #include <linux/netconf.h>
++#include <net/ip_tunnels.h>
+
+ #include <linux/nospec.h>
+
+@@ -593,13 +594,12 @@ static netdev_tx_t reg_vif_xmit(struct s
+ .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
+ .flowi6_mark = skb->mark,
+ };
+- int err;
+
+- err = ip6mr_fib_lookup(net, &fl6, &mrt);
+- if (err < 0) {
+- kfree_skb(skb);
+- return err;
+- }
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
++ if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
++ goto tx_err;
+
+ read_lock(&mrt_lock);
+ dev->stats.tx_bytes += skb->len;
+@@ -608,6 +608,11 @@ static netdev_tx_t reg_vif_xmit(struct s
+ read_unlock(&mrt_lock);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
++
++tx_err:
++ dev->stats.tx_errors++;
++ kfree_skb(skb);
++ return NETDEV_TX_OK;
+ }
+
+ static int reg_vif_get_iflink(const struct net_device *dev)
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1021,6 +1021,9 @@ tx_error:
+ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+ {
++ if (!pskb_inet_may_pull(skb))
++ goto tx_err;
++
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Tue, 11 Dec 2018 14:10:08 -0600
+Subject: ip6mr: Fix potential Spectre v1 vulnerability
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit 69d2c86766da2ded2b70281f1bf242cb0d58a778 ]
+
+vr.mifi is indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+net/ipv6/ip6mr.c:1845 ip6mr_ioctl() warn: potential spectre issue 'mrt->vif_table' [r] (local cap)
+net/ipv6/ip6mr.c:1919 ip6mr_compat_ioctl() warn: potential spectre issue 'mrt->vif_table' [r] (local cap)
+
+Fix this by sanitizing vr.mifi before using it to index mrt->vif_table.
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
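+
+The resulting pattern in both ioctl handlers is the bounds check followed by
+the clamp, before the table access:
+
+	if (vr.mifi >= mrt->maxvif)
+		return -EINVAL;
+	vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
+	read_lock(&mrt_lock);
+	vif = &mrt->vif_table[vr.mifi];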
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6mr.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -52,6 +52,8 @@
+ #include <net/ip6_checksum.h>
+ #include <linux/netconf.h>
+
++#include <linux/nospec.h>
++
+ struct ip6mr_rule {
+ struct fib_rule common;
+ };
+@@ -1831,6 +1833,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd
+ return -EFAULT;
+ if (vr.mifi >= mrt->maxvif)
+ return -EINVAL;
++ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.mifi];
+ if (VIF_EXISTS(mrt, vr.mifi)) {
+@@ -1905,6 +1908,7 @@ int ip6mr_compat_ioctl(struct sock *sk,
+ return -EFAULT;
+ if (vr.mifi >= mrt->maxvif)
+ return -EINVAL;
++ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.mifi];
+ if (VIF_EXISTS(mrt, vr.mifi)) {
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Mon, 10 Dec 2018 12:41:24 -0600
+Subject: ipv4: Fix potential Spectre v1 vulnerability
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit 5648451e30a0d13d11796574919a359025d52cce ]
+
+vr.vifi is indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+net/ipv4/ipmr.c:1616 ipmr_ioctl() warn: potential spectre issue 'mrt->vif_table' [r] (local cap)
+net/ipv4/ipmr.c:1690 ipmr_compat_ioctl() warn: potential spectre issue 'mrt->vif_table' [r] (local cap)
+
+Fix this by sanitizing vr.vifi before using it to index mrt->vif_table.
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ipmr.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -69,6 +69,8 @@
+ #include <net/nexthop.h>
+ #include <net/switchdev.h>
+
++#include <linux/nospec.h>
++
+ struct ipmr_rule {
+ struct fib_rule common;
+ };
+@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd,
+ return -EFAULT;
+ if (vr.vifi >= mrt->maxvif)
+ return -EINVAL;
++ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
+@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, u
+ return -EFAULT;
+ if (vr.vifi >= mrt->maxvif)
+ return -EINVAL;
++ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
+ read_lock(&mrt_lock);
+ vif = &mrt->vif_table[vr.vifi];
+ if (VIF_EXISTS(mrt, vr.vifi)) {
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Tue, 18 Dec 2018 21:17:44 -0800
+Subject: ipv6: explicitly initialize udp6_addr in udp_sock_create6()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit fb24274546310872eeeaf3d1d53799d8414aa0f2 ]
+
+syzbot reported the use of uninitialized udp6_addr::sin6_scope_id.
+We can just set ::sin6_scope_id to zero, as tunnels are unlikely
+to use an IPv6 address that needs a scope id and there is no
+interface to bind in this context.
+
+For net-next, it looks different as we have cfg->bind_ifindex there
+so we can probably call ipv6_iface_scope_id().
+
+Same for ::sin6_flowinfo, tunnels don't use it.
+
+Fixes: 8024e02879dd ("udp: Add udp_sock_create for UDP tunnels to open listener socket")
+Reported-by: syzbot+c56449ed3652e6720f30@syzkaller.appspotmail.com
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_udp_tunnel.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_udp_tunnel.c
++++ b/net/ipv6/ip6_udp_tunnel.c
+@@ -15,7 +15,7 @@
+ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+ {
+- struct sockaddr_in6 udp6_addr;
++ struct sockaddr_in6 udp6_addr = {};
+ int err;
+ struct socket *sock = NULL;
+
+@@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, st
+ goto error;
+
+ if (cfg->peer_udp_port) {
++ memset(&udp6_addr, 0, sizeof(udp6_addr));
+ udp6_addr.sin6_family = AF_INET6;
+ memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+ sizeof(udp6_addr.sin6_addr));
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 20 Dec 2018 21:20:10 +0800
+Subject: ipv6: frags: Fix bogus skb->sk in reassembled packets
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit d15f5ac8deea936d3adf629421a66a88b42b8a2f ]
+
+It was reported that IPsec would crash when it encounters an IPv6
+reassembled packet because skb->sk is non-zero and not a valid
+pointer.
+
+This is because skb->sk is now a union with ip_defrag_offset.
+
+This patch fixes this by resetting skb->sk when exiting from
+the reassembly code.
+
+Reported-by: Xiumei Mu <xmu@redhat.com>
+Fixes: 219badfaade9 ("ipv6: frags: get rid of ip6frag_skb_cb/...")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/reassembly.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -384,6 +384,7 @@ static int ip6_frag_reasm(struct frag_qu
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
++ fp->sk = NULL;
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Stefano Brivio <sbrivio@redhat.com>
+Date: Wed, 2 Jan 2019 13:29:27 +0100
+Subject: ipv6: route: Fix return value of ip6_neigh_lookup() on neigh_create() error
+
+From: Stefano Brivio <sbrivio@redhat.com>
+
+[ Upstream commit 7adf3246092f5e87ed0fa610e8088fae416c581f ]
+
+In ip6_neigh_lookup(), we must not return errors coming from
+neigh_create(): if creation of a neighbour entry fails, the lookup should
+return NULL, in the same way as it's done in __neigh_lookup().
+
+Otherwise, callers legitimately checking for a non-NULL return value of
+the lookup function might dereference an invalid pointer.
+
+For instance, on neighbour table overflow, ndisc_router_discovery()
+crashes ndisc_update() by passing ERR_PTR(-ENOBUFS) as 'neigh' argument.
+
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Fixes: f8a1b43b709d ("net/ipv6: Create a neigh_lookup for FIB entries")
+Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -210,7 +210,9 @@ struct neighbour *ip6_neigh_lookup(const
+ n = __ipv6_neigh_lookup(dev, daddr);
+ if (n)
+ return n;
+- return neigh_create(&nd_tbl, daddr, dev);
++
++ n = neigh_create(&nd_tbl, daddr, dev);
++ return IS_ERR(n) ? NULL : n;
+ }
+
+ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 21 Dec 2018 07:47:51 -0800
+Subject: ipv6: tunnels: fix two use-after-free
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cbb49697d5512ce9e61b45ce75d3ee43d7ea5524 ]
+
+xfrm6_policy_check() might have re-allocated skb->head, so we need
+to reload the ipv6 header pointer.
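+
+On both receive paths the pattern is, roughly:
+
+	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+		goto drop;
+	ipv6h = ipv6_hdr(skb);	/* reload: the check may have moved skb->head */
+	if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
+		goto drop;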
+
+syzbot reported:
+
+BUG: KASAN: use-after-free in __ipv6_addr_type+0x302/0x32f net/ipv6/addrconf_core.c:40
+Read of size 4 at addr ffff888191b8cb70 by task syz-executor2/1304
+
+CPU: 0 PID: 1304 Comm: syz-executor2 Not tainted 4.20.0-rc7+ #356
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ <IRQ>
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x244/0x39d lib/dump_stack.c:113
+ print_address_description.cold.7+0x9/0x1ff mm/kasan/report.c:256
+ kasan_report_error mm/kasan/report.c:354 [inline]
+ kasan_report.cold.8+0x242/0x309 mm/kasan/report.c:412
+ __asan_report_load4_noabort+0x14/0x20 mm/kasan/report.c:432
+ __ipv6_addr_type+0x302/0x32f net/ipv6/addrconf_core.c:40
+ ipv6_addr_type include/net/ipv6.h:403 [inline]
+ ip6_tnl_get_cap+0x27/0x190 net/ipv6/ip6_tunnel.c:727
+ ip6_tnl_rcv_ctl+0xdb/0x2a0 net/ipv6/ip6_tunnel.c:757
+ vti6_rcv+0x336/0x8f3 net/ipv6/ip6_vti.c:321
+ xfrm6_ipcomp_rcv+0x1a5/0x3a0 net/ipv6/xfrm6_protocol.c:132
+ ip6_protocol_deliver_rcu+0x372/0x1940 net/ipv6/ip6_input.c:394
+ ip6_input_finish+0x84/0x170 net/ipv6/ip6_input.c:434
+ NF_HOOK include/linux/netfilter.h:289 [inline]
+ ip6_input+0xe9/0x600 net/ipv6/ip6_input.c:443
+IPVS: ftp: loaded support on port[0] = 21
+ ip6_mc_input+0x514/0x11c0 net/ipv6/ip6_input.c:537
+ dst_input include/net/dst.h:450 [inline]
+ ip6_rcv_finish+0x17a/0x330 net/ipv6/ip6_input.c:76
+ NF_HOOK include/linux/netfilter.h:289 [inline]
+ ipv6_rcv+0x115/0x640 net/ipv6/ip6_input.c:272
+ __netif_receive_skb_one_core+0x14d/0x200 net/core/dev.c:4973
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:5083
+ process_backlog+0x24e/0x7a0 net/core/dev.c:5923
+ napi_poll net/core/dev.c:6346 [inline]
+ net_rx_action+0x7fa/0x19b0 net/core/dev.c:6412
+ __do_softirq+0x308/0xb7e kernel/softirq.c:292
+ do_softirq_own_stack+0x2a/0x40 arch/x86/entry/entry_64.S:1027
+ </IRQ>
+ do_softirq.part.14+0x126/0x160 kernel/softirq.c:337
+ do_softirq+0x19/0x20 kernel/softirq.c:340
+ netif_rx_ni+0x521/0x860 net/core/dev.c:4569
+ dev_loopback_xmit+0x287/0x8c0 net/core/dev.c:3576
+ NF_HOOK include/linux/netfilter.h:289 [inline]
+ ip6_finish_output2+0x193a/0x2930 net/ipv6/ip6_output.c:84
+ ip6_fragment+0x2b06/0x3850 net/ipv6/ip6_output.c:727
+ ip6_finish_output+0x6b7/0xc50 net/ipv6/ip6_output.c:152
+ NF_HOOK_COND include/linux/netfilter.h:278 [inline]
+ ip6_output+0x232/0x9d0 net/ipv6/ip6_output.c:171
+ dst_output include/net/dst.h:444 [inline]
+ ip6_local_out+0xc5/0x1b0 net/ipv6/output_core.c:176
+ ip6_send_skb+0xbc/0x340 net/ipv6/ip6_output.c:1727
+ ip6_push_pending_frames+0xc5/0xf0 net/ipv6/ip6_output.c:1747
+ rawv6_push_pending_frames net/ipv6/raw.c:615 [inline]
+ rawv6_sendmsg+0x3a3e/0x4b40 net/ipv6/raw.c:945
+kobject: 'queues' (0000000089e6eea2): kobject_add_internal: parent: 'tunl0', set: '<NULL>'
+kobject: 'queues' (0000000089e6eea2): kobject_uevent_env
+ inet_sendmsg+0x1a1/0x690 net/ipv4/af_inet.c:798
+kobject: 'queues' (0000000089e6eea2): kobject_uevent_env: filter function caused the event to drop!
+ sock_sendmsg_nosec net/socket.c:621 [inline]
+ sock_sendmsg+0xd5/0x120 net/socket.c:631
+ sock_write_iter+0x35e/0x5c0 net/socket.c:900
+ call_write_iter include/linux/fs.h:1857 [inline]
+ new_sync_write fs/read_write.c:474 [inline]
+ __vfs_write+0x6b8/0x9f0 fs/read_write.c:487
+kobject: 'rx-0' (00000000e2d902d9): kobject_add_internal: parent: 'queues', set: 'queues'
+kobject: 'rx-0' (00000000e2d902d9): kobject_uevent_env
+ vfs_write+0x1fc/0x560 fs/read_write.c:549
+ ksys_write+0x101/0x260 fs/read_write.c:598
+kobject: 'rx-0' (00000000e2d902d9): fill_kobj_path: path = '/devices/virtual/net/tunl0/queues/rx-0'
+ __do_sys_write fs/read_write.c:610 [inline]
+ __se_sys_write fs/read_write.c:607 [inline]
+ __x64_sys_write+0x73/0xb0 fs/read_write.c:607
+ do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
+kobject: 'tx-0' (00000000443b70ac): kobject_add_internal: parent: 'queues', set: 'queues'
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x457669
+Code: fd b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 cb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007f9bd200bc78 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000457669
+RDX: 000000000000058f RSI: 00000000200033c0 RDI: 0000000000000003
+kobject: 'tx-0' (00000000443b70ac): kobject_uevent_env
+RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9bd200c6d4
+R13: 00000000004c2dcc R14: 00000000004da398 R15: 00000000ffffffff
+
+Allocated by task 1304:
+ save_stack+0x43/0xd0 mm/kasan/kasan.c:448
+ set_track mm/kasan/kasan.c:460 [inline]
+ kasan_kmalloc+0xc7/0xe0 mm/kasan/kasan.c:553
+ __do_kmalloc_node mm/slab.c:3684 [inline]
+ __kmalloc_node_track_caller+0x50/0x70 mm/slab.c:3698
+ __kmalloc_reserve.isra.41+0x41/0xe0 net/core/skbuff.c:140
+ __alloc_skb+0x155/0x760 net/core/skbuff.c:208
+kobject: 'tx-0' (00000000443b70ac): fill_kobj_path: path = '/devices/virtual/net/tunl0/queues/tx-0'
+ alloc_skb include/linux/skbuff.h:1011 [inline]
+ __ip6_append_data.isra.49+0x2f1a/0x3f50 net/ipv6/ip6_output.c:1450
+ ip6_append_data+0x1bc/0x2d0 net/ipv6/ip6_output.c:1619
+ rawv6_sendmsg+0x15ab/0x4b40 net/ipv6/raw.c:938
+ inet_sendmsg+0x1a1/0x690 net/ipv4/af_inet.c:798
+ sock_sendmsg_nosec net/socket.c:621 [inline]
+ sock_sendmsg+0xd5/0x120 net/socket.c:631
+ ___sys_sendmsg+0x7fd/0x930 net/socket.c:2116
+ __sys_sendmsg+0x11d/0x280 net/socket.c:2154
+ __do_sys_sendmsg net/socket.c:2163 [inline]
+ __se_sys_sendmsg net/socket.c:2161 [inline]
+ __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2161
+ do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+kobject: 'gre0' (00000000cb1b2d7b): kobject_add_internal: parent: 'net', set: 'devices'
+
+Freed by task 1304:
+ save_stack+0x43/0xd0 mm/kasan/kasan.c:448
+ set_track mm/kasan/kasan.c:460 [inline]
+ __kasan_slab_free+0x102/0x150 mm/kasan/kasan.c:521
+ kasan_slab_free+0xe/0x10 mm/kasan/kasan.c:528
+ __cache_free mm/slab.c:3498 [inline]
+ kfree+0xcf/0x230 mm/slab.c:3817
+ skb_free_head+0x93/0xb0 net/core/skbuff.c:553
+ pskb_expand_head+0x3b2/0x10d0 net/core/skbuff.c:1498
+ __pskb_pull_tail+0x156/0x18a0 net/core/skbuff.c:1896
+ pskb_may_pull include/linux/skbuff.h:2188 [inline]
+ _decode_session6+0xd11/0x14d0 net/ipv6/xfrm6_policy.c:150
+ __xfrm_decode_session+0x71/0x140 net/xfrm/xfrm_policy.c:3272
+kobject: 'gre0' (00000000cb1b2d7b): kobject_uevent_env
+ __xfrm_policy_check+0x380/0x2c40 net/xfrm/xfrm_policy.c:3322
+ __xfrm_policy_check2 include/net/xfrm.h:1170 [inline]
+ xfrm_policy_check include/net/xfrm.h:1175 [inline]
+ xfrm6_policy_check include/net/xfrm.h:1185 [inline]
+ vti6_rcv+0x4bd/0x8f3 net/ipv6/ip6_vti.c:316
+ xfrm6_ipcomp_rcv+0x1a5/0x3a0 net/ipv6/xfrm6_protocol.c:132
+ ip6_protocol_deliver_rcu+0x372/0x1940 net/ipv6/ip6_input.c:394
+ ip6_input_finish+0x84/0x170 net/ipv6/ip6_input.c:434
+ NF_HOOK include/linux/netfilter.h:289 [inline]
+ ip6_input+0xe9/0x600 net/ipv6/ip6_input.c:443
+ ip6_mc_input+0x514/0x11c0 net/ipv6/ip6_input.c:537
+ dst_input include/net/dst.h:450 [inline]
+ ip6_rcv_finish+0x17a/0x330 net/ipv6/ip6_input.c:76
+ NF_HOOK include/linux/netfilter.h:289 [inline]
+ ipv6_rcv+0x115/0x640 net/ipv6/ip6_input.c:272
+ __netif_receive_skb_one_core+0x14d/0x200 net/core/dev.c:4973
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:5083
+ process_backlog+0x24e/0x7a0 net/core/dev.c:5923
+kobject: 'gre0' (00000000cb1b2d7b): fill_kobj_path: path = '/devices/virtual/net/gre0'
+ napi_poll net/core/dev.c:6346 [inline]
+ net_rx_action+0x7fa/0x19b0 net/core/dev.c:6412
+ __do_softirq+0x308/0xb7e kernel/softirq.c:292
+
+The buggy address belongs to the object at ffff888191b8cac0
+ which belongs to the cache kmalloc-512 of size 512
+The buggy address is located 176 bytes inside of
+ 512-byte region [ffff888191b8cac0, ffff888191b8ccc0)
+The buggy address belongs to the page:
+page:ffffea000646e300 count:1 mapcount:0 mapping:ffff8881da800940 index:0x0
+flags: 0x2fffc0000000200(slab)
+raw: 02fffc0000000200 ffffea0006eaaa48 ffffea00065356c8 ffff8881da800940
+raw: 0000000000000000 ffff888191b8c0c0 0000000100000006 0000000000000000
+page dumped because: kasan: bad access detected
+kobject: 'queues' (000000005fd6226e): kobject_add_internal: parent: 'gre0', set: '<NULL>'
+
+Memory state around the buggy address:
+ ffff888191b8ca00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ ffff888191b8ca80: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+>ffff888191b8cb00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff888191b8cb80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff888191b8cc00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+
+Fixes: 0d3c703a9d17 ("ipv6: Cleanup IPv6 tunnel receive path")
+Fixes: ed1efb2aefbb ("ipv6: Add support for IPsec virtual tunnel interfaces")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_tunnel.c | 1 +
+ net/ipv6/ip6_vti.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -901,6 +901,7 @@ static int ipxip6_rcv(struct sk_buff *sk
+ goto drop;
+ if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
++ ipv6h = ipv6_hdr(skb);
+ if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
+ goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi->proto, false))
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -318,6 +318,7 @@ static int vti6_rcv(struct sk_buff *skb)
+ return 0;
+ }
+
++ ipv6h = ipv6_hdr(skb);
+ if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+ t->dev->stats.rx_dropped++;
+ rcu_read_unlock();
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Jan 2019 09:20:27 -0800
+Subject: isdn: fix kernel-infoleak in capi_unlocked_ioctl
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d63967e475ae10f286dbd35e189cb241e0b1f284 ]
+
+Since capi_ioctl() copies 64 bytes after calling
+capi20_get_manufacturer(), we need to ensure that we do not leak
+information to user space.
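+
+The difference that matters here, roughly (buf and manu are illustrative
+names): strlcpy() stops at the NUL terminator and leaves the rest of the
+destination untouched, while strncpy() zero-fills the destination up to the
+given length, so the full CAPI_MANUFACTURER_LEN buffer that capi_ioctl()
+copies out is initialized:
+
+	char buf[CAPI_MANUFACTURER_LEN];	/* 64 bytes, copied in full to user space */
+
+	strlcpy(buf, manu, sizeof(buf));	/* bytes past the NUL stay uninitialized */
+	strncpy(buf, manu, sizeof(buf));	/* remainder is zero-padded */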
+
+BUG: KMSAN: kernel-infoleak in _copy_to_user+0x16b/0x1f0 lib/usercopy.c:32
+CPU: 0 PID: 11245 Comm: syz-executor633 Not tainted 4.20.0-rc7+ #2
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x173/0x1d0 lib/dump_stack.c:113
+ kmsan_report+0x12e/0x2a0 mm/kmsan/kmsan.c:613
+ kmsan_internal_check_memory+0x9d4/0xb00 mm/kmsan/kmsan.c:704
+ kmsan_copy_to_user+0xab/0xc0 mm/kmsan/kmsan_hooks.c:601
+ _copy_to_user+0x16b/0x1f0 lib/usercopy.c:32
+ capi_ioctl include/linux/uaccess.h:177 [inline]
+ capi_unlocked_ioctl+0x1a0b/0x1bf0 drivers/isdn/capi/capi.c:939
+ do_vfs_ioctl+0xebd/0x2bf0 fs/ioctl.c:46
+ ksys_ioctl fs/ioctl.c:713 [inline]
+ __do_sys_ioctl fs/ioctl.c:720 [inline]
+ __se_sys_ioctl+0x1da/0x270 fs/ioctl.c:718
+ __x64_sys_ioctl+0x4a/0x70 fs/ioctl.c:718
+ do_syscall_64+0xbc/0xf0 arch/x86/entry/common.c:291
+ entry_SYSCALL_64_after_hwframe+0x63/0xe7
+RIP: 0033:0x440019
+Code: 18 89 d0 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 fb 13 fc ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007ffdd4659fb8 EFLAGS: 00000213 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00000000004002c8 RCX: 0000000000440019
+RDX: 0000000020000080 RSI: 00000000c0044306 RDI: 0000000000000003
+RBP: 00000000006ca018 R08: 0000000000000000 R09: 00000000004002c8
+R10: 0000000000000000 R11: 0000000000000213 R12: 00000000004018a0
+R13: 0000000000401930 R14: 0000000000000000 R15: 0000000000000000
+
+Local variable description: ----data.i@capi_unlocked_ioctl
+Variable was created at:
+ capi_ioctl drivers/isdn/capi/capi.c:747 [inline]
+ capi_unlocked_ioctl+0x82/0x1bf0 drivers/isdn/capi/capi.c:939
+ do_vfs_ioctl+0xebd/0x2bf0 fs/ioctl.c:46
+
+Bytes 12-63 of 64 are uninitialized
+Memory access of size 64 starts at ffff88807ac5fce8
+Data copied to user address 0000000020000080
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Karsten Keil <isdn@linux-pingi.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/capi/kcapi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/isdn/capi/kcapi.c
++++ b/drivers/isdn/capi/kcapi.c
+@@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u
+ u16 ret;
+
+ if (contr == 0) {
+- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
++ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+ return CAPI_NOERROR;
+ }
+
+@@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
++ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Shalom Toledo <shalomt@mellanox.com>
+Date: Tue, 18 Dec 2018 15:59:20 +0000
+Subject: mlxsw: core: Increase timeout during firmware flash process
+
+From: Shalom Toledo <shalomt@mellanox.com>
+
+[ Upstream commit cf0b70e71b32137ccf9c1f3dd9fb30cbf89b4322 ]
+
+During the firmware flash process, some of the EMADs get timed out, which
+causes the driver to send them again with a limit of 5 retries. There are
+some situations in which 5 retries is not enough and the EMAD access fails.
+If the failed EMAD was related to the flashing process, the driver fails
+the flashing.
+
+The reason for these timeouts during firmware flashing is cache misses in
+the CPU running the firmware. In case the CPU needs to fetch instructions
+from the flash when a firmware is flashed, it needs to wait for the
+flashing to complete. Since flashing takes time, it is possible for pending
+EMADs to time out.
+
+Fix by increasing EMADs' timeout while flashing firmware.
+
+Fixes: ce6ef68f433f ("mlxsw: spectrum: Implement the ethtool flash_device callback")
+Signed-off-by: Shalom Toledo <shalomt@mellanox.com>
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core.c | 19 ++++++++++++++++++-
+ drivers/net/ethernet/mellanox/mlxsw/core.h | 3 +++
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 7 ++++++-
+ 3 files changed, 27 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -81,6 +81,7 @@ struct mlxsw_core {
+ struct mlxsw_core_port *ports;
+ unsigned int max_ports;
+ bool reload_fail;
++ bool fw_flash_in_progress;
+ unsigned long driver_priv[0];
+ /* driver_priv has to be always the last item */
+ };
+@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
+ struct rcu_head rcu;
+ };
+
+-#define MLXSW_EMAD_TIMEOUT_MS 200
++#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
++#define MLXSW_EMAD_TIMEOUT_MS 200
+
+ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+ {
+ unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
++ if (trans->core->fw_flash_in_progress)
++ timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
++
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ }
+
+@@ -1854,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxs
+ }
+ EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
++void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
++{
++ mlxsw_core->fw_flash_in_progress = true;
++}
++EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
++
++void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
++{
++ mlxsw_core->fw_flash_in_progress = false;
++}
++EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
++
+ static int __init mlxsw_core_module_init(void)
+ {
+ int err;
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
+@@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxs
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
++void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
++void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
++
+ bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_res_id res_id);
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -308,8 +308,13 @@ static int mlxsw_sp_firmware_flash(struc
+ },
+ .mlxsw_sp = mlxsw_sp
+ };
++ int err;
+
+- return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
++ mlxsw_core_fw_flash_start(mlxsw_sp->core);
++ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
++ mlxsw_core_fw_flash_end(mlxsw_sp->core);
++
++ return err;
+ }
+
+ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Allan W. Nielsen" <allan.nielsen@microchip.com>
+Date: Thu, 20 Dec 2018 09:37:17 +0100
+Subject: mscc: Configured MAC entries should be locked.
+
+From: "Allan W. Nielsen" <allan.nielsen@microchip.com>
+
+[ Upstream commit 8fd1a4affbdafda592f80cd01bf7a382a5ff2fe8 ]
+
+The MAC table in Ocelot supports auto aging (normal) and static entries.
+MAC entries that are manually configured should be static and not
+subject to aging.
+
+Fixes: a556c76adc05 ("net: mscc: Add initial Ocelot switch support")
+Signed-off-by: Allan Nielsen <allan.nielsen@microchip.com>
+Reviewed-by: Steen Hegelund <steen.hegelund@microchip.com>
+Signed-off-by: Steen Hegelund <steen.hegelund@microchip.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mscc/ocelot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -733,7 +733,7 @@ static int ocelot_fdb_add(struct ndmsg *
+ }
+
+ return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
+- ENTRYTYPE_NORMAL);
++ ENTRYTYPE_LOCKED);
+ }
+
+ static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 14 Dec 2018 06:46:49 -0800
+Subject: net: clear skb->tstamp in forwarding paths
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 8203e2d844d34af247a151d8ebd68553a6e91785 ]
+
+Sergey reported that forwarding was no longer working
+if fq packet scheduler was used.
+
+This is caused by the recent switch to EDT model, since incoming
+packets might have been timestamped by __net_timestamp()
+
+__net_timestamp() uses ktime_get_real(), while fq expects packets
+using CLOCK_MONOTONIC base.
+
+The fix is to clear skb->tstamp in forwarding paths.
+
+Fixes: 80b14dee2bea ("net: Add a new socket option for a future transmit time.")
+Fixes: fb420d5d91c1 ("tcp/fq: move back to CLOCK_MONOTONIC")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Sergey Matyukevich <geomatsi@gmail.com>
+Tested-by: Sergey Matyukevich <geomatsi@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_forward.c | 1 +
+ net/ipv6/ip6_output.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -72,6 +72,7 @@ static int ip_forward_finish(struct net
+ if (unlikely(opt->optlen))
+ ip_forward_options(skb);
+
++ skb->tstamp = 0;
+ return dst_output(net, sk, skb);
+ }
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -378,6 +378,7 @@ static inline int ip6_forward_finish(str
+ __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+ __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+
++ skb->tstamp = 0;
+ return dst_output(net, sk, skb);
+ }
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 21 Dec 2018 14:49:01 -0600
+Subject: net: core: Fix Spectre v1 vulnerability
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit 50d5258634aee2e62832aa086d2fb0de00e72b91 ]
+
+flen is indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+net/core/filter.c:1101 bpf_check_classic() warn: potential spectre issue 'filter' [w]
+
+Fix this by sanitizing flen before using it to index filter at line 1101:
+
+ switch (filter[flen - 1].code) {
+
+and through pc at line 1040:
+
+ const struct sock_filter *ftest = &filter[pc];
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -68,6 +68,7 @@
+ #include <linux/seg6_local.h>
+ #include <net/seg6.h>
+ #include <net/seg6_local.h>
++#include <linux/nospec.h>
+
+ /**
+ * sk_filter_trim_cap - run a packet through a socket filter
+@@ -1033,6 +1034,7 @@ static int bpf_check_classic(const struc
+ bool anc_found;
+ int pc;
+
++ flen = array_index_nospec(flen, BPF_MAXINSNS + 1);
+ /* Check the filter code now */
+ for (pc = 0; pc < flen; pc++) {
+ const struct sock_filter *ftest = &filter[pc];
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Jan 2019 04:24:20 -0800
+Subject: net/hamradio/6pack: use mod_timer() to rearm timers
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 202700e30740c6568b5a6943662f3829566dd533 ]
+
+Using del_timer() + add_timer() is generally unsafe on SMP,
+as noticed by syzbot. Use mod_timer() instead.
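+
+The difference, roughly: mod_timer() updates the expiry in one atomic step
+whether or not the timer is already pending, while a del_timer()/add_timer()
+pair leaves a window in which another CPU can re-arm the timer, so the later
+add_timer() can hit an already-pending timer and trip its BUG_ON, as in the
+report below:
+
+	/* racy: the timer may be re-armed by another CPU between the calls */
+	del_timer(&sp->resync_t);
+	sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
+	add_timer(&sp->resync_t);
+
+	/* safe: single call, valid whether or not the timer is pending */
+	mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);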
+
+kernel BUG at kernel/time/timer.c:1136!
+invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+CPU: 1 PID: 1026 Comm: kworker/u4:4 Not tainted 4.20.0+ #2
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: events_unbound flush_to_ldisc
+RIP: 0010:add_timer kernel/time/timer.c:1136 [inline]
+RIP: 0010:add_timer+0xa81/0x1470 kernel/time/timer.c:1134
+Code: 4d 89 7d 40 48 c7 85 70 fe ff ff 00 00 00 00 c7 85 7c fe ff ff ff ff ff ff 48 89 85 90 fe ff ff e9 e6 f7 ff ff e8 cf 42 12 00 <0f> 0b e8 c8 42 12 00 0f 0b e8 c1 42 12 00 4c 89 bd 60 fe ff ff e9
+RSP: 0018:ffff8880a7fdf5a8 EFLAGS: 00010293
+RAX: ffff8880a7846340 RBX: dffffc0000000000 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: ffffffff816f3ee1 RDI: ffff88808a514ff8
+RBP: ffff8880a7fdf760 R08: 0000000000000007 R09: ffff8880a7846c58
+R10: ffff8880a7846340 R11: 0000000000000000 R12: ffff88808a514ff8
+R13: ffff88808a514ff8 R14: ffff88808a514dc0 R15: 0000000000000030
+FS: 0000000000000000(0000) GS:ffff8880ae700000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000000061c500 CR3: 00000000994d9000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ decode_prio_command drivers/net/hamradio/6pack.c:903 [inline]
+ sixpack_decode drivers/net/hamradio/6pack.c:971 [inline]
+ sixpack_receive_buf drivers/net/hamradio/6pack.c:457 [inline]
+ sixpack_receive_buf+0xf9c/0x1470 drivers/net/hamradio/6pack.c:434
+ tty_ldisc_receive_buf+0x164/0x1c0 drivers/tty/tty_buffer.c:465
+ tty_port_default_receive_buf+0x114/0x190 drivers/tty/tty_port.c:38
+ receive_buf drivers/tty/tty_buffer.c:481 [inline]
+ flush_to_ldisc+0x3b2/0x590 drivers/tty/tty_buffer.c:533
+ process_one_work+0xd0c/0x1ce0 kernel/workqueue.c:2153
+ worker_thread+0x143/0x14a0 kernel/workqueue.c:2296
+ kthread+0x357/0x430 kernel/kthread.c:246
+ ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:352
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Andreas Koensgen <ajk@comnets.uni-bremen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hamradio/6pack.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/hamradio/6pack.c
++++ b/drivers/net/hamradio/6pack.c
+@@ -524,10 +524,7 @@ static void resync_tnc(struct timer_list
+
+
+ /* Start resync timer again -- the TNC might be still absent */
+-
+- del_timer(&sp->resync_t);
+- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
+- add_timer(&sp->resync_t);
++ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
+ }
+
+ static inline int tnc_init(struct sixpack *sp)
+@@ -538,9 +535,7 @@ static inline int tnc_init(struct sixpac
+
+ sp->tty->ops->write(sp->tty, &inbyte, 1);
+
+- del_timer(&sp->resync_t);
+- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
+- add_timer(&sp->resync_t);
++ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
+
+ return 0;
+ }
+@@ -918,11 +913,8 @@ static void decode_prio_command(struct s
+ /* if the state byte has been received, the TNC is present,
+ so the resync timer can be reset. */
+
+- if (sp->tnc_state == TNC_IN_SYNC) {
+- del_timer(&sp->resync_t);
+- sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
+- add_timer(&sp->resync_t);
+- }
++ if (sp->tnc_state == TNC_IN_SYNC)
++ mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
+
+ sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
+ }
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Thu, 13 Dec 2018 17:23:32 +0100
+Subject: net: ipv4: do not handle duplicate fragments as overlapping
+
+From: Michal Kubecek <mkubecek@suse.cz>
+
+[ Upstream commit ade446403bfb79d3528d56071a84b15351a139ad ]
+
+Since commit 7969e5c40dfd ("ip: discard IPv4 datagrams with overlapping
+segments.") IPv4 reassembly code drops the whole queue whenever an
+overlapping fragment is received. However, the test is written in a way
+which detects duplicate fragments as overlapping so that in environments
+with many duplicate packets, fragmented packets may be undeliverable.
+
+Add an extra test and, for a (potentially) duplicate fragment, only drop
+the new fragment rather than the whole queue. Only the starting offset
+and length are checked, not the contents of the fragments, as that would
+be too expensive. For a similar reason, the linear list ("run") of an
+rbtree node is not iterated; we only check whether the new fragment is a
+subset of the interval covered by existing consecutive fragments.
+
+v2: instead of an exact check iterating through linear list of an rbtree
+node, only check if the new fragment is subset of the "run" (suggested
+by Eric Dumazet)
+
+Fixes: 7969e5c40dfd ("ip: discard IPv4 datagrams with overlapping segments.")
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -347,10 +347,10 @@ static int ip_frag_queue(struct ipq *qp,
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+ struct sk_buff *skb1, *prev_tail;
++ int ihl, end, skb1_run_end;
+ struct net_device *dev;
+ unsigned int fragsize;
+ int flags, offset;
+- int ihl, end;
+ int err = -ENOENT;
+ u8 ecn;
+
+@@ -420,7 +420,9 @@ static int ip_frag_queue(struct ipq *qp,
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+- * We do the same here for IPv4 (and increment an snmp counter).
++ * We do the same here for IPv4 (and increment an snmp counter) but
++ * we do not want to drop the whole queue in response to a duplicate
++ * fragment.
+ */
+
+ /* Find out where to put this fragment. */
+@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp,
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
++ skb1_run_end = skb1->ip_defrag_offset +
++ FRAG_CB(skb1)->frag_run_len;
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+- else if (offset >= skb1->ip_defrag_offset +
+- FRAG_CB(skb1)->frag_run_len)
++ else if (offset >= skb1_run_end)
+ rbn = &parent->rb_right;
+- else /* Found an overlap with skb1. */
+- goto discard_qp;
++ else if (offset >= skb1->ip_defrag_offset &&
++ end <= skb1_run_end)
++ goto err; /* No new data, potential duplicate */
++ else
++ goto discard_qp; /* Found an overlap */
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Fri, 28 Dec 2018 23:28:21 +0100
+Subject: net/ipv6: Fix a test against 'ipv6_find_idev()' return value
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 178fe94405bffbd1acd83b6ff3b40211185ae9c9 ]
+
+'ipv6_find_idev()' returns NULL on error, not an error pointer.
+Update the test accordingly and return -ENOBUFS, as already done in
+'addrconf_add_dev()', if NULL is returned.
+
+Fixes: ("ipv6: allow userspace to add IFA_F_OPTIMISTIC addresses")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4711,8 +4711,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, s
+ IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
+
+ idev = ipv6_find_idev(dev);
+- if (IS_ERR(idev))
+- return PTR_ERR(idev);
++ if (!idev)
++ return -ENOBUFS;
+
+ if (!ipv6_allow_optimistic_dad(net, idev))
+ cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+Date: Mon, 17 Dec 2018 10:02:42 +0000
+Subject: net: macb: restart tx after tx used bit read
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+[ Upstream commit 4298388574dae6168fa8940b3edc7ba965e8a7ab ]
+
+On some platforms (currently detected only on SAMA5D4) TX might get stuck
+even though packets are still present in the DMA memories and TX start was
+issued for them. This happens due to a race condition between the MACB
+driver updating the next TX buffer descriptor to be used and the IP reading
+the same descriptor. In such a case, the "TX USED BIT READ" interrupt is
+asserted. The GEM/MACB user guide specifies that if a "TX USED BIT READ"
+interrupt is asserted, TX must be restarted. Restart TX if the used bit is
+read and packets are present in the software TX queue. Packets are removed
+from the software TX queue if TX was successful for them (see
+macb_tx_interrupt()).
+
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -61,7 +61,8 @@
+ #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
+ | MACB_BIT(ISR_RLE) \
+ | MACB_BIT(TXERR))
+-#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
++#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
++ | MACB_BIT(TXUBR))
+
+ /* Max length of transmit frame must be a multiple of 8 bytes */
+ #define MACB_TX_LEN_ALIGN 8
+@@ -1313,6 +1314,21 @@ static void macb_hresp_error_task(unsign
+ netif_tx_start_all_queues(dev);
+ }
+
++static void macb_tx_restart(struct macb_queue *queue)
++{
++ unsigned int head = queue->tx_head;
++ unsigned int tail = queue->tx_tail;
++ struct macb *bp = queue->bp;
++
++ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
++ queue_writel(queue, ISR, MACB_BIT(TXUBR));
++
++ if (head == tail)
++ return;
++
++ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
++}
++
+ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ {
+ struct macb_queue *queue = dev_id;
+@@ -1370,6 +1386,9 @@ static irqreturn_t macb_interrupt(int ir
+ if (status & MACB_BIT(TCOMP))
+ macb_tx_interrupt(queue);
+
++ if (status & MACB_BIT(TXUBR))
++ macb_tx_restart(queue);
++
+ /* Link change detection isn't possible with RMII, so we'll
+ * add that if/when we get our hands on a full-blown MII PHY.
+ */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Yuval Avnery <yuvalav@mellanox.com>
+Date: Thu, 13 Dec 2018 02:26:46 +0200
+Subject: net/mlx5: Typo fix in del_sw_hw_rule
+
+From: Yuval Avnery <yuvalav@mellanox.com>
+
+[ Upstream commit f0337889147c956721696553ffcc97212b0948fe ]
+
+The expression was terminated with "," instead of ";", which resulted in
+set_fte getting a bad value for the modify_enable_mask field.
+
+Fixes: bd5251dbf156 ("net/mlx5_core: Introduce flow steering destination of type counter")
+Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
+Reviewed-by: Daniel Jurgens <danielj@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -432,7 +432,7 @@ static void del_sw_hw_rule(struct fs_nod
+
+ if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ --fte->dests_size) {
+- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
++ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ update_fte = true;
+ }
+ out:
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Tal Gilboa <talgi@mellanox.com>
+Date: Thu, 22 Nov 2018 14:20:45 +0200
+Subject: net/mlx5e: Cancel DIM work on close SQ
+
+From: Tal Gilboa <talgi@mellanox.com>
+
+[ Upstream commit fa2bf86bab4bbc61e5678a42a14e40075093a98f ]
+
+TXQ SQ closure is followed by closing the corresponding CQ. A pending
+DIM work would try to modify the now non-existing CQ.
+This would trigger an error:
+[85535.835926] mlx5_core 0000:af:00.0: mlx5_cmd_check:769:(pid 124399):
+MODIFY_CQ(0x403) op_mod(0x0) failed, status bad resource state(0x9), syndrome (0x1d7771)
+
+Fix by making sure to cancel any pending DIM work before destroying the SQ.
+
+Fixes: cbce4f444798 ("net/mlx5e: Enable adaptive-TX moderation")
+Signed-off-by: Tal Gilboa <talgi@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1383,6 +1383,7 @@ static void mlx5e_close_txqsq(struct mlx
+ struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_rate_limit rl = {0};
+
++ cancel_work_sync(&sq->dim.work);
+ mlx5e_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit) {
+ rl.rate = sq->rate_limit;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Alaa Hleihel <alaa@mellanox.com>
+Date: Sun, 25 Nov 2018 11:46:09 +0200
+Subject: net/mlx5e: Remove the false indication of software timestamping support
+
+From: Alaa Hleihel <alaa@mellanox.com>
+
+[ Upstream commit 4765420439e758bfa4808392d18b0a4cb6f06065 ]
+
+mlx5 driver falsely advertises support of software timestamping.
+Fix it by removing the false indication.
+
+Fixes: ef9814deafd0 ("net/mlx5e: Add HW timestamping (TS) support")
+Signed-off-by: Alaa Hleihel <alaa@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1101,11 +1101,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx
+ struct ethtool_ts_info *info)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+- int ret;
+-
+- ret = ethtool_op_get_ts_info(priv->netdev, info);
+- if (ret)
+- return ret;
+
+ info->phc_index = mlx5_clock_get_ptp_index(mdev);
+
+@@ -1113,9 +1108,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx
+ info->phc_index == -1)
+ return 0;
+
+- info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+- SOF_TIMESTAMPING_RX_HARDWARE |
+- SOF_TIMESTAMPING_RAW_HARDWARE;
++ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Mikhael Goikhman <migo@mellanox.com>
+Date: Mon, 19 Nov 2018 19:11:12 +0200
+Subject: net/mlx5e: Remove unused UDP GSO remaining counter
+
+From: Mikhael Goikhman <migo@mellanox.com>
+
+[ Upstream commit d13b224f431579fe2d712871d4265d7a22ca6c9c ]
+
+Remove tx_udp_seg_rem counter from ethtool output, as it is no longer
+being updated in the driver's data flow.
+
+Fixes: 3f44899ef2ce ("net/mlx5e: Use PARTIAL_GSO for UDP segmentation")
+Signed-off-by: Mikhael Goikhman <migo@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2 --
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 --
+ 2 files changed, 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -73,7 +73,6 @@ static const struct counter_desc sw_stat
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+@@ -194,7 +193,6 @@ void mlx5e_grp_sw_update_stats(struct ml
+ s->tx_nop += sq_stats->nop;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+- s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_cqe_err += sq_stats->cqe_err;
+ s->tx_recover += sq_stats->recover;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -86,7 +86,6 @@ struct mlx5e_sw_stats {
+ u64 tx_recover;
+ u64 tx_cqes;
+ u64 tx_queue_wake;
+- u64 tx_udp_seg_rem;
+ u64 tx_cqe_err;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_full;
+@@ -217,7 +216,6 @@ struct mlx5e_sq_stats {
+ u64 csum_partial_inner;
+ u64 added_vlan_packets;
+ u64 nop;
+- u64 udp_seg_rem;
+ #ifdef CONFIG_MLX5_EN_TLS
+ u64 tls_ooo;
+ u64 tls_resync_bytes;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Sun, 2 Dec 2018 15:45:53 +0200
+Subject: net/mlx5e: RX, Fix wrong early return in receive queue poll
+
+From: Tariq Toukan <tariqt@mellanox.com>
+
+[ Upstream commit bfc698254ba97b3e3e4ebbfae0ffa1f7e2fa0717 ]
+
+When the completion queue of the RQ is empty, do not immediately return.
+If left-over decompressed CQEs (from the previous cycle) were processed, we
+need to go to the finalization part of the poll function.
+
+Bug exists only when CQE compression is turned ON.
+
+This solves the following issue:
+mlx5_core 0000:82:00.1: mlx5_eq_int:544:(pid 0): CQ error on CQN 0xc08, syndrome 0x1
+mlx5_core 0000:82:00.1 p4p2: mlx5e_cq_error_event: cqn=0x000c08 event=0x04
+
+Fixes: 4b7dfc992514 ("net/mlx5e: Early-return on empty completion queues")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1150,7 +1150,7 @@ mpwrq_cqe_out:
+ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+ {
+ struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+- struct mlx5e_xdpsq *xdpsq;
++ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
+@@ -1161,10 +1161,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq
+ work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+- if (!cqe)
++ if (!cqe) {
++ if (unlikely(work_done))
++ goto out;
+ return 0;
+-
+- xdpsq = &rq->xdpsq;
++ }
+
+ do {
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+@@ -1179,6 +1180,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq
+ rq->handle_rx_cqe(rq, cqe);
+ } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
++out:
+ if (xdpsq->doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdpsq);
+ xdpsq->doorbell = false;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Moshe Shemesh <moshe@mellanox.com>
+Date: Fri, 2 Nov 2018 06:10:49 +0200
+Subject: net/mlx5e: RX, Verify MPWQE stride size is in range
+
+From: Moshe Shemesh <moshe@mellanox.com>
+
+[ Upstream commit e1c15b62b7015119d3e5915cd2ae3b89d59c2576 ]
+
+Add a check that the MPWQE stride size is within the range supported by HW.
+In case the calculated MPWQE stride size exceeds this range, a linear SKB
+can't be used and we should use a non-linear MPWQE instead.
+
+Fixes: 619a8f2a42f1 ("net/mlx5e: Use linear SKB in Striding RQ")
+Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
+Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struc
+ return !params->lro_en && frag_sz <= PAGE_SIZE;
+ }
+
++#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
++ MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
+ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+ {
+@@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb
+ if (!mlx5e_rx_is_linear_skb(mdev, params))
+ return false;
+
++ if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
++ return false;
++
+ if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+ return true;
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Marcin Wojtas <mw@semihalf.com>
+Date: Tue, 11 Dec 2018 13:56:49 +0100
+Subject: net: mvneta: fix operation for 64K PAGE_SIZE
+
+From: Marcin Wojtas <mw@semihalf.com>
+
+[ Upstream commit e735fd55b94bb48363737db3b1d57627c1a16b47 ]
+
+Recent changes in the mvneta driver reworked allocation
+and handling of the ingress buffers to use entire pages.
+Apart from that, in the SW BM scenario the HW must be informed
+via PRXDQS about the biggest possible incoming buffer
+that can be propagated by RX descriptors.
+
+The BufferSize field was filled according to the MTU-dependent
+pkt_size value. The later change to PAGE_SIZE broke RX operation
+when using 64K pages, as the field is simply too small.
+
+This patch conditionally limits the value passed to the BufferSize
+field of the PRXDQS register, depending on the PAGE_SIZE used.
+While at it, remove the now unused frag_size field of the mvneta_port
+structure.
+
+Fixes: 562e2f467e71 ("net: mvneta: Improve the buffer allocation method for SWBM")
+Signed-off-by: Marcin Wojtas <mw@semihalf.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -406,7 +406,6 @@ struct mvneta_port {
+ struct mvneta_pcpu_stats __percpu *stats;
+
+ int pkt_size;
+- unsigned int frag_size;
+ void __iomem *base;
+ struct mvneta_rx_queue *rxqs;
+ struct mvneta_tx_queue *txqs;
+@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mv
+ if (!pp->bm_priv) {
+ /* Set Offset */
+ mvneta_rxq_offset_set(pp, rxq, 0);
+- mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
++ mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
++ PAGE_SIZE :
++ MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ mvneta_rxq_fill(pp, rxq, rxq->size);
+ } else {
+@@ -3749,7 +3750,6 @@ static int mvneta_open(struct net_device
+ int ret;
+
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+- pp->frag_size = PAGE_SIZE;
+
+ ret = mvneta_setup_rxqs(pp);
+ if (ret)
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Tue, 11 Dec 2018 17:32:28 +0100
+Subject: net: mvpp2: 10G modes aren't supported on all ports
+
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+
+[ Upstream commit 006791772084383de779ef29f2e06f3a6e111e7d ]
+
+The mvpp2_phylink_validate() function sets all modes that are
+supported by a given PPv2 port. A recent change made all ports advertise
+that they support 10G modes in certain cases. This is not true, as only
+port #0 can do so. This patch fixes it.
+
+Fixes: 01b3fd5ac97c ("net: mvpp2: fix detection of 10G SFP modules")
+Cc: Baruch Siach <baruch@tkos.co.il>
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4292,12 +4292,14 @@ static void mvpp2_phylink_validate(struc
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_NA:
+- phylink_set(mask, 10000baseCR_Full);
+- phylink_set(mask, 10000baseSR_Full);
+- phylink_set(mask, 10000baseLR_Full);
+- phylink_set(mask, 10000baseLRM_Full);
+- phylink_set(mask, 10000baseER_Full);
+- phylink_set(mask, 10000baseKR_Full);
++ if (port->gop_id == 0) {
++ phylink_set(mask, 10000baseCR_Full);
++ phylink_set(mask, 10000baseSR_Full);
++ phylink_set(mask, 10000baseLR_Full);
++ phylink_set(mask, 10000baseLRM_Full);
++ phylink_set(mask, 10000baseER_Full);
++ phylink_set(mask, 10000baseKR_Full);
++ }
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 19 Dec 2018 18:00:12 +0100
+Subject: net: mvpp2: fix the phylink mode validation
+
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+
+[ Upstream commit 1b451fb2051b464b9758c09a3492104403252e2b ]
+
+The mvpp2_phylink_validate() function sets all modes that are supported by
+a given PPv2 port. A mistake caused the 10000baseT_Full mode to be
+advertised in some cases when a port wasn't configured to perform at
+10G. This patch fixes this.
+
+Fixes: d97c9f4ab000 ("net: mvpp2: 1000baseX support")
+Reported-by: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4293,6 +4293,7 @@ static void mvpp2_phylink_validate(struc
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_NA:
+ if (port->gop_id == 0) {
++ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+@@ -4310,7 +4311,6 @@ static void mvpp2_phylink_validate(struc
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+- phylink_set(mask, 10000baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+Date: Tue, 18 Dec 2018 16:57:04 +0900
+Subject: net: phy: Fix the issue that netif always links up after resuming
+
+From: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+[ Upstream commit 8742beb50f2db903d3b6d69ddd81d67ce9914453 ]
+
+Even though the link is down before entering hibernation, there is an issue
+that the network interface always links up after resuming from hibernation.
+
+If the link is still down before enabling the network interface, then after
+resuming from hibernation the phydev->state is forcibly set to PHY_UP in
+mdio_bus_phy_restore(), and the link comes up.
+
+In the suspend sequence, mdio_bus_phy_suspend() calls phy_stop_machine()
+and mdio_bus_phy_resume() calls phy_start_machine(), but only if the PHY is
+attached.
+In the restore sequence, it's enough to do the same as mdio_bus_phy_resume()
+because the state has been preserved.
+
+This patch fixes the issue by calling phy_start_machine() in
+mdio_bus_phy_restore() in the same way as mdio_bus_phy_resume().
+
+Fixes: bc87922ff59d ("phy: Move PHY PM operations into phy_device")
+Suggested-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy_device.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -164,11 +164,8 @@ static int mdio_bus_phy_restore(struct d
+ if (ret < 0)
+ return ret;
+
+- /* The PHY needs to renegotiate. */
+- phydev->link = 0;
+- phydev->state = PHY_UP;
+-
+- phy_start_machine(phydev);
++ if (phydev->attached_dev && phydev->adjust_link)
++ phy_start_machine(phydev);
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Myungho Jung <mhjungk@gmail.com>
+Date: Tue, 18 Dec 2018 09:02:25 -0800
+Subject: net/smc: fix TCP fallback socket release
+
+From: Myungho Jung <mhjungk@gmail.com>
+
+[ Upstream commit 78abe3d0dfad196959b1246003366e2610775ea6 ]
+
+clcsock can be released while kernel_accept() references it in TCP
+listen worker. Also, clcsock needs to wake up before released if TCP
+fallback is used and the clcsock is blocked by accept. Add a lock to
+safely release clcsock and call kernel_sock_shutdown() to wake up
+clcsock from accept in smc_release().
+
+Reported-by: syzbot+0bf2e01269f1274b4b03@syzkaller.appspotmail.com
+Reported-by: syzbot+e3132895630f957306bc@syzkaller.appspotmail.com
+Signed-off-by: Myungho Jung <mhjungk@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/af_smc.c | 14 ++++++++++++--
+ net/smc/smc.h | 4 ++++
+ 2 files changed, 16 insertions(+), 2 deletions(-)
+
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -145,8 +145,14 @@ static int smc_release(struct socket *so
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ }
+ if (smc->clcsock) {
++ if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
++ /* wake up clcsock accept */
++ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
++ }
++ mutex_lock(&smc->clcsock_release_lock);
+ sock_release(smc->clcsock);
+ smc->clcsock = NULL;
++ mutex_unlock(&smc->clcsock_release_lock);
+ }
+ if (smc->use_fallback) {
+ if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+@@ -203,6 +209,7 @@ static struct sock *smc_sock_alloc(struc
+ spin_lock_init(&smc->conn.send_lock);
+ sk->sk_prot->hash(sk);
+ sk_refcnt_debug_inc(sk);
++ mutex_init(&smc->clcsock_release_lock);
+
+ return sk;
+ }
+@@ -818,7 +825,7 @@ static int smc_clcsock_accept(struct smc
+ struct socket *new_clcsock = NULL;
+ struct sock *lsk = &lsmc->sk;
+ struct sock *new_sk;
+- int rc;
++ int rc = -EINVAL;
+
+ release_sock(lsk);
+ new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
+@@ -831,7 +838,10 @@ static int smc_clcsock_accept(struct smc
+ }
+ *new_smc = smc_sk(new_sk);
+
+- rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
++ mutex_lock(&lsmc->clcsock_release_lock);
++ if (lsmc->clcsock)
++ rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
++ mutex_unlock(&lsmc->clcsock_release_lock);
+ lock_sock(lsk);
+ if (rc < 0)
+ lsk->sk_err = -rc;
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -219,6 +219,10 @@ struct smc_sock { /* smc sock contain
+ * started, waiting for unsent
+ * data to be sent
+ */
++ struct mutex clcsock_release_lock;
++ /* protects clcsock of a listen
++ * socket
++ * */
+ };
+
+ static inline struct smc_sock *smc_sk(const struct sock *sk)
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 17 Dec 2018 11:06:06 +0300
+Subject: net: stmmac: Fix an error code in probe()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit b26322d2ac6c1c1087af73856531bb836f6963ca ]
+
+The function should return an error if create_singlethread_workqueue()
+fails.
+
+Fixes: 34877a15f787 ("net: stmmac: Rework and fix TX Timeout code")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4247,6 +4247,7 @@ int stmmac_dvr_probe(struct device *devi
+ priv->wq = create_singlethread_workqueue("stmmac_wq");
+ if (!priv->wq) {
+ dev_err(priv->device, "failed to create workqueue\n");
++ ret = -ENOMEM;
+ goto error_wq;
+ }
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Ganesh Goudar <ganeshgr@chelsio.com>
+Date: Wed, 19 Dec 2018 17:18:22 +0530
+Subject: net/tls: allocate tls context using GFP_ATOMIC
+
+From: Ganesh Goudar <ganeshgr@chelsio.com>
+
+[ Upstream commit c6ec179a0082e2e76e3a72050c2b99d3d0f3da3f ]
+
+create_ctx can be called from atomic context, hence use
+GFP_ATOMIC instead of GFP_KERNEL.
+
+[ 395.962599] BUG: sleeping function called from invalid context at mm/slab.h:421
+[ 395.979896] in_atomic(): 1, irqs_disabled(): 0, pid: 16254, name: openssl
+[ 395.996564] 2 locks held by openssl/16254:
+[ 396.010492] #0: 00000000347acb52 (sk_lock-AF_INET){+.+.}, at: do_tcp_setsockopt.isra.44+0x13b/0x9a0
+[ 396.029838] #1: 000000006c9552b5 (device_spinlock){+...}, at: tls_init+0x1d/0x280
+[ 396.047675] CPU: 5 PID: 16254 Comm: openssl Tainted: G O 4.20.0-rc6+ #25
+[ 396.066019] Hardware name: Supermicro X10SRA-F/X10SRA-F, BIOS 2.0c 09/25/2017
+[ 396.083537] Call Trace:
+[ 396.096265] dump_stack+0x5e/0x8b
+[ 396.109876] ___might_sleep+0x216/0x250
+[ 396.123940] kmem_cache_alloc_trace+0x1b0/0x240
+[ 396.138800] create_ctx+0x1f/0x60
+[ 396.152504] tls_init+0xbd/0x280
+[ 396.166135] tcp_set_ulp+0x191/0x2d0
+[ 396.180035] ? tcp_set_ulp+0x2c/0x2d0
+[ 396.193960] do_tcp_setsockopt.isra.44+0x148/0x9a0
+[ 396.209013] __sys_setsockopt+0x7c/0xe0
+[ 396.223054] __x64_sys_setsockopt+0x20/0x30
+[ 396.237378] do_syscall_64+0x4a/0x180
+[ 396.251200] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Fixes: df9d4a178022 ("net/tls: sleeping function from invalid context")
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tls/tls_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -550,7 +550,7 @@ static struct tls_context *create_ctx(st
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx;
+
+- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return NULL;
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Mon, 17 Dec 2018 11:26:38 +0100
+Subject: net: Use __kernel_clockid_t in uapi net_stamp.h
+
+From: Davide Caratti <dcaratti@redhat.com>
+
+[ Upstream commit e2c4cf7f98a519eb4d95532bfa06bcaf3562fed5 ]
+
+Herton reports the following error when building a userspace program that
+includes net_tstamp.h:
+
+ In file included from foo.c:2:
+ /usr/include/linux/net_tstamp.h:158:2: error: unknown type name
+ ‘clockid_t’
+ clockid_t clockid; /* reference clockid */
+ ^~~~~~~~~
+
+Fix it by using __kernel_clockid_t in place of clockid_t.
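+
+A minimal reproduction sketch (hedged; the file name and build setup are
+illustrative, not from the original report): a program that only includes
+the UAPI header fails to compile before this change, because nothing has
+defined clockid_t, while __kernel_clockid_t always comes in through
+<linux/types.h>, which the header already includes.
+
+	/* foo.c -- builds only once sock_txtime uses __kernel_clockid_t */
+	#include <linux/net_tstamp.h>
+
+	int main(void)
+	{
+		struct sock_txtime cfg = { .flags = 0 };
+		return (int)sizeof(cfg);
+	}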
+
+Fixes: 80b14dee2bea ("net: Add a new socket option for a future transmit time.")
+Cc: Timothy Redaelli <tredaelli@redhat.com>
+Reported-by: Herton R. Krzesinski <herton@redhat.com>
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Tested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/net_tstamp.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/uapi/linux/net_tstamp.h
++++ b/include/uapi/linux/net_tstamp.h
+@@ -155,8 +155,8 @@ enum txtime_flags {
+ };
+
+ struct sock_txtime {
+- clockid_t clockid; /* reference clockid */
+- __u32 flags; /* as defined by enum txtime_flags */
++ __kernel_clockid_t clockid;/* reference clockid */
++ __u32 flags; /* as defined by enum txtime_flags */
+ };
+
+ #endif /* _NET_TIMESTAMPING_H */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sat, 29 Dec 2018 13:56:37 -0800
+Subject: net/wan: fix a double free in x25_asy_open_tty()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit d5c7c745f254c6cb98b3b3f15fe789b8bd770c72 ]
+
+When x25_asy_open() fails, it already cleans up by itself,
+so its caller doesn't need to free the memory again.
+
+It seems we still have to call x25_asy_free() to clear the SLF_INUSE
+bit, so just set these pointers to NULL after kfree().
+
+Reported-and-tested-by: syzbot+5e5e969e525129229052@syzkaller.appspotmail.com
+Fixes: 3b780bed3138 ("x25_asy: Free x25_asy on x25_asy_open() failure.")
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wan/x25_asy.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -486,8 +486,10 @@ static int x25_asy_open(struct net_devic
+
+ /* Cleanup */
+ kfree(sl->xbuff);
++ sl->xbuff = NULL;
+ noxbuff:
+ kfree(sl->rbuff);
++ sl->rbuff = NULL;
+ norbuff:
+ return -ENOMEM;
+ }
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sat, 29 Dec 2018 13:56:38 -0800
+Subject: netrom: fix locking in nr_find_socket()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 7314f5480f3e37e570104dc5e0f28823ef849e72 ]
+
+nr_find_socket(), nr_find_peer() and nr_find_listener() lock the
+sock after finding it in the global list. However, the call path
+requires BH disabled for the sock lock consistently.
+
+Actually, the locking is unnecessary at this point; we can just hold
+the sock refcnt to make sure it is not gone after we unlock the global
+list, and lock it later only when needed.
+
+Reported-and-tested-by: syzbot+f621cda8b7e598908efa@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netrom/af_netrom.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax2
+ sk_for_each(s, &nr_list)
+ if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
+ s->sk_state == TCP_LISTEN) {
+- bh_lock_sock(s);
++ sock_hold(s);
+ goto found;
+ }
+ s = NULL;
+@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsig
+ struct nr_sock *nr = nr_sk(s);
+
+ if (nr->my_index == index && nr->my_id == id) {
+- bh_lock_sock(s);
++ sock_hold(s);
+ goto found;
+ }
+ }
+@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigne
+
+ if (nr->your_index == index && nr->your_id == id &&
+ !ax25cmp(&nr->dest_addr, dest)) {
+- bh_lock_sock(s);
++ sock_hold(s);
+ goto found;
+ }
+ }
+@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circu
+ if (i != 0 && j != 0) {
+ if ((sk=nr_find_socket(i, j)) == NULL)
+ break;
+- bh_unlock_sock(sk);
++ sock_put(sk);
+ }
+
+ id++;
+@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, str
+ }
+
+ if (sk != NULL) {
++ bh_lock_sock(sk);
+ skb_reset_transport_header(skb);
+
+ if (frametype == NR_CONNACK && skb->len == 22)
+@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, str
+
+ ret = nr_process_rx_frame(sk, skb);
+ bh_unlock_sock(sk);
++ sock_put(sk);
+ return ret;
+ }
+
+@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, str
+ (make = nr_make_new(sk)) == NULL) {
+ nr_transmit_refusal(skb, 0);
+ if (sk)
+- bh_unlock_sock(sk);
++ sock_put(sk);
+ return 0;
+ }
+
++ bh_lock_sock(sk);
++
+ window = skb->data[20];
+
+ skb->sk = make;
+@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, str
+ sk->sk_data_ready(sk);
+
+ bh_unlock_sock(sk);
++ sock_put(sk);
+
+ nr_insert_socket(make);
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
+Date: Mon, 10 Dec 2018 15:03:43 -0800
+Subject: nfp: flower: ensure TCP flags can be placed in IPv6 frame
+
+From: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
+
+[ Upstream commit 290974d434783624c13a9530a23c45f9c5ffe018 ]
+
+Previously we did not ensure TCP flags have a place to be stored
+when using IPv6. We correct this by including the IPv6 key layer when
+we match TCP flags and the IPv6 key layer has not already been
+included.
+
+Fixes: 07e1671cfca5 ("nfp: flower: refactor shared ip header in match offload")
+Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
+Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/netronome/nfp/flower/offload.c | 28 +++++++++++++++-----
+ 1 file changed, 22 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -375,13 +375,29 @@ nfp_flower_calculate_key_layers(struct n
+ !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+ return -EOPNOTSUPP;
+
+- /* We need to store TCP flags in the IPv4 key space, thus
+- * we need to ensure we include a IPv4 key layer if we have
+- * not done so already.
++ /* We need to store TCP flags in the either the IPv4 or IPv6 key
++ * space, thus we need to ensure we include a IPv4/IPv6 key
++ * layer if we have not done so already.
+ */
+- if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
+- key_layer |= NFP_FLOWER_LAYER_IPV4;
+- key_size += sizeof(struct nfp_flower_ipv4);
++ if (!key_basic)
++ return -EOPNOTSUPP;
++
++ if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
++ !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
++ switch (key_basic->n_proto) {
++ case cpu_to_be16(ETH_P_IP):
++ key_layer |= NFP_FLOWER_LAYER_IPV4;
++ key_size += sizeof(struct nfp_flower_ipv4);
++ break;
++
++ case cpu_to_be16(ETH_P_IPV6):
++ key_layer |= NFP_FLOWER_LAYER_IPV6;
++ key_size += sizeof(struct nfp_flower_ipv6);
++ break;
++
++ default:
++ return -EOPNOTSUPP;
++ }
+ }
+ }
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Willem de Bruijn <willemb@google.com>
+Date: Sat, 22 Dec 2018 16:53:45 -0500
+Subject: packet: validate address length if non-zero
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 6b8d95f1795c42161dc0984b6863e95d6acf24ed ]
+
+Validate packet socket address length if a length is given. Zero
+length is equivalent to not setting an address.
+
+Fixes: 99137b7888f4 ("packet: validate address length")
+Reported-by: Ido Schimmel <idosch@idosch.org>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2625,7 +2625,7 @@ static int tpacket_snd(struct packet_soc
+ sll_addr)))
+ goto out;
+ proto = saddr->sll_protocol;
+- addr = saddr->sll_addr;
++ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out;
+@@ -2825,7 +2825,7 @@ static int packet_snd(struct socket *soc
+ if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
+ goto out;
+ proto = saddr->sll_protocol;
+- addr = saddr->sll_addr;
++ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 21 Dec 2018 12:06:59 -0500
+Subject: packet: validate address length
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 99137b7888f4058087895d035d81c6b2d31015c5 ]
+
+Packet sockets with SOCK_DGRAM may pass an address for use in
+dev_hard_header. Ensure that it is of sufficient length.
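+
+For context, a minimal userspace sketch of the path being validated (the
+interface name and payload are illustrative assumptions): with a SOCK_DGRAM
+packet socket, the destination link-layer address is passed in sockaddr_ll,
+where sll_halen says how many bytes of sll_addr are valid, while the kernel
+copies dev->addr_len bytes for dev_hard_header().
+
+	#include <sys/socket.h>
+	#include <linux/if_packet.h>
+	#include <linux/if_ether.h>
+	#include <net/if.h>
+	#include <arpa/inet.h>
+
+	static void send_dgram(const void *payload, size_t len)
+	{
+		int fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+		struct sockaddr_ll sll = {
+			.sll_family   = AF_PACKET,
+			.sll_protocol = htons(ETH_P_IP),
+			.sll_ifindex  = if_nametoindex("eth0"),
+			.sll_halen    = ETH_ALEN,	/* must cover dev->addr_len */
+			.sll_addr     = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+		};
+
+		sendto(fd, payload, len, 0,
+		       (struct sockaddr *)&sll, sizeof(sll));
+	}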
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2627,6 +2627,8 @@ static int tpacket_snd(struct packet_soc
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
++ if (addr && dev && saddr->sll_halen < dev->addr_len)
++ goto out;
+ }
+
+ err = -ENXIO;
+@@ -2825,6 +2827,8 @@ static int packet_snd(struct socket *soc
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
++ if (addr && dev && saddr->sll_halen < dev->addr_len)
++ goto out;
+ }
+
+ err = -ENXIO;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 21 Dec 2018 15:41:17 -0600
+Subject: phonet: af_phonet: Fix Spectre v1 vulnerability
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit d686026b1e6ed4ea27d630d8f54f9a694db088b2 ]
+
+protocol is indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+net/phonet/af_phonet.c:48 phonet_proto_get() warn: potential spectre issue 'proto_tab' [w] (local cap)
+
+Fix this by sanitizing protocol before using it to index proto_tab.
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/phonet/af_phonet.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -34,6 +34,8 @@
+ #include <net/phonet/phonet.h>
+ #include <net/phonet/pn_dev.h>
+
++#include <linux/nospec.h>
++
+ /* Transport protocol registration */
+ static const struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
+
+@@ -43,6 +45,7 @@ static const struct phonet_protocol *pho
+
+ if (protocol >= PHONET_NPROTO)
+ return NULL;
++ protocol = array_index_nospec(protocol, PHONET_NPROTO);
+
+ rcu_read_lock();
+ pp = rcu_dereference(proto_tab[protocol]);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 30 Dec 2018 12:43:42 -0800
+Subject: ptr_ring: wrap back ->producer in __ptr_ring_swap_queue()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit aff6db454599d62191aabc208930e891748e4322 ]
+
+__ptr_ring_swap_queue() tries to move pointers from the old
+ring to the new one, but it forgets to check if ->producer
+is beyond the new size at the end of the operation. This leads
+to an out-of-bound access in __ptr_ring_produce() as reported
+by syzbot.
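+
+For context, a paraphrased sketch of the copy loop in
+__ptr_ring_swap_queue() (not the exact upstream code): entries are copied
+while producer < size, so producer can legitimately end up equal to size
+once the new array is full.
+
+	while ((ptr = __ptr_ring_consume(r))) {
+		if (producer < size)
+			queue[producer++] = ptr;	/* may leave producer == size */
+		else if (destroy)
+			destroy(ptr);
+	}
+	/* without wrapping producer back to 0, a later __ptr_ring_produce()
+	 * would write queue[size], one slot past the end of the new array */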
+
+Reported-by: syzbot+8993c0fa96d57c399735@syzkaller.appspotmail.com
+Fixes: 5d49de532002 ("ptr_ring: resize support")
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/ptr_ring.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_que
+ else if (destroy)
+ destroy(ptr);
+
++ if (producer >= size)
++ producer = 0;
+ __ptr_ring_set_size(r, size);
+ r->producer = producer;
+ r->consumer_head = 0;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 9 Dec 2018 23:27:01 -0800
+Subject: qed: Fix command number mismatch between driver and the mfw
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit c3db8d531045774aeee6e0f731ab15b0c450de45 ]
+
+The value of the OEM_CFG_UPDATE command differs between the driver and the
+Management firmware (mfw). Fix this gap by adding a reserved field.
+
+Fixes: cac6f691546b ("qed: Add support for Unified Fabric Port.")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+@@ -12669,8 +12669,9 @@ enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_BW_UPDATE10,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_BW_UPDATE11,
+- MFW_DRV_MSG_OEM_CFG_UPDATE,
++ MFW_DRV_MSG_RESERVED,
+ MFW_DRV_MSG_GET_TLV_REQ,
++ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_MAX
+ };
+
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+Date: Fri, 21 Dec 2018 15:38:52 +0100
+Subject: qmi_wwan: Add support for Fibocom NL678 series
+
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+
+[ Upstream commit 7c3db4105ce8d69bcb5c04bfa9acd1e9119af8d5 ]
+
+Added support for the Fibocom NL678 series cellular module QMI interface.
+Using QMI_QUIRK_SET_DTR is required for Qualcomm MDM9x40 series chipsets.
+
+Signed-off-by: Jörgen Storvist <jorgen.storvist@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1265,6 +1265,7 @@ static const struct usb_device_id produc
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
++ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+
+ /* 4. Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+Date: Wed, 12 Dec 2018 22:45:34 +0100
+Subject: qmi_wwan: Added support for Fibocom NL668 series
+
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+
+[ Upstream commit 110a1cc28bc383adb4885eff27e18c61ddebffb4 ]
+
+Added support for the Fibocom NL668 series QMI interface.
+Using QMI_QUIRK_SET_DTR is required for Qualcomm MDM9x07 chipsets.
+
+Signed-off-by: Jörgen Storvist <jorgen.storvist@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1117,6 +1117,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
++ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
+ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+Date: Thu, 13 Dec 2018 17:00:35 +0100
+Subject: qmi_wwan: Added support for Telit LN940 series
+
+From: "Jörgen Storvist" <jorgen.storvist@gmail.com>
+
+[ Upstream commit 1986af16e8ed355822600c24b3d2f0be46b573df ]
+
+Added support for the Telit LN940 series cellular modules' QMI interface.
+The QMI_QUIRK_SET_DTR quirk is required for the Qualcomm MDM9x40 chipset.
+
+Signed-off-by: Jörgen Storvist <jorgen.storvist@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1230,6 +1230,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
+ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Sun, 30 Dec 2018 13:16:12 +0100
+Subject: r8169: fix WoL device wakeup enable
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+[ Upstream commit 3bd8264511035dc97c902f03fa9f1d07f95f8f62 ]
+
+In rtl8169_runtime_resume() we configure WoL but don't set the device
+to wakeup-enabled. This prevents PME generation once the cable is
+re-plugged. Fix this by moving the call to device_set_wakeup_enable()
+to __rtl8169_set_wol().
+
+Fixes: 433f9d0ddcc6 ("r8169: improve saved_wolopts handling")
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1528,6 +1528,8 @@ static void __rtl8169_set_wol(struct rtl
+ }
+
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
++
++ device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ }
+
+ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+@@ -1549,8 +1551,6 @@ static int rtl8169_set_wol(struct net_de
+
+ rtl_unlock_work(tp);
+
+- device_set_wakeup_enable(d, tp->saved_wolopts);
+-
+ pm_runtime_put_noidle(d);
+
+ return 0;
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 10 Dec 2018 18:00:52 +0800
+Subject: sctp: initialize sin6_flowinfo for ipv6 addrs in sctp_inet6addr_event
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 4a2eb0c37b4759416996fbb4c45b932500cf06d3 ]
+
+syzbot reported a kernel-infoleak, which is caused by an uninitialized
+field (sin6_flowinfo) of addr->a.v6 in sctp_inet6addr_event().
+The call trace is as below:
+
+ BUG: KMSAN: kernel-infoleak in _copy_to_user+0x19a/0x230 lib/usercopy.c:33
+ CPU: 1 PID: 8164 Comm: syz-executor2 Not tainted 4.20.0-rc3+ #95
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+ Google 01/01/2011
+ Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x32d/0x480 lib/dump_stack.c:113
+ kmsan_report+0x12c/0x290 mm/kmsan/kmsan.c:683
+ kmsan_internal_check_memory+0x32a/0xa50 mm/kmsan/kmsan.c:743
+ kmsan_copy_to_user+0x78/0xd0 mm/kmsan/kmsan_hooks.c:634
+ _copy_to_user+0x19a/0x230 lib/usercopy.c:33
+ copy_to_user include/linux/uaccess.h:183 [inline]
+ sctp_getsockopt_local_addrs net/sctp/socket.c:5998 [inline]
+ sctp_getsockopt+0x15248/0x186f0 net/sctp/socket.c:7477
+ sock_common_getsockopt+0x13f/0x180 net/core/sock.c:2937
+ __sys_getsockopt+0x489/0x550 net/socket.c:1939
+ __do_sys_getsockopt net/socket.c:1950 [inline]
+ __se_sys_getsockopt+0xe1/0x100 net/socket.c:1947
+ __x64_sys_getsockopt+0x62/0x80 net/socket.c:1947
+ do_syscall_64+0xcf/0x110 arch/x86/entry/common.c:291
+ entry_SYSCALL_64_after_hwframe+0x63/0xe7
+
+sin6_flowinfo is not really used by SCTP, so the issue is fixed by simply
+setting the field to 0.
+
+The issue has existed since the very beginning.
+Thanks to Alexander for the provided reproducer.
+
+Reported-by: syzbot+ad5d327e6936a2e284be@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/ipv6.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct n
+ if (addr) {
+ addr->a.v6.sin6_family = AF_INET6;
+ addr->a.v6.sin6_port = 0;
++ addr->a.v6.sin6_flowinfo = 0;
+ addr->a.v6.sin6_addr = ifa->addr;
+ addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+ addr->valid = 1;
--- /dev/null
+ax25-fix-a-use-after-free-in-ax25_fillin_cb.patch
+gro_cell-add-napi_disable-in-gro_cells_destroy.patch
+ibmveth-fix-dma-unmap-error-in-ibmveth_xmit_start-error-path.patch
+ieee802154-lowpan_header_create-check-must-check-daddr.patch
+ip6mr-fix-potential-spectre-v1-vulnerability.patch
+ipv4-fix-potential-spectre-v1-vulnerability.patch
+ipv6-explicitly-initialize-udp6_addr-in-udp_sock_create6.patch
+ipv6-tunnels-fix-two-use-after-free.patch
+ip-validate-header-length-on-virtual-device-xmit.patch
+isdn-fix-kernel-infoleak-in-capi_unlocked_ioctl.patch
+net-clear-skb-tstamp-in-forwarding-paths.patch
+net-core-fix-spectre-v1-vulnerability.patch
+net-hamradio-6pack-use-mod_timer-to-rearm-timers.patch
+net-ipv4-do-not-handle-duplicate-fragments-as-overlapping.patch
+net-macb-restart-tx-after-tx-used-bit-read.patch
+net-mvpp2-10g-modes-aren-t-supported-on-all-ports.patch
+net-phy-fix-the-issue-that-netif-always-links-up-after-resuming.patch
+netrom-fix-locking-in-nr_find_socket.patch
+net-smc-fix-tcp-fallback-socket-release.patch
+net-stmmac-fix-an-error-code-in-probe.patch
+net-tls-allocate-tls-context-using-gfp_atomic.patch
+net-wan-fix-a-double-free-in-x25_asy_open_tty.patch
+packet-validate-address-length.patch
+packet-validate-address-length-if-non-zero.patch
+phonet-af_phonet-fix-spectre-v1-vulnerability.patch
+ptr_ring-wrap-back-producer-in-__ptr_ring_swap_queue.patch
+qmi_wwan-added-support-for-fibocom-nl668-series.patch
+qmi_wwan-added-support-for-telit-ln940-series.patch
+qmi_wwan-add-support-for-fibocom-nl678-series.patch
+sctp-initialize-sin6_flowinfo-for-ipv6-addrs-in-sctp_inet6addr_event.patch
+sock-make-sock-sk_stamp-thread-safe.patch
+tcp-fix-a-race-in-inet_diag_dump_icsk.patch
+tipc-check-tsk-group-in-tipc_wait_for_cond.patch
+tipc-compare-remote-and-local-protocols-in-tipc_udp_enable.patch
+tipc-fix-a-double-free-in-tipc_enable_bearer.patch
+tipc-fix-a-double-kfree_skb.patch
+tipc-use-lock_sock-in-tipc_sk_reinit.patch
+vhost-make-sure-used-idx-is-seen-before-log-in-vhost_add_used_n.patch
+vsock-send-reset-control-packet-when-socket-is-partially-bound.patch
+xen-netfront-tolerate-frags-with-no-data.patch
+net-mlx5-typo-fix-in-del_sw_hw_rule.patch
+tipc-check-group-dests-after-tipc_wait_for_cond.patch
+net-mlx5e-remove-the-false-indication-of-software-timestamping-support.patch
+ipv6-frags-fix-bogus-skb-sk-in-reassembled-packets.patch
+net-ipv6-fix-a-test-against-ipv6_find_idev-return-value.patch
+nfp-flower-ensure-tcp-flags-can-be-placed-in-ipv6-frame.patch
+ipv6-route-fix-return-value-of-ip6_neigh_lookup-on-neigh_create-error.patch
+mscc-configured-mac-entries-should-be-locked.patch
+net-mlx5e-cancel-dim-work-on-close-sq.patch
+net-mlx5e-rx-verify-mpwqe-stride-size-is-in-range.patch
+net-mvpp2-fix-the-phylink-mode-validation.patch
+qed-fix-command-number-mismatch-between-driver-and-the-mfw.patch
+mlxsw-core-increase-timeout-during-firmware-flash-process.patch
+net-mlx5e-remove-unused-udp-gso-remaining-counter.patch
+net-mlx5e-rx-fix-wrong-early-return-in-receive-queue-poll.patch
+net-mvneta-fix-operation-for-64k-page_size.patch
+net-use-__kernel_clockid_t-in-uapi-net_stamp.h.patch
+r8169-fix-wol-device-wakeup-enable.patch
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Deepa Dinamani <deepa.kernel@gmail.com>
+Date: Thu, 27 Dec 2018 18:55:09 -0800
+Subject: sock: Make sock->sk_stamp thread-safe
+
+From: Deepa Dinamani <deepa.kernel@gmail.com>
+
+[ Upstream commit 3a0ed3e9619738067214871e9cb826fa23b2ddb9 ]
+
+Al Viro mentioned (Message-ID
+<20170626041334.GZ10672@ZenIV.linux.org.uk>)
+that there is probably a race condition
+lurking in accesses of sk_stamp on 32-bit machines.
+
+sock->sk_stamp is of type ktime_t, which is always an s64.
+On a 32-bit architecture, we might run into situations of
+unsafe access, as the access to the field becomes non-atomic.
+
+Use seqlocks for synchronization.
+This allows us to avoid using spinlocks for readers as
+readers do not need mutual exclusion.
+
+Another approach to solve this is to require sk_lock for all
+modifications of the timestamps. The current approach allows
+for timestamps to have their own lock: sk_stamp_lock.
+This allows for the patch to not compete with already
+existing critical sections, and side effects are limited
+to the paths in the patch.
+
+The addition of the new field maintains the data locality
+optimizations from
+commit 9115e8cd2a0c ("net: reorganize struct sock for better data
+locality")
+
+Note that all the instances of the sk_stamp accesses
+are either through the ioctl or the syscall recvmsg.
+
+Signed-off-by: Deepa Dinamani <deepa.kernel@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 38 +++++++++++++++++++++++++++++++++++---
+ net/compat.c | 15 +++++++++------
+ net/core/sock.c | 15 ++++++++++-----
+ net/sunrpc/svcsock.c | 2 +-
+ 4 files changed, 55 insertions(+), 15 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -298,6 +298,7 @@ struct sock_common {
+ * @sk_filter: socket filtering instructions
+ * @sk_timer: sock cleanup timer
+ * @sk_stamp: time stamp of last packet received
++ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
+ * @sk_tsflags: SO_TIMESTAMPING socket options
+ * @sk_tskey: counter to disambiguate concurrent tstamp requests
+ * @sk_zckey: counter to order MSG_ZEROCOPY notifications
+@@ -474,6 +475,9 @@ struct sock {
+ const struct cred *sk_peer_cred;
+ long sk_rcvtimeo;
+ ktime_t sk_stamp;
++#if BITS_PER_LONG==32
++ seqlock_t sk_stamp_seq;
++#endif
+ u16 sk_tsflags;
+ u8 sk_shutdown;
+ u32 sk_tskey;
+@@ -2290,6 +2294,34 @@ static inline void sk_drops_add(struct s
+ atomic_add(segs, &sk->sk_drops);
+ }
+
++static inline ktime_t sock_read_timestamp(struct sock *sk)
++{
++#if BITS_PER_LONG==32
++ unsigned int seq;
++ ktime_t kt;
++
++ do {
++ seq = read_seqbegin(&sk->sk_stamp_seq);
++ kt = sk->sk_stamp;
++ } while (read_seqretry(&sk->sk_stamp_seq, seq));
++
++ return kt;
++#else
++ return sk->sk_stamp;
++#endif
++}
++
++static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
++{
++#if BITS_PER_LONG==32
++ write_seqlock(&sk->sk_stamp_seq);
++ sk->sk_stamp = kt;
++ write_sequnlock(&sk->sk_stamp_seq);
++#else
++ sk->sk_stamp = kt;
++#endif
++}
++
+ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
+ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+@@ -2314,7 +2346,7 @@ sock_recv_timestamp(struct msghdr *msg,
+ (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ __sock_recv_timestamp(msg, sk, skb);
+ else
+- sk->sk_stamp = kt;
++ sock_write_timestamp(sk, kt);
+
+ if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+ __sock_recv_wifi_status(msg, sk, skb);
+@@ -2335,9 +2367,9 @@ static inline void sock_recv_ts_and_drop
+ if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
+ __sock_recv_ts_and_drops(msg, sk, skb);
+ else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+- sk->sk_stamp = skb->tstamp;
++ sock_write_timestamp(sk, skb->tstamp);
+ else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+- sk->sk_stamp = 0;
++ sock_write_timestamp(sk, 0);
+ }
+
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct soc
+ ctv = (struct compat_timeval __user *) userstamp;
+ err = -ENOENT;
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+- tv = ktime_to_timeval(sk->sk_stamp);
++ tv = ktime_to_timeval(sock_read_timestamp(sk));
++
+ if (tv.tv_sec == -1)
+ return err;
+ if (tv.tv_sec == 0) {
+- sk->sk_stamp = ktime_get_real();
+- tv = ktime_to_timeval(sk->sk_stamp);
++ ktime_t kt = ktime_get_real();
++ sock_write_timestamp(sk, kt);
++ tv = ktime_to_timeval(kt);
+ }
+ err = 0;
+ if (put_user(tv.tv_sec, &ctv->tv_sec) ||
+@@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct s
+ ctv = (struct compat_timespec __user *) userstamp;
+ err = -ENOENT;
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+- ts = ktime_to_timespec(sk->sk_stamp);
++ ts = ktime_to_timespec(sock_read_timestamp(sk));
+ if (ts.tv_sec == -1)
+ return err;
+ if (ts.tv_sec == 0) {
+- sk->sk_stamp = ktime_get_real();
+- ts = ktime_to_timespec(sk->sk_stamp);
++ ktime_t kt = ktime_get_real();
++ sock_write_timestamp(sk, kt);
++ ts = ktime_to_timespec(kt);
+ }
+ err = 0;
+ if (put_user(ts.tv_sec, &ctv->tv_sec) ||
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2803,6 +2803,9 @@ void sock_init_data(struct socket *sock,
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+
+ sk->sk_stamp = SK_DEFAULT_STAMP;
++#if BITS_PER_LONG==32
++ seqlock_init(&sk->sk_stamp_seq);
++#endif
+ atomic_set(&sk->sk_zckey, 0);
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+@@ -2902,12 +2905,13 @@ int sock_get_timestamp(struct sock *sk,
+ struct timeval tv;
+
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+- tv = ktime_to_timeval(sk->sk_stamp);
++ tv = ktime_to_timeval(sock_read_timestamp(sk));
+ if (tv.tv_sec == -1)
+ return -ENOENT;
+ if (tv.tv_sec == 0) {
+- sk->sk_stamp = ktime_get_real();
+- tv = ktime_to_timeval(sk->sk_stamp);
++ ktime_t kt = ktime_get_real();
++ sock_write_timestamp(sk, kt);
++ tv = ktime_to_timeval(kt);
+ }
+ return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
+ }
+@@ -2918,11 +2922,12 @@ int sock_get_timestampns(struct sock *sk
+ struct timespec ts;
+
+ sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+- ts = ktime_to_timespec(sk->sk_stamp);
++ ts = ktime_to_timespec(sock_read_timestamp(sk));
+ if (ts.tv_sec == -1)
+ return -ENOENT;
+ if (ts.tv_sec == 0) {
+- sk->sk_stamp = ktime_get_real();
++ ktime_t kt = ktime_get_real();
++ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec(sk->sk_stamp);
+ }
+ return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -574,7 +574,7 @@ static int svc_udp_recvfrom(struct svc_r
+ /* Don't enable netstamp, sunrpc doesn't
+ need that much accuracy */
+ }
+- svsk->sk_sk->sk_stamp = skb->tstamp;
++ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
+ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
+
+ len = skb->len;
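
The sk_stamp helpers and the seqlock_init() hunk above implement one
pattern that is easier to follow in a single place. Below is a minimal,
self-contained sketch of that pattern, assuming a generic kernel
context; struct stamped and its functions are illustrative names, not
taken from the patch:

#include <linux/ktime.h>
#include <linux/seqlock.h>

/* A 64-bit timestamp guarded by a seqlock so that 32-bit readers never
 * observe a torn value. On 64-bit the aligned load/store is already
 * atomic, which is what the BITS_PER_LONG==32 guards in the patch
 * express. */
struct stamped {
	ktime_t		stamp;
	seqlock_t	stamp_seq;
};

static void stamped_init(struct stamped *s)
{
	s->stamp = 0;
	seqlock_init(&s->stamp_seq);
}

static void stamped_write(struct stamped *s, ktime_t kt)
{
	write_seqlock(&s->stamp_seq);	/* writers exclude each other */
	s->stamp = kt;
	write_sequnlock(&s->stamp_seq);
}

static ktime_t stamped_read(struct stamped *s)
{
	unsigned int seq;
	ktime_t kt;

	do {	/* lockless read, retried if a write raced with it */
		seq = read_seqbegin(&s->stamp_seq);
		kt = s->stamp;
	} while (read_seqretry(&s->stamp_seq, seq));

	return kt;
}

Readers take no lock and simply retry, which is why the patch can avoid
adding any locking to the receive fast paths.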
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 20 Dec 2018 15:28:56 -0800
+Subject: tcp: fix a race in inet_diag_dump_icsk()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f0c928d878e7d01b613c9ae5c971a6b1e473a938 ]
+
+Alexei reported use-after-frees in inet_diag_dump_icsk() [1]
+
+Because we use refcount_set() when various sockets are set up and
+inserted into ehash, we also need to make sure inet_diag_dump_icsk()
+won't race with the refcount_set() operations.
+
+Jonathan Lemon sent a patch changing inet_twsk_hashdance(), but
+other spots would need risky changes.
+
+Instead, fix inet_diag_dump_icsk(), as this bug was only
+introduced in linux-4.10.
+
+[1] Quoting Alexei :
+
+First something iterating over sockets finds already freed tw socket:
+
+refcount_t: increment on 0; use-after-free.
+WARNING: CPU: 2 PID: 2738 at lib/refcount.c:153 refcount_inc+0x26/0x30
+RIP: 0010:refcount_inc+0x26/0x30
+RSP: 0018:ffffc90004c8fbc0 EFLAGS: 00010282
+RAX: 000000000000002b RBX: 0000000000000000 RCX: 0000000000000000
+RDX: ffff88085ee9d680 RSI: ffff88085ee954c8 RDI: ffff88085ee954c8
+RBP: ffff88010ecbd2c0 R08: 0000000000000000 R09: 000000000000174c
+R10: ffffffff81e7c5a0 R11: 0000000000000000 R12: 0000000000000000
+R13: ffff8806ba9bf210 R14: ffffffff82304600 R15: ffff88010ecbd328
+FS: 00007f81f5a7d700(0000) GS:ffff88085ee80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f81e2a95000 CR3: 000000069b2eb006 CR4: 00000000003606e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ inet_diag_dump_icsk+0x2b3/0x4e0 [inet_diag] // sock_hold(sk); in net/ipv4/inet_diag.c:1002
+ ? kmalloc_large_node+0x37/0x70
+ ? __kmalloc_node_track_caller+0x1cb/0x260
+ ? __alloc_skb+0x72/0x1b0
+ ? __kmalloc_reserve.isra.40+0x2e/0x80
+ __inet_diag_dump+0x3b/0x80 [inet_diag]
+ netlink_dump+0x116/0x2a0
+ netlink_recvmsg+0x205/0x3c0
+ sock_read_iter+0x89/0xd0
+ __vfs_read+0xf7/0x140
+ vfs_read+0x8a/0x140
+ SyS_read+0x3f/0xa0
+ do_syscall_64+0x5a/0x100
+
+then a minute later twsk timer fires and hits two bad refcnts
+for this freed socket:
+
+refcount_t: decrement hit 0; leaking memory.
+WARNING: CPU: 31 PID: 0 at lib/refcount.c:228 refcount_dec+0x2e/0x40
+Modules linked in:
+RIP: 0010:refcount_dec+0x2e/0x40
+RSP: 0018:ffff88085f5c3ea8 EFLAGS: 00010296
+RAX: 000000000000002c RBX: ffff88010ecbd2c0 RCX: 000000000000083f
+RDX: 0000000000000000 RSI: 00000000000000f6 RDI: 000000000000003f
+RBP: ffffc90003c77280 R08: 0000000000000000 R09: 00000000000017d3
+R10: ffffffff81e7c5a0 R11: 0000000000000000 R12: ffffffff82ad2d80
+R13: ffffffff8182de00 R14: ffff88085f5c3ef8 R15: 0000000000000000
+FS: 0000000000000000(0000) GS:ffff88085f5c0000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fbe42685250 CR3: 0000000002209001 CR4: 00000000003606e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <IRQ>
+ inet_twsk_kill+0x9d/0xc0 // inet_twsk_bind_unhash(tw, hashinfo);
+ call_timer_fn+0x29/0x110
+ run_timer_softirq+0x36b/0x3a0
+
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 31 PID: 0 at lib/refcount.c:187 refcount_sub_and_test+0x46/0x50
+RIP: 0010:refcount_sub_and_test+0x46/0x50
+RSP: 0018:ffff88085f5c3eb8 EFLAGS: 00010296
+RAX: 0000000000000026 RBX: ffff88010ecbd2c0 RCX: 000000000000083f
+RDX: 0000000000000000 RSI: 00000000000000f6 RDI: 000000000000003f
+RBP: ffff88010ecbd358 R08: 0000000000000000 R09: 000000000000185b
+R10: ffffffff81e7c5a0 R11: 0000000000000000 R12: ffff88010ecbd358
+R13: ffffffff8182de00 R14: ffff88085f5c3ef8 R15: 0000000000000000
+FS: 0000000000000000(0000) GS:ffff88085f5c0000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fbe42685250 CR3: 0000000002209001 CR4: 00000000003606e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <IRQ>
+ inet_twsk_put+0x12/0x20 // inet_twsk_put(tw);
+ call_timer_fn+0x29/0x110
+ run_timer_softirq+0x36b/0x3a0
+
+Fixes: 67db3e4bfbc9 ("tcp: no longer hold ehash lock while calling tcp_get_info()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Alexei Starovoitov <ast@kernel.org>
+Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
+Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_diag.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -998,7 +998,9 @@ next_chunk:
+ if (!inet_diag_bc_sk(bc, sk))
+ goto next_normal;
+
+- sock_hold(sk);
++ if (!refcount_inc_not_zero(&sk->sk_refcnt))
++ goto next_normal;
++
+ num_arr[accum] = num;
+ sk_arr[accum] = sk;
+ if (++accum == SKARR_SZ)
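
The one-line change above encodes a general rule for lockless lookups
that is worth seeing in isolation. A short sketch, with illustrative
names (struct obj and obj_tryget() are not kernel identifiers):

#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
	/* payload */
};

/* An object found by a lockless walk may have already dropped its last
 * reference but not yet been freed. An unconditional refcount_inc()
 * would warn and resurrect a dying object, as in the trace above, so
 * only take a reference if the count is still nonzero. */
static struct obj *obj_tryget(struct obj *candidate)
{
	if (!candidate || !refcount_inc_not_zero(&candidate->refcnt))
		return NULL;	/* caller must skip this entry */
	return candidate;
}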
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 16 Dec 2018 23:25:12 -0800
+Subject: tipc: check group dests after tipc_wait_for_cond()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 3c6306d44082ef007a258ae1b86ea58e6974ee3f ]
+
+Similar to commit 143ece654f9f ("tipc: check tsk->group in tipc_wait_for_cond()"),
+we have to reload grp->dests too after we re-take the sock lock.
+This means we also need to move the dsts check after
+tipc_wait_for_cond().
+
+Fixes: 75da2163dbb6 ("tipc: introduce communication groups")
+Reported-and-tested-by: syzbot+99f20222fc5018d2b97a@syzkaller.appspotmail.com
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/socket.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1007,7 +1007,7 @@ static int tipc_send_group_bcast(struct
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+- struct tipc_nlist *dsts = tipc_group_dests(tsk->group);
++ struct tipc_nlist *dsts;
+ struct tipc_mc_method *method = &tsk->mc_method;
+ bool ack = method->mandatory && method->rcast;
+ int blks = tsk_blocks(MCAST_H_SIZE + dlen);
+@@ -1016,9 +1016,6 @@ static int tipc_send_group_bcast(struct
+ struct sk_buff_head pkts;
+ int rc = -EHOSTUNREACH;
+
+- if (!dsts->local && !dsts->remote)
+- return -EHOSTUNREACH;
+-
+ /* Block or return if any destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tsk->cong_link_cnt && tsk->group &&
+@@ -1026,6 +1023,10 @@ static int tipc_send_group_bcast(struct
+ if (unlikely(rc))
+ return rc;
+
++ dsts = tipc_group_dests(tsk->group);
++ if (!dsts->local && !dsts->remote)
++ return -EHOSTUNREACH;
++
+ /* Complete message header */
+ if (dest) {
+ msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Tue, 11 Dec 2018 21:43:51 -0800
+Subject: tipc: check tsk->group in tipc_wait_for_cond()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 143ece654f9f5b37bedea252a990be37e48ae3a5 ]
+
+tipc_wait_for_cond() drops the socket lock before going to sleep,
+but tsk->group could be freed right after that release_sock().
+So we have to re-check and reload tsk->group after it wakes up.
+
+After this patch, tipc_wait_for_cond() returns -ERESTARTSYS when
+tsk->group is NULL, instead of continuing with the assumption of
+a non-NULL tsk->group.
+
+(It looks like 'dsts' should be re-checked and reloaded too, but
+it is a different bug.)
+
+Similar for tipc_send_group_unicast() and tipc_send_group_anycast().
+
+Reported-by: syzbot+10a9db47c3a0e13eb31c@syzkaller.appspotmail.com
+Fixes: b7d42635517f ("tipc: introduce flow control for group broadcast messages")
+Fixes: ee106d7f942d ("tipc: introduce group anycast messaging")
+Fixes: 27bd9ec027f3 ("tipc: introduce group unicast messaging")
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/socket.c | 25 ++++++++++++++-----------
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -878,7 +878,6 @@ static int tipc_send_group_unicast(struc
+ DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_sock *tsk = tipc_sk(sk);
+- struct tipc_group *grp = tsk->group;
+ struct net *net = sock_net(sk);
+ struct tipc_member *mb = NULL;
+ u32 node, port;
+@@ -892,7 +891,9 @@ static int tipc_send_group_unicast(struc
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(&tsk->cong_links, node, 0) &&
+- !tipc_group_cong(grp, node, port, blks, &mb));
++ tsk->group &&
++ !tipc_group_cong(tsk->group, node, port, blks,
++ &mb));
+ if (unlikely(rc))
+ return rc;
+
+@@ -922,7 +923,6 @@ static int tipc_send_group_anycast(struc
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct list_head *cong_links = &tsk->cong_links;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+- struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_member *first = NULL;
+ struct tipc_member *mbr = NULL;
+@@ -939,9 +939,10 @@ static int tipc_send_group_anycast(struc
+ type = msg_nametype(hdr);
+ inst = dest->addr.name.name.instance;
+ scope = msg_lookup_scope(hdr);
+- exclude = tipc_group_exclude(grp);
+
+ while (++lookups < 4) {
++ exclude = tipc_group_exclude(tsk->group);
++
+ first = NULL;
+
+ /* Look for a non-congested destination member, if any */
+@@ -950,7 +951,8 @@ static int tipc_send_group_anycast(struc
+ &dstcnt, exclude, false))
+ return -EHOSTUNREACH;
+ tipc_dest_pop(&dsts, &node, &port);
+- cong = tipc_group_cong(grp, node, port, blks, &mbr);
++ cong = tipc_group_cong(tsk->group, node, port, blks,
++ &mbr);
+ if (!cong)
+ break;
+ if (mbr == first)
+@@ -969,7 +971,8 @@ static int tipc_send_group_anycast(struc
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(cong_links, node, 0) &&
+- !tipc_group_cong(grp, node, port,
++ tsk->group &&
++ !tipc_group_cong(tsk->group, node, port,
+ blks, &mbr));
+ if (unlikely(rc))
+ return rc;
+@@ -1004,8 +1007,7 @@ static int tipc_send_group_bcast(struct
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+- struct tipc_group *grp = tsk->group;
+- struct tipc_nlist *dsts = tipc_group_dests(grp);
++ struct tipc_nlist *dsts = tipc_group_dests(tsk->group);
+ struct tipc_mc_method *method = &tsk->mc_method;
+ bool ack = method->mandatory && method->rcast;
+ int blks = tsk_blocks(MCAST_H_SIZE + dlen);
+@@ -1018,8 +1020,9 @@ static int tipc_send_group_bcast(struct
+ return -EHOSTUNREACH;
+
+ /* Block or return if any destination link or member is congested */
+- rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
+- !tipc_group_bc_cong(grp, blks));
++ rc = tipc_wait_for_cond(sock, &timeout,
++ !tsk->cong_link_cnt && tsk->group &&
++ !tipc_group_bc_cong(tsk->group, blks));
+ if (unlikely(rc))
+ return rc;
+
+@@ -1034,7 +1037,7 @@ static int tipc_send_group_bcast(struct
+ msg_set_hdr_sz(hdr, GROUP_H_SIZE);
+ msg_set_destport(hdr, 0);
+ msg_set_destnode(hdr, 0);
+- msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
++ msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
+
+ /* Avoid getting stuck with repeated forced replicasts */
+ msg_set_grp_bc_ack_req(hdr, ack);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 10 Dec 2018 15:23:30 -0800
+Subject: tipc: compare remote and local protocols in tipc_udp_enable()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit fb83ed496b9a654f60cd1d58a0e1e79ec5694808 ]
+
+When TIPC_NLA_UDP_REMOTE is an IPv6 mcast address but
+TIPC_NLA_UDP_LOCAL is an IPv4 address, a NULL-ptr deref is triggered,
+as the UDP tunnel sock is initialized as an IPv4 or IPv6 sock based
+solely on the protocol of the local address.
+
+We should just error out when the remote address and the local
+address have different protocols.
+
+Reported-by: syzbot+eb4da3a20fad2e52555d@syzkaller.appspotmail.com
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/udp_media.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -680,6 +680,11 @@ static int tipc_udp_enable(struct net *n
+ if (err)
+ goto err;
+
++ if (remote.proto != local.proto) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ /* Autoconfigure own node identity if needed */
+ if (!tipc_own_id(net)) {
+ memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 23 Dec 2018 21:45:56 -0800
+Subject: tipc: fix a double free in tipc_enable_bearer()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit dc4501ff287547dea7ca10f1c580c741291a8760 ]
+
+bearer_disable() already calls kfree_rcu() to free struct tipc_bearer,
+so we don't need to call kfree() again.
+
+Fixes: cb30a63384bc ("tipc: refactor function tipc_enable_bearer()")
+Reported-by: syzbot+b981acf1fb240c0c128b@syzkaller.appspotmail.com
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/bearer.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net
+ res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
+ if (res) {
+ bearer_disable(net, b);
+- kfree(b);
+ errstr = "failed to create discoverer";
+ goto rejected;
+ }
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 10 Dec 2018 12:45:45 -0800
+Subject: tipc: fix a double kfree_skb()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit acb4a33e9856d5fa3384b87d3d8369229be06d31 ]
+
+tipc_udp_xmit() drops the packet on error, so there is no
+need to drop it again.
+
+Fixes: ef20cd4dd163 ("tipc: introduce UDP replicast")
+Reported-and-tested-by: syzbot+eae585ba2cc2752d3704@syzkaller.appspotmail.com
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/udp_media.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net
+ }
+
+ err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
+- if (err) {
+- kfree_skb(_skb);
++ if (err)
+ goto out;
+- }
+ }
+ err = 0;
+ out:
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 10 Dec 2018 11:49:55 -0800
+Subject: tipc: use lock_sock() in tipc_sk_reinit()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 15ef70e286176165d28b0b8a969b422561a68dfc ]
+
+lock_sock() must be used in process context to be race-free with
+other lock_sock() callers, for example tipc_release(); taking the
+underlying spinlock directly can't serialize a parallel tipc_release().
+
+As lock_sock() may block, we have to hold the sock refcnt before
+rhashtable_walk_stop() and release it after rhashtable_walk_start().
+
+Fixes: 07f6c4bc048a ("tipc: convert tipc reference table to use generic rhashtable")
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Cc: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/socket.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2686,11 +2686,15 @@ void tipc_sk_reinit(struct net *net)
+ rhashtable_walk_start(&iter);
+
+ while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+- spin_lock_bh(&tsk->sk.sk_lock.slock);
++ sock_hold(&tsk->sk);
++ rhashtable_walk_stop(&iter);
++ lock_sock(&tsk->sk);
+ msg = &tsk->phdr;
+ msg_set_prevnode(msg, tipc_own_addr(net));
+ msg_set_orignode(msg, tipc_own_addr(net));
+- spin_unlock_bh(&tsk->sk.sk_lock.slock);
++ release_sock(&tsk->sk);
++ rhashtable_walk_start(&iter);
++ sock_put(&tsk->sk);
+ }
+
+ rhashtable_walk_stop(&iter);
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 13 Dec 2018 10:53:37 +0800
+Subject: vhost: make sure used idx is seen before log in vhost_add_used_n()
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 841df922417eb82c835e93d4b93eb6a68c99d599 ]
+
+We miss a write barrier that guarantees the used idx is updated and seen
+before the log. Without it, userspace may sync and copy the used ring
+before the used idx update. Fix this by adding a barrier before log_write().
+
+Fixes: 8dd014adfea6f ("vhost-net: mergeable buffers support")
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/vhost.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2233,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqu
+ return -EFAULT;
+ }
+ if (unlikely(vq->log_used)) {
++ /* Make sure used idx is seen before log. */
++ smp_wmb();
+ /* Log used index update. */
+ log_write(vq->log_base,
+ vq->log_addr + offsetof(struct vring_used, idx),
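
The two added lines above express an ordering requirement that is
easier to see on its own. A sketch with made-up names (publish(),
state and flag are illustrative, not vhost identifiers):

#include <linux/types.h>
#include <asm/barrier.h>

static u16 state;	/* data the other side will copy             */
static u16 flag;	/* tells the other side the data has changed */

static void publish(u16 new_state)
{
	state = new_state;	/* 1. update the data                      */
	smp_wmb();		/* 2. order the data store before the flag */
	flag = 1;		/* 3. only now signal that it changed      */
}

An observer that sees the flag set (and pairs this with its own read
barrier) is then guaranteed to also see the new state; without the
smp_wmb() the two stores may become visible in either order, which is
exactly the window between the used idx update and log_write() that the
patch closes.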
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Jorgen Hansen <jhansen@vmware.com>
+Date: Tue, 18 Dec 2018 00:34:06 -0800
+Subject: VSOCK: Send reset control packet when socket is partially bound
+
+From: Jorgen Hansen <jhansen@vmware.com>
+
+[ Upstream commit a915b982d8f5e4295f64b8dd37ce753874867e88 ]
+
+If a server-side socket is bound to an address but not yet in the
+listening state, incoming connection requests should receive a reset
+control packet in response. However, the function used to send the
+reset silently drops the reset packet if the sending socket isn't bound
+to a remote address (as is the case for a bound socket not yet in
+the listening state). This change fixes this by using the source
+of the incoming packet as the destination for the reset packet in
+this case.
+
+Fixes: d021c344051a ("VSOCK: Introduce VM Sockets")
+Reviewed-by: Adit Ranadive <aditr@vmware.com>
+Reviewed-by: Vishnu Dasa <vdasa@vmware.com>
+Signed-off-by: Jorgen Hansen <jhansen@vmware.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/vmci_transport.c | 67 ++++++++++++++++++++++++++++++-----------
+ 1 file changed, 50 insertions(+), 17 deletions(-)
+
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struc
+ }
+
+ static int
++vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
++ struct sockaddr_vm *dst,
++ enum vmci_transport_packet_type type,
++ u64 size,
++ u64 mode,
++ struct vmci_transport_waiting_info *wait,
++ u16 proto,
++ struct vmci_handle handle)
++{
++ struct vmci_transport_packet *pkt;
++ int err;
++
++ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
++ if (!pkt)
++ return -ENOMEM;
++
++ err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
++ mode, wait, proto, handle,
++ true);
++ kfree(pkt);
++
++ return err;
++}
++
++static int
+ vmci_transport_send_control_pkt(struct sock *sk,
+ enum vmci_transport_packet_type type,
+ u64 size,
+@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct s
+ u16 proto,
+ struct vmci_handle handle)
+ {
+- struct vmci_transport_packet *pkt;
+ struct vsock_sock *vsk;
+- int err;
+
+ vsk = vsock_sk(sk);
+
+@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct s
+ if (!vsock_addr_bound(&vsk->remote_addr))
+ return -EINVAL;
+
+- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+- if (!pkt)
+- return -ENOMEM;
+-
+- err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
+- &vsk->remote_addr, type, size,
+- mode, wait, proto, handle,
+- true);
+- kfree(pkt);
+-
+- return err;
++ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
++ &vsk->remote_addr,
++ type, size, mode,
++ wait, proto, handle);
+ }
+
+ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
+@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(
+ static int vmci_transport_send_reset(struct sock *sk,
+ struct vmci_transport_packet *pkt)
+ {
++ struct sockaddr_vm *dst_ptr;
++ struct sockaddr_vm dst;
++ struct vsock_sock *vsk;
++
+ if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
+ return 0;
+- return vmci_transport_send_control_pkt(sk,
+- VMCI_TRANSPORT_PACKET_TYPE_RST,
+- 0, 0, NULL, VSOCK_PROTO_INVALID,
+- VMCI_INVALID_HANDLE);
++
++ vsk = vsock_sk(sk);
++
++ if (!vsock_addr_bound(&vsk->local_addr))
++ return -EINVAL;
++
++ if (vsock_addr_bound(&vsk->remote_addr)) {
++ dst_ptr = &vsk->remote_addr;
++ } else {
++ vsock_addr_init(&dst, pkt->dg.src.context,
++ pkt->src_port);
++ dst_ptr = &dst;
++ }
++ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
++ VMCI_TRANSPORT_PACKET_TYPE_RST,
++ 0, 0, NULL, VSOCK_PROTO_INVALID,
++ VMCI_INVALID_HANDLE);
+ }
+
+ static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
--- /dev/null
+From foo@baz Fri Jan 4 19:32:42 CET 2019
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 18 Dec 2018 16:06:19 +0100
+Subject: xen/netfront: tolerate frags with no data
+
+From: Juergen Gross <jgross@suse.com>
+
+[ Upstream commit d81c5054a5d1d4999c7cdead7636b6cd4af83d36 ]
+
+At least old Xen net backends seem to send frags with no real data
+sometimes. If such a fragment happens to occur with the frag limit
+already reached, the frontend will currently BUG even though this
+situation is easily recoverable.
+
+Modify the BUG_ON() condition accordingly.
+
+Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct
+ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
+ unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+
+- BUG_ON(pull_to <= skb_headlen(skb));
++ BUG_ON(pull_to < skb_headlen(skb));
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {