4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Jul 2018 06:33:30 +0000 (08:33 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 19 Jul 2018 06:33:30 +0000 (08:33 +0200)
added patches:
alx-take-rtnl-before-calling-__alx_open-from-resume.patch
atm-preserve-value-of-skb-truesize-when-accounting-to-vcc.patch
atm-zatm-fix-potential-spectre-v1.patch
hv_netvsc-split-sub-channel-setup-into-async-and-sync.patch
ipv6-sr-fix-passing-wrong-flags-to-crypto_alloc_shash.patch
ipvlan-fix-ifla_mtu-ignored-on-newlink.patch
ixgbe-split-xdp_tx-tail-and-xdp_redirect-map-flushing.patch
net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch
net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch
net-fix-use-after-free-in-gro-with-esp.patch
net-macb-fix-ptp-time-adjustment-for-large-negative-delta.patch
net-mlx5-e-switch-avoid-setup-attempt-if-not-being-e-switch-manager.patch
net-mlx5-fix-command-interface-race-in-polling-mode.patch
net-mlx5-fix-incorrect-raw-command-length-parsing.patch
net-mlx5-fix-required-capability-for-manipulating-mpfs.patch
net-mlx5-fix-wrong-size-allocation-for-qos-etc-tc-regitster.patch
net-mlx5e-avoid-dealing-with-vport-representors-if-not-being-e-switch-manager.patch
net-mlx5e-don-t-attempt-to-dereference-the-ppriv-struct-if-not-being-eswitch-manager.patch
net-mvneta-fix-the-rx-desc-dma-address-in-the-rx-path.patch
net-packet-fix-use-after-free.patch
net-sungem-fix-rx-checksum-support.patch
net-tcp-fix-socket-lookups-with-so_bindtodevice.patch
net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch
qed-fix-setting-of-incorrect-eswitch-mode.patch
qed-fix-use-of-incorrect-size-in-memcpy-call.patch
qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch
qede-adverstise-software-timestamp-caps-when-phc-is-not-available.patch
qmi_wwan-add-support-for-the-dell-wireless-5821e-module.patch
r8152-napi-hangup-fix-after-disconnect.patch
stmmac-fix-dma-channel-hang-in-half-duplex-mode.patch
strparser-remove-early-eaten-to-fix-full-tcp-receive-buffer-stall.patch
tcp-fix-fast-open-key-endianness.patch
tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch
vhost_net-validate-sock-before-trying-to-put-its-fd.patch
vsock-fix-loopback-on-big-endian-systems.patch

36 files changed:
queue-4.14/alx-take-rtnl-before-calling-__alx_open-from-resume.patch [new file with mode: 0644]
queue-4.14/atm-preserve-value-of-skb-truesize-when-accounting-to-vcc.patch [new file with mode: 0644]
queue-4.14/atm-zatm-fix-potential-spectre-v1.patch [new file with mode: 0644]
queue-4.14/hv_netvsc-split-sub-channel-setup-into-async-and-sync.patch [new file with mode: 0644]
queue-4.14/ipv6-sr-fix-passing-wrong-flags-to-crypto_alloc_shash.patch [new file with mode: 0644]
queue-4.14/ipvlan-fix-ifla_mtu-ignored-on-newlink.patch [new file with mode: 0644]
queue-4.14/ixgbe-split-xdp_tx-tail-and-xdp_redirect-map-flushing.patch [new file with mode: 0644]
queue-4.14/net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch [new file with mode: 0644]
queue-4.14/net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch [new file with mode: 0644]
queue-4.14/net-fix-use-after-free-in-gro-with-esp.patch [new file with mode: 0644]
queue-4.14/net-macb-fix-ptp-time-adjustment-for-large-negative-delta.patch [new file with mode: 0644]
queue-4.14/net-mlx5-e-switch-avoid-setup-attempt-if-not-being-e-switch-manager.patch [new file with mode: 0644]
queue-4.14/net-mlx5-fix-command-interface-race-in-polling-mode.patch [new file with mode: 0644]
queue-4.14/net-mlx5-fix-incorrect-raw-command-length-parsing.patch [new file with mode: 0644]
queue-4.14/net-mlx5-fix-required-capability-for-manipulating-mpfs.patch [new file with mode: 0644]
queue-4.14/net-mlx5-fix-wrong-size-allocation-for-qos-etc-tc-regitster.patch [new file with mode: 0644]
queue-4.14/net-mlx5e-avoid-dealing-with-vport-representors-if-not-being-e-switch-manager.patch [new file with mode: 0644]
queue-4.14/net-mlx5e-don-t-attempt-to-dereference-the-ppriv-struct-if-not-being-eswitch-manager.patch [new file with mode: 0644]
queue-4.14/net-mvneta-fix-the-rx-desc-dma-address-in-the-rx-path.patch [new file with mode: 0644]
queue-4.14/net-packet-fix-use-after-free.patch [new file with mode: 0644]
queue-4.14/net-sungem-fix-rx-checksum-support.patch [new file with mode: 0644]
queue-4.14/net-tcp-fix-socket-lookups-with-so_bindtodevice.patch [new file with mode: 0644]
queue-4.14/net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch [new file with mode: 0644]
queue-4.14/qed-fix-setting-of-incorrect-eswitch-mode.patch [new file with mode: 0644]
queue-4.14/qed-fix-use-of-incorrect-size-in-memcpy-call.patch [new file with mode: 0644]
queue-4.14/qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch [new file with mode: 0644]
queue-4.14/qede-adverstise-software-timestamp-caps-when-phc-is-not-available.patch [new file with mode: 0644]
queue-4.14/qmi_wwan-add-support-for-the-dell-wireless-5821e-module.patch [new file with mode: 0644]
queue-4.14/r8152-napi-hangup-fix-after-disconnect.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/stmmac-fix-dma-channel-hang-in-half-duplex-mode.patch [new file with mode: 0644]
queue-4.14/strparser-remove-early-eaten-to-fix-full-tcp-receive-buffer-stall.patch [new file with mode: 0644]
queue-4.14/tcp-fix-fast-open-key-endianness.patch [new file with mode: 0644]
queue-4.14/tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch [new file with mode: 0644]
queue-4.14/vhost_net-validate-sock-before-trying-to-put-its-fd.patch [new file with mode: 0644]
queue-4.14/vsock-fix-loopback-on-big-endian-systems.patch [new file with mode: 0644]

diff --git a/queue-4.14/alx-take-rtnl-before-calling-__alx_open-from-resume.patch b/queue-4.14/alx-take-rtnl-before-calling-__alx_open-from-resume.patch
new file mode 100644 (file)
index 0000000..2c04cd6
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Fri, 29 Jun 2018 17:51:26 +0200
+Subject: alx: take rtnl before calling __alx_open from resume
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit bc800e8b39bad60ccdb83be828da63af71ab87b3 ]
+
+The __alx_open function can be called from ndo_open, which is called
+under RTNL, or from alx_resume, which isn't. Since commit d768319cd427,
+we're calling the netif_set_real_num_{tx,rx}_queues functions, which
+need to be called under RTNL.
+
+This is similar to commit 0c2cc02e571a ("igb: Move the calls to set the
+Tx and Rx queues into igb_open").
+
+Fixes: d768319cd427 ("alx: enable multiple tx queues")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/atheros/alx/main.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct alx_priv *alx = pci_get_drvdata(pdev);
+       struct alx_hw *hw = &alx->hw;
++      int err;
+       alx_reset_phy(hw);
+       if (!netif_running(alx->dev))
+               return 0;
+       netif_device_attach(alx->dev);
+-      return __alx_open(alx, true);
++
++      rtnl_lock();
++      err = __alx_open(alx, true);
++      rtnl_unlock();
++
++      return err;
+ }
+ static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/queue-4.14/atm-preserve-value-of-skb-truesize-when-accounting-to-vcc.patch b/queue-4.14/atm-preserve-value-of-skb-truesize-when-accounting-to-vcc.patch
new file mode 100644 (file)
index 0000000..e2a9bc6
--- /dev/null
@@ -0,0 +1,174 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Sat, 16 Jun 2018 11:55:44 +0100
+Subject: atm: Preserve value of skb->truesize when accounting to vcc
+
+From: David Woodhouse <dwmw2@infradead.org>
+
+[ Upstream commit 9bbe60a67be5a1c6f79b3c9be5003481a50529ff ]
+
+ATM accounts for in-flight TX packets in sk_wmem_alloc of the VCC on
+which they are to be sent. But it doesn't take ownership of those
+packets from the sock (if any) which originally owned them. They should
+remain owned by their actual sender until they've left the box.
+
+There's a hack in pskb_expand_head() to avoid adjusting skb->truesize
+for certain skbs, precisely to avoid messing up sk_wmem_alloc
+accounting. Ideally that hack would cover the ATM use case too, but it
+doesn't — skbs which aren't owned by any sock, for example PPP control
+frames, still get their truesize adjusted when the low-level ATM driver
+adds headroom.
+
+This has always been an issue, it seems. The truesize of a packet
+increases, and sk_wmem_alloc on the VCC goes negative. But this wasn't
+for normal traffic, only for control frames. So I think we just got away
+with it, and we probably needed to send 2GiB of LCP echo frames before
+the misaccounting would ever have caused a problem and caused
+atm_may_send() to start refusing packets.
+
+Commit 14afee4b609 ("net: convert sock.sk_wmem_alloc from atomic_t to
+refcount_t") did exactly what it was intended to do, and turned this
+mostly-theoretical problem into a real one, causing PPPoATM to fail
+immediately as sk_wmem_alloc underflows and atm_may_send() *immediately*
+starts refusing to allow new packets.
+
+The least intrusive solution to this problem is to stash the value of
+skb->truesize that was accounted to the VCC, in a new member of the
+ATM_SKB(skb) structure. Then in atm_pop_raw() subtract precisely that
+value instead of the then-current value of skb->truesize.
+
+Fixes: 158f323b9868 ("net: adjust skb->truesize in pskb_expand_head()")
+Signed-off-by: David Woodhouse <dwmw2@infradead.org>
+Tested-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/atmdev.h |   15 +++++++++++++++
+ net/atm/br2684.c       |    3 +--
+ net/atm/clip.c         |    3 +--
+ net/atm/common.c       |    3 +--
+ net/atm/lec.c          |    3 +--
+ net/atm/mpc.c          |    3 +--
+ net/atm/pppoatm.c      |    3 +--
+ net/atm/raw.c          |    4 ++--
+ 8 files changed, 23 insertions(+), 14 deletions(-)
+
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -214,6 +214,7 @@ struct atmphy_ops {
+ struct atm_skb_data {
+       struct atm_vcc  *vcc;           /* ATM VCC */
+       unsigned long   atm_options;    /* ATM layer options */
++      unsigned int    acct_truesize;  /* truesize accounted to vcc */
+ };
+ #define VCC_HTABLE_SIZE 32
+@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
+ void atm_dev_release_vccs(struct atm_dev *dev);
++static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
++{
++      /*
++       * Because ATM skbs may not belong to a sock (and we don't
++       * necessarily want to), skb->truesize may be adjusted,
++       * escaping the hack in pskb_expand_head() which avoids
++       * doing so for some cases. So stash the value of truesize
++       * at the time we accounted it, and atm_pop_raw() can use
++       * that value later, in case it changes.
++       */
++      refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
++      ATM_SKB(skb)->acct_truesize = skb->truesize;
++      ATM_SKB(skb)->atm_options = vcc->atm_options;
++}
+ static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
+ {
+--- a/net/atm/br2684.c
++++ b/net/atm/br2684.c
+@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buf
+       ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
+       pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
+-      refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
+-      ATM_SKB(skb)->atm_options = atmvcc->atm_options;
++      atm_account_tx(atmvcc, skb);
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struc
+               memcpy(here, llc_oui, sizeof(llc_oui));
+               ((__be16 *) here)[3] = skb->protocol;
+       }
+-      refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+-      ATM_SKB(skb)->atm_options = vcc->atm_options;
++      atm_account_tx(vcc, skb);
+       entry->vccs->last_use = jiffies;
+       pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
+       old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, str
+               goto out;
+       }
+       pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
+-      refcount_add(skb->truesize, &sk->sk_wmem_alloc);
++      atm_account_tx(vcc, skb);
+       skb->dev = NULL; /* for paths shared with net_device interfaces */
+-      ATM_SKB(skb)->atm_options = vcc->atm_options;
+       if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
+               kfree_skb(skb);
+               error = -EFAULT;
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_
+       struct net_device *dev = skb->dev;
+       ATM_SKB(skb)->vcc = vcc;
+-      ATM_SKB(skb)->atm_options = vcc->atm_options;
++      atm_account_tx(vcc, skb);
+-      refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       if (vcc->send(vcc, skb) < 0) {
+               dev->stats.tx_dropped++;
+               return;
+--- a/net/atm/mpc.c
++++ b/net/atm/mpc.c
+@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_b
+                                       sizeof(struct llc_snap_hdr));
+       }
+-      refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
+-      ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
++      atm_account_tx(entry->shortcut, skb);
+       entry->shortcut->send(entry->shortcut, skb);
+       entry->packets_fwded++;
+       mpc->in_ops->put(entry);
+--- a/net/atm/pppoatm.c
++++ b/net/atm/pppoatm.c
+@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_chann
+               return 1;
+       }
+-      refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
+-      ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
++      atm_account_tx(vcc, skb);
+       pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
+                skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
+       ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
+--- a/net/atm/raw.c
++++ b/net/atm/raw.c
+@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *
+       struct sock *sk = sk_atm(vcc);
+       pr_debug("(%d) %d -= %d\n",
+-               vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
+-      WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
++               vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
++      WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
+       dev_kfree_skb_any(skb);
+       sk->sk_write_space(sk);
+ }
diff --git a/queue-4.14/atm-zatm-fix-potential-spectre-v1.patch b/queue-4.14/atm-zatm-fix-potential-spectre-v1.patch
new file mode 100644 (file)
index 0000000..401e348
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 29 Jun 2018 13:28:07 -0500
+Subject: atm: zatm: Fix potential Spectre v1
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit ced9e191501e52b95e1b57b8e0db00943869eed0 ]
+
+pool can be indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/atm/zatm.c:1491 zatm_ioctl() warn: potential spectre issue
+'zatm_dev->pool_info' (local cap)
+
+Fix this by sanitizing pool before using it to index
+zatm_dev->pool_info
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/atm/zatm.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *de
+                                       return -EFAULT;
+                               if (pool < 0 || pool > ZATM_LAST_POOL)
+                                       return -EINVAL;
++                              pool = array_index_nospec(pool,
++                                                        ZATM_LAST_POOL + 1);
+                               if (copy_from_user(&info,
+                                   &((struct zatm_pool_req __user *) arg)->info,
+                                   sizeof(info))) return -EFAULT;
diff --git a/queue-4.14/hv_netvsc-split-sub-channel-setup-into-async-and-sync.patch b/queue-4.14/hv_netvsc-split-sub-channel-setup-into-async-and-sync.patch
new file mode 100644 (file)
index 0000000..6722fe8
--- /dev/null
@@ -0,0 +1,230 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Stephen Hemminger <sthemmin@microsoft.com>
+Date: Fri, 29 Jun 2018 14:07:16 -0700
+Subject: hv_netvsc: split sub-channel setup into async and sync
+
+From: Stephen Hemminger <sthemmin@microsoft.com>
+
+[ Upstream commit 3ffe64f1a641b80a82d9ef4efa7a05ce69049871 ]
+
+When doing device hotplug the sub channel must be async to avoid
+deadlock issues because device is discovered in softirq context.
+
+When doing changes to MTU and number of channels, the setup
+must be synchronous to avoid races such as when MTU and device
+settings are done in a single ip command.
+
+Reported-by: Thomas Walker <Thomas.Walker@twosigma.com>
+Fixes: 8195b1396ec8 ("hv_netvsc: fix deadlock on hotplug")
+Fixes: 732e49850c5e ("netvsc: fix race on sub channel creation")
+Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/hyperv_net.h   |    2 -
+ drivers/net/hyperv/netvsc.c       |   37 ++++++++++++++++++++++-
+ drivers/net/hyperv/netvsc_drv.c   |   17 +++++++++-
+ drivers/net/hyperv/rndis_filter.c |   61 +++++++-------------------------------
+ 4 files changed, 65 insertions(+), 52 deletions(-)
+
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -207,7 +207,7 @@ int netvsc_recv_callback(struct net_devi
+ void netvsc_channel_cb(void *context);
+ int netvsc_poll(struct napi_struct *napi, int budget);
+-void rndis_set_subchannel(struct work_struct *w);
++int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+ int rndis_filter_open(struct netvsc_device *nvdev);
+ int rndis_filter_close(struct netvsc_device *nvdev);
+ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -62,6 +62,41 @@ void netvsc_switch_datapath(struct net_d
+                              VM_PKT_DATA_INBAND, 0);
+ }
++/* Worker to setup sub channels on initial setup
++ * Initial hotplug event occurs in softirq context
++ * and can't wait for channels.
++ */
++static void netvsc_subchan_work(struct work_struct *w)
++{
++      struct netvsc_device *nvdev =
++              container_of(w, struct netvsc_device, subchan_work);
++      struct rndis_device *rdev;
++      int i, ret;
++
++      /* Avoid deadlock with device removal already under RTNL */
++      if (!rtnl_trylock()) {
++              schedule_work(w);
++              return;
++      }
++
++      rdev = nvdev->extension;
++      if (rdev) {
++              ret = rndis_set_subchannel(rdev->ndev, nvdev);
++              if (ret == 0) {
++                      netif_device_attach(rdev->ndev);
++              } else {
++                      /* fallback to only primary channel */
++                      for (i = 1; i < nvdev->num_chn; i++)
++                              netif_napi_del(&nvdev->chan_table[i].napi);
++
++                      nvdev->max_chn = 1;
++                      nvdev->num_chn = 1;
++              }
++      }
++
++      rtnl_unlock();
++}
++
+ static struct netvsc_device *alloc_net_device(void)
+ {
+       struct netvsc_device *net_device;
+@@ -78,7 +113,7 @@ static struct netvsc_device *alloc_net_d
+       init_completion(&net_device->channel_init_wait);
+       init_waitqueue_head(&net_device->subchan_open);
+-      INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
++      INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
+       return net_device;
+ }
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -911,8 +911,20 @@ static int netvsc_attach(struct net_devi
+       if (IS_ERR(nvdev))
+               return PTR_ERR(nvdev);
+-      /* Note: enable and attach happen when sub-channels setup */
++      if (nvdev->num_chn > 1) {
++              ret = rndis_set_subchannel(ndev, nvdev);
++
++              /* if unavailable, just proceed with one queue */
++              if (ret) {
++                      nvdev->max_chn = 1;
++                      nvdev->num_chn = 1;
++              }
++      }
++      /* In any case device is now ready */
++      netif_device_attach(ndev);
++
++      /* Note: enable and attach happen when sub-channels setup */
+       netif_carrier_off(ndev);
+       if (netif_running(ndev)) {
+@@ -2035,6 +2047,9 @@ static int netvsc_probe(struct hv_device
+       memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
++      if (nvdev->num_chn > 1)
++              schedule_work(&nvdev->subchan_work);
++
+       /* hw_features computed in rndis_netdev_set_hwcaps() */
+       net->features = net->hw_features |
+               NETIF_F_HIGHDMA | NETIF_F_SG |
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -1055,29 +1055,15 @@ static void netvsc_sc_open(struct vmbus_
+  * This breaks overlap of processing the host message for the
+  * new primary channel with the initialization of sub-channels.
+  */
+-void rndis_set_subchannel(struct work_struct *w)
++int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+ {
+-      struct netvsc_device *nvdev
+-              = container_of(w, struct netvsc_device, subchan_work);
+       struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
+-      struct net_device_context *ndev_ctx;
+-      struct rndis_device *rdev;
+-      struct net_device *ndev;
+-      struct hv_device *hv_dev;
++      struct net_device_context *ndev_ctx = netdev_priv(ndev);
++      struct hv_device *hv_dev = ndev_ctx->device_ctx;
++      struct rndis_device *rdev = nvdev->extension;
+       int i, ret;
+-      if (!rtnl_trylock()) {
+-              schedule_work(w);
+-              return;
+-      }
+-
+-      rdev = nvdev->extension;
+-      if (!rdev)
+-              goto unlock;    /* device was removed */
+-
+-      ndev = rdev->ndev;
+-      ndev_ctx = netdev_priv(ndev);
+-      hv_dev = ndev_ctx->device_ctx;
++      ASSERT_RTNL();
+       memset(init_packet, 0, sizeof(struct nvsp_message));
+       init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+@@ -1091,13 +1077,13 @@ void rndis_set_subchannel(struct work_st
+                              VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+       if (ret) {
+               netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
+-              goto failed;
++              return ret;
+       }
+       wait_for_completion(&nvdev->channel_init_wait);
+       if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
+               netdev_err(ndev, "sub channel request failed\n");
+-              goto failed;
++              return -EIO;
+       }
+       nvdev->num_chn = 1 +
+@@ -1116,21 +1102,7 @@ void rndis_set_subchannel(struct work_st
+       for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+               ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+-      netif_device_attach(ndev);
+-      rtnl_unlock();
+-      return;
+-
+-failed:
+-      /* fallback to only primary channel */
+-      for (i = 1; i < nvdev->num_chn; i++)
+-              netif_napi_del(&nvdev->chan_table[i].napi);
+-
+-      nvdev->max_chn = 1;
+-      nvdev->num_chn = 1;
+-
+-      netif_device_attach(ndev);
+-unlock:
+-      rtnl_unlock();
++      return 0;
+ }
+ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
+@@ -1321,21 +1293,12 @@ struct netvsc_device *rndis_filter_devic
+               netif_napi_add(net, &net_device->chan_table[i].napi,
+                              netvsc_poll, NAPI_POLL_WEIGHT);
+-      if (net_device->num_chn > 1)
+-              schedule_work(&net_device->subchan_work);
++      return net_device;
+ out:
+-      /* if unavailable, just proceed with one queue */
+-      if (ret) {
+-              net_device->max_chn = 1;
+-              net_device->num_chn = 1;
+-      }
+-
+-      /* No sub channels, device is ready */
+-      if (net_device->num_chn == 1)
+-              netif_device_attach(net);
+-
+-      return net_device;
++      /* setting up multiple channels failed */
++      net_device->max_chn = 1;
++      net_device->num_chn = 1;
+ err_dev_remv:
+       rndis_filter_device_remove(dev, net_device);
diff --git a/queue-4.14/ipv6-sr-fix-passing-wrong-flags-to-crypto_alloc_shash.patch b/queue-4.14/ipv6-sr-fix-passing-wrong-flags-to-crypto_alloc_shash.patch
new file mode 100644 (file)
index 0000000..f578c33
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eric Biggers <ebiggers@google.com>
+Date: Sat, 30 Jun 2018 15:26:56 -0700
+Subject: ipv6: sr: fix passing wrong flags to crypto_alloc_shash()
+
+From: Eric Biggers <ebiggers@google.com>
+
+[ Upstream commit fc9c2029e37c3ae9efc28bf47045e0b87e09660c ]
+
+The 'mask' argument to crypto_alloc_shash() uses the CRYPTO_ALG_* flags,
+not 'gfp_t'.  So don't pass GFP_KERNEL to it.
+
+Fixes: bf355b8d2c30 ("ipv6: sr: add core files for SR HMAC support")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/seg6_hmac.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
+                       return -ENOMEM;
+               for_each_possible_cpu(cpu) {
+-                      tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
++                      tfm = crypto_alloc_shash(algo->name, 0, 0);
+                       if (IS_ERR(tfm))
+                               return PTR_ERR(tfm);
+                       p_tfm = per_cpu_ptr(algo->tfms, cpu);
diff --git a/queue-4.14/ipvlan-fix-ifla_mtu-ignored-on-newlink.patch b/queue-4.14/ipvlan-fix-ifla_mtu-ignored-on-newlink.patch
new file mode 100644 (file)
index 0000000..ecb37e2
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Xin Long <lucien.xin@gmail.com>
+Date: Thu, 21 Jun 2018 12:56:04 +0800
+Subject: ipvlan: fix IFLA_MTU ignored on NEWLINK
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit 30877961b1cdd6fdca783c2e8c4f0f47e95dc58c ]
+
+Commit 296d48568042 ("ipvlan: inherit MTU from master device") adjusted
+the mtu from the master device when creating a ipvlan device, but it
+would also override the mtu value set in rtnl_create_link. It causes
+IFLA_MTU param not to take effect.
+
+So this patch is to not adjust the mtu if IFLA_MTU param is set when
+creating a ipvlan device.
+
+Fixes: 296d48568042 ("ipvlan: inherit MTU from master device")
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ipvlan/ipvlan_main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -546,7 +546,8 @@ int ipvlan_link_new(struct net *src_net,
+       ipvlan->dev = dev;
+       ipvlan->port = port;
+       ipvlan->sfeatures = IPVLAN_FEATURES;
+-      ipvlan_adjust_mtu(ipvlan, phy_dev);
++      if (!tb[IFLA_MTU])
++              ipvlan_adjust_mtu(ipvlan, phy_dev);
+       INIT_LIST_HEAD(&ipvlan->addrs);
+       /* If the port-id base is at the MAX value, then wrap it around and
diff --git a/queue-4.14/ixgbe-split-xdp_tx-tail-and-xdp_redirect-map-flushing.patch b/queue-4.14/ixgbe-split-xdp_tx-tail-and-xdp_redirect-map-flushing.patch
new file mode 100644 (file)
index 0000000..7ba2af1
--- /dev/null
@@ -0,0 +1,89 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Jesper Dangaard Brouer <brouer@redhat.com>
+Date: Tue, 26 Jun 2018 17:39:48 +0200
+Subject: ixgbe: split XDP_TX tail and XDP_REDIRECT map flushing
+
+From: Jesper Dangaard Brouer <brouer@redhat.com>
+
+[ Upstream commit ad088ec480768850db019a5cc543685e868a513d ]
+
+The driver was combining the XDP_TX tail flush and XDP_REDIRECT
+map flushing (xdp_do_flush_map).  This is suboptimal, these two
+flush operations should be kept separate.
+
+Fixes: 11393cc9b9be ("xdp: Add batching support to redirect map")
+Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |   24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -2211,9 +2211,10 @@ static struct sk_buff *ixgbe_build_skb(s
+       return skb;
+ }
+-#define IXGBE_XDP_PASS 0
+-#define IXGBE_XDP_CONSUMED 1
+-#define IXGBE_XDP_TX 2
++#define IXGBE_XDP_PASS                0
++#define IXGBE_XDP_CONSUMED    BIT(0)
++#define IXGBE_XDP_TX          BIT(1)
++#define IXGBE_XDP_REDIR               BIT(2)
+ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+                              struct xdp_buff *xdp);
+@@ -2242,7 +2243,7 @@ static struct sk_buff *ixgbe_run_xdp(str
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+               if (!err)
+-                      result = IXGBE_XDP_TX;
++                      result = IXGBE_XDP_REDIR;
+               else
+                       result = IXGBE_XDP_CONSUMED;
+               break;
+@@ -2302,7 +2303,7 @@ static int ixgbe_clean_rx_irq(struct ixg
+       unsigned int mss = 0;
+ #endif /* IXGBE_FCOE */
+       u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+-      bool xdp_xmit = false;
++      unsigned int xdp_xmit = 0;
+       while (likely(total_rx_packets < budget)) {
+               union ixgbe_adv_rx_desc *rx_desc;
+@@ -2342,8 +2343,10 @@ static int ixgbe_clean_rx_irq(struct ixg
+               }
+               if (IS_ERR(skb)) {
+-                      if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
+-                              xdp_xmit = true;
++                      unsigned int xdp_res = -PTR_ERR(skb);
++
++                      if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
++                              xdp_xmit |= xdp_res;
+                               ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+                       } else {
+                               rx_buffer->pagecnt_bias++;
+@@ -2415,7 +2418,10 @@ static int ixgbe_clean_rx_irq(struct ixg
+               total_rx_packets++;
+       }
+-      if (xdp_xmit) {
++      if (xdp_xmit & IXGBE_XDP_REDIR)
++              xdp_do_flush_map();
++
++      if (xdp_xmit & IXGBE_XDP_TX) {
+               struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+               /* Force memory writes to complete before letting h/w
+@@ -2423,8 +2429,6 @@ static int ixgbe_clean_rx_irq(struct ixg
+                */
+               wmb();
+               writel(ring->next_to_use, ring->tail);
+-
+-              xdp_do_flush_map();
+       }
+       u64_stats_update_begin(&rx_ring->syncp);
diff --git a/queue-4.14/net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch b/queue-4.14/net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch
new file mode 100644 (file)
index 0000000..e1ddb89
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 22 Jun 2018 06:44:14 -0700
+Subject: net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 74174fe5634ffbf645a7ca5a261571f700b2f332 ]
+
+On fast hosts or malicious bots, we trigger a DCCP_BUG() which
+seems excessive.
+
+syzbot reported :
+
+BUG: delta (-6195) <= 0 at net/dccp/ccids/ccid3.c:628/ccid3_hc_rx_send_feedback()
+CPU: 1 PID: 18 Comm: ksoftirqd/1 Not tainted 4.18.0-rc1+ #112
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
+ ccid3_hc_rx_send_feedback net/dccp/ccids/ccid3.c:628 [inline]
+ ccid3_hc_rx_packet_recv.cold.16+0x38/0x71 net/dccp/ccids/ccid3.c:793
+ ccid_hc_rx_packet_recv net/dccp/ccid.h:185 [inline]
+ dccp_deliver_input_to_ccids+0xf0/0x280 net/dccp/input.c:180
+ dccp_rcv_established+0x87/0xb0 net/dccp/input.c:378
+ dccp_v4_do_rcv+0x153/0x180 net/dccp/ipv4.c:654
+ sk_backlog_rcv include/net/sock.h:914 [inline]
+ __sk_receive_skb+0x3ba/0xd80 net/core/sock.c:517
+ dccp_v4_rcv+0x10f9/0x1f58 net/dccp/ipv4.c:875
+ ip_local_deliver_finish+0x2eb/0xda0 net/ipv4/ip_input.c:215
+ NF_HOOK include/linux/netfilter.h:287 [inline]
+ ip_local_deliver+0x1e9/0x750 net/ipv4/ip_input.c:256
+ dst_input include/net/dst.h:450 [inline]
+ ip_rcv_finish+0x823/0x2220 net/ipv4/ip_input.c:396
+ NF_HOOK include/linux/netfilter.h:287 [inline]
+ ip_rcv+0xa18/0x1284 net/ipv4/ip_input.c:492
+ __netif_receive_skb_core+0x2488/0x3680 net/core/dev.c:4628
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
+ process_backlog+0x219/0x760 net/core/dev.c:5373
+ napi_poll net/core/dev.c:5771 [inline]
+ net_rx_action+0x7da/0x1980 net/core/dev.c:5837
+ __do_softirq+0x2e8/0xb17 kernel/softirq.c:284
+ run_ksoftirqd+0x86/0x100 kernel/softirq.c:645
+ smpboot_thread_fn+0x417/0x870 kernel/smpboot.c:164
+ kthread+0x345/0x410 kernel/kthread.c:240
+ ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:412
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
+Cc: dccp@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ccids/ccid3.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -624,9 +624,8 @@ static void ccid3_hc_rx_send_feedback(st
+       case CCID3_FBACK_PERIODIC:
+               delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
+               if (delta <= 0)
+-                      DCCP_BUG("delta (%ld) <= 0", (long)delta);
+-              else
+-                      hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
++                      delta = 1;
++              hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+               break;
+       default:
+               return;
diff --git a/queue-4.14/net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch b/queue-4.14/net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch
new file mode 100644 (file)
index 0000000..83b3c41
--- /dev/null
@@ -0,0 +1,65 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 22 Jun 2018 06:44:15 -0700
+Subject: net: dccp: switch rx_tstamp_last_feedback to monotonic clock
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0ce4e70ff00662ad7490e545ba0cd8c1fa179fca ]
+
+To compute delays, better not use time of the day which can
+be changed by admins or malicious programs.
+
+Also change ccid3_first_li() to use s64 type for delta variable
+to avoid potential overflows.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
+Cc: dccp@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ccids/ccid3.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(st
+ {
+       struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+       struct dccp_sock *dp = dccp_sk(sk);
+-      ktime_t now = ktime_get_real();
++      ktime_t now = ktime_get();
+       s64 delta = 0;
+       switch (fbtype) {
+@@ -631,7 +631,7 @@ static void ccid3_hc_rx_send_feedback(st
+               return;
+       }
+-      ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
++      ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
+                      hc->rx_x_recv, hc->rx_pinv);
+       hc->rx_tstamp_last_feedback = now;
+@@ -678,7 +678,8 @@ static int ccid3_hc_rx_insert_options(st
+ static u32 ccid3_first_li(struct sock *sk)
+ {
+       struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+-      u32 x_recv, p, delta;
++      u32 x_recv, p;
++      s64 delta;
+       u64 fval;
+       if (hc->rx_rtt == 0) {
+@@ -686,7 +687,9 @@ static u32 ccid3_first_li(struct sock *s
+               hc->rx_rtt = DCCP_FALLBACK_RTT;
+       }
+-      delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
++      delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
++      if (delta <= 0)
++              delta = 1;
+       x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+       if (x_recv == 0) {              /* would also trigger divide-by-zero */
+               DCCP_WARN("X_recv==0\n");
diff --git a/queue-4.14/net-fix-use-after-free-in-gro-with-esp.patch b/queue-4.14/net-fix-use-after-free-in-gro-with-esp.patch
new file mode 100644 (file)
index 0000000..ec0831a
--- /dev/null
@@ -0,0 +1,141 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Sat, 30 Jun 2018 17:38:55 +0200
+Subject: net: fix use-after-free in GRO with ESP
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 603d4cf8fe095b1ee78f423d514427be507fb513 ]
+
+Since the addition of GRO for ESP, gro_receive can consume the skb and
+return -EINPROGRESS. In that case, the lower layer GRO handler cannot
+touch the skb anymore.
+
+Commit 5f114163f2f5 ("net: Add a skb_gro_flush_final helper.") converted
+some of the gro_receive handlers that can lead to ESP's gro_receive so
+that they wouldn't access the skb when -EINPROGRESS is returned, but
+missed other spots, mainly in tunneling protocols.
+
+This patch finishes the conversion to using skb_gro_flush_final(), and
+adds a new helper, skb_gro_flush_final_remcsum(), used in VXLAN and
+GUE.
+
+Fixes: 5f114163f2f5 ("net: Add a skb_gro_flush_final helper.")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/geneve.c      |    2 +-
+ drivers/net/vxlan.c       |    4 +---
+ include/linux/netdevice.h |   20 ++++++++++++++++++++
+ net/8021q/vlan.c          |    2 +-
+ net/ipv4/fou.c            |    4 +---
+ net/ipv4/gre_offload.c    |    2 +-
+ net/ipv4/udp_offload.c    |    2 +-
+ 7 files changed, 26 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -474,7 +474,7 @@ static struct sk_buff **geneve_gro_recei
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_flush_final(skb, pp, flush);
+       return pp;
+ }
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receiv
+       flush = 0;
+ out:
+-      skb_gro_remcsum_cleanup(skb, &grc);
+-      skb->remcsum_offload = 0;
+-      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
+       return pp;
+ }
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2668,11 +2668,31 @@ static inline void skb_gro_flush_final(s
+       if (PTR_ERR(pp) != -EINPROGRESS)
+               NAPI_GRO_CB(skb)->flush |= flush;
+ }
++static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
++                                             struct sk_buff **pp,
++                                             int flush,
++                                             struct gro_remcsum *grc)
++{
++      if (PTR_ERR(pp) != -EINPROGRESS) {
++              NAPI_GRO_CB(skb)->flush |= flush;
++              skb_gro_remcsum_cleanup(skb, grc);
++              skb->remcsum_offload = 0;
++      }
++}
+ #else
+ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+ {
+       NAPI_GRO_CB(skb)->flush |= flush;
+ }
++static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
++                                             struct sk_buff **pp,
++                                             int flush,
++                                             struct gro_remcsum *grc)
++{
++      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_remcsum_cleanup(skb, grc);
++      skb->remcsum_offload = 0;
++}
+ #endif
+ static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_flush_final(skb, pp, flush);
+       return pp;
+ }
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -448,9 +448,7 @@ next_proto:
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      NAPI_GRO_CB(skb)->flush |= flush;
+-      skb_gro_remcsum_cleanup(skb, &grc);
+-      skb->remcsum_offload = 0;
++      skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
+       return pp;
+ }
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_flush_final(skb, pp, flush);
+       return pp;
+ }
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -295,7 +295,7 @@ unflush:
+ out_unlock:
+       rcu_read_unlock();
+ out:
+-      NAPI_GRO_CB(skb)->flush |= flush;
++      skb_gro_flush_final(skb, pp, flush);
+       return pp;
+ }
+ EXPORT_SYMBOL(udp_gro_receive);
diff --git a/queue-4.14/net-macb-fix-ptp-time-adjustment-for-large-negative-delta.patch b/queue-4.14/net-macb-fix-ptp-time-adjustment-for-large-negative-delta.patch
new file mode 100644 (file)
index 0000000..24203ed
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Harini Katakam <harini.katakam@xilinx.com>
+Date: Wed, 20 Jun 2018 17:04:20 +0530
+Subject: net: macb: Fix ptp time adjustment for large negative delta
+
+From: Harini Katakam <harini.katakam@xilinx.com>
+
+[ Upstream commit 64d7839af8c8f67daaf9bf387135052c55d85f90 ]
+
+When delta passed to gem_ptp_adjtime is negative, the sign is
+maintained in the ns_to_timespec64 conversion. Hence timespec_add
+should be used directly. timespec_sub will just subtract the negative
+value thus increasing the time difference.
+
+Signed-off-by: Harini Katakam <harini.katakam@xilinx.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_ptp.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_ptp.c
++++ b/drivers/net/ethernet/cadence/macb_ptp.c
+@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_cl
+       if (delta > TSU_NSEC_MAX_VAL) {
+               gem_tsu_get_time(&bp->ptp_clock_info, &now);
+-              if (sign)
+-                      now = timespec64_sub(now, then);
+-              else
+-                      now = timespec64_add(now, then);
++              now = timespec64_add(now, then);
+               gem_tsu_set_time(&bp->ptp_clock_info,
+                                (const struct timespec64 *)&now);
diff --git a/queue-4.14/net-mlx5-e-switch-avoid-setup-attempt-if-not-being-e-switch-manager.patch b/queue-4.14/net-mlx5-e-switch-avoid-setup-attempt-if-not-being-e-switch-manager.patch
new file mode 100644 (file)
index 0000000..666a88a
--- /dev/null
@@ -0,0 +1,154 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Thu, 31 May 2018 11:16:18 +0300
+Subject: net/mlx5: E-Switch, Avoid setup attempt if not being e-switch manager
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 0efc8562491b7d36f6bbc4fbc8f3348cb6641e9c ]
+
+In smartnic env, the host (PF) driver might not be an e-switch
+manager, hence the FW will err on driver attempts to deal with
+setting/unsetting the eswitch and as a result the overall setup
+of sriov will fail.
+
+Fix that by avoiding the operation if e-switch management is not
+allowed for this driver instance. While here, move to use the
+correct name for the esw manager capability name.
+
+Fixes: 81848731ff40 ('net/mlx5: E-Switch, Add SR-IOV (FDB) support')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reported-by: Guy Kushnir <guyk@mellanox.com>
+Reviewed-by: Eli Cohen <eli@melloanox.com>
+Tested-by: Eli Cohen <eli@melloanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  |    2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.c |    2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch.h |    2 ++
+ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |    3 ++-
+ drivers/net/ethernet/mellanox/mlx5/core/fw.c      |    5 +++--
+ drivers/net/ethernet/mellanox/mlx5/core/sriov.c   |    7 ++++++-
+ include/linux/mlx5/mlx5_ifc.h                     |    2 +-
+ 7 files changed, 16 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -726,7 +726,7 @@ static bool mlx5e_is_vf_vport_rep(struct
+       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5_eswitch_rep *rep;
+-      if (!MLX5_CAP_GEN(priv->mdev, eswitch_flow_table))
++      if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+       rep = rpriv->rep;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1535,7 +1535,7 @@ int mlx5_eswitch_enable_sriov(struct mlx
+       if (!ESW_ALLOWED(esw))
+               return 0;
+-      if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
++      if (!MLX5_ESWITCH_MANAGER(esw->dev) ||
+           !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+               esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+               return -EOPNOTSUPP;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -39,6 +39,8 @@
+ #include <linux/mlx5/device.h>
+ #include "lib/mpfs.h"
++#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
++
+ enum {
+       SRIOV_NONE,
+       SRIOV_LEGACY,
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -36,6 +36,7 @@
+ #include "mlx5_core.h"
+ #include "fs_core.h"
+ #include "fs_cmd.h"
++#include "eswitch.h"
+ #include "diag/fs_tracepoint.h"
+ #define INIT_TREE_NODE_ARRAY_SIZE(...)        (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
+@@ -2211,7 +2212,7 @@ int mlx5_init_fs(struct mlx5_core_dev *d
+                       goto err;
+       }
+-      if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
++      if (MLX5_ESWITCH_MANAGER(dev)) {
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+                       err = init_fdb_root_ns(steering);
+                       if (err)
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -34,6 +34,7 @@
+ #include <linux/mlx5/cmd.h>
+ #include <linux/module.h>
+ #include "mlx5_core.h"
++#include "eswitch.h"
+ #include "../../mlxfw/mlxfw.h"
+ static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+@@ -152,13 +153,13 @@ int mlx5_query_hca_caps(struct mlx5_core
+       }
+       if (MLX5_CAP_GEN(dev, vport_group_manager) &&
+-          MLX5_CAP_GEN(dev, eswitch_flow_table)) {
++          MLX5_ESWITCH_MANAGER(dev)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
+               if (err)
+                       return err;
+       }
+-      if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
++      if (MLX5_ESWITCH_MANAGER(dev)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
+               if (err)
+                       return err;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(stru
+               return -EBUSY;
+       }
++      if (!MLX5_ESWITCH_MANAGER(dev))
++              goto enable_vfs_hca;
++
+       err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+       if (err) {
+               mlx5_core_warn(dev,
+@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(stru
+               return err;
+       }
++enable_vfs_hca:
+       for (vf = 0; vf < num_vfs; vf++) {
+               err = mlx5_core_enable_hca(dev, vf + 1);
+               if (err) {
+@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(st
+       }
+ out:
+-      mlx5_eswitch_disable_sriov(dev->priv.eswitch);
++      if (MLX5_ESWITCH_MANAGER(dev))
++              mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (mlx5_wait_for_vf_pages(dev))
+               mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -857,7 +857,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+       u8         reserved_at_1a4[0x1];
+       u8         ets[0x1];
+       u8         nic_flow_table[0x1];
+-      u8         eswitch_flow_table[0x1];
++      u8         eswitch_manager[0x1];
+       u8         early_vf_enable[0x1];
+       u8         mcam_reg[0x1];
+       u8         pcam_reg[0x1];
diff --git a/queue-4.14/net-mlx5-fix-command-interface-race-in-polling-mode.patch b/queue-4.14/net-mlx5-fix-command-interface-race-in-polling-mode.patch
new file mode 100644 (file)
index 0000000..1ee42d3
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Alex Vesker <valex@mellanox.com>
+Date: Tue, 12 Jun 2018 16:14:31 +0300
+Subject: net/mlx5: Fix command interface race in polling mode
+
+From: Alex Vesker <valex@mellanox.com>
+
+[ Upstream commit d412c31dae053bf30a1bc15582a9990df297a660 ]
+
+The command interface can work in two modes: Events and Polling.
+In the general case, each time we invoke a command, a work is
+queued to handle it.
+
+When working in events, the interrupt handler completes the
+command execution. On the other hand, when working in polling
+mode, the work itself completes it.
+
+Due to a bug in the work handler, a command could have been
+completed by the interrupt handler, while the work handler
+hasn't finished yet, causing the it to complete once again
+if the command interface mode was changed from Events to
+polling after the interrupt handler was called.
+
+mlx5_unload_one()
+        mlx5_stop_eqs()
+                // Destroy the EQ before cmd EQ
+                ...cmd_work_handler()
+                        write_doorbell()
+                        --> EVENT_TYPE_CMD
+                                mlx5_cmd_comp_handler() // First free
+                                        free_ent(cmd, ent->idx)
+                                        complete(&ent->done)
+
+        <-- mlx5_stop_eqs //cmd was complete
+                // move to polling before destroying the last cmd EQ
+                mlx5_cmd_use_polling()
+                        cmd->mode = POLL;
+
+                --> cmd_work_handler (continues)
+                        if (cmd->mode == POLL)
+                                mlx5_cmd_comp_handler() // Double free
+
+The solution is to store the cmd->mode before writing the doorbell.
+
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -801,6 +801,7 @@ static void cmd_work_handler(struct work
+       unsigned long flags;
+       bool poll_cmd = ent->polling;
+       int alloc_ret;
++      int cmd_mode;
+       sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+       down(sem);
+@@ -847,6 +848,7 @@ static void cmd_work_handler(struct work
+       set_signature(ent, !cmd->checksum_disabled);
+       dump_command(dev, ent, 1);
+       ent->ts1 = ktime_get_ns();
++      cmd_mode = cmd->mode;
+       if (ent->callback)
+               schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+@@ -871,7 +873,7 @@ static void cmd_work_handler(struct work
+       iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
+       mmiowb();
+       /* if not in polling don't use ent after this point */
+-      if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
++      if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
+               poll_timeout(ent);
+               /* make sure we read the descriptor after ownership is SW */
+               rmb();
diff --git a/queue-4.14/net-mlx5-fix-incorrect-raw-command-length-parsing.patch b/queue-4.14/net-mlx5-fix-incorrect-raw-command-length-parsing.patch
new file mode 100644 (file)
index 0000000..635b241
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Alex Vesker <valex@mellanox.com>
+Date: Fri, 25 May 2018 20:25:59 +0300
+Subject: net/mlx5: Fix incorrect raw command length parsing
+
+From: Alex Vesker <valex@mellanox.com>
+
+[ Upstream commit 603b7bcff824740500ddfa001d7a7168b0b38542 ]
+
+The NULL character was not set correctly for the string containing
+the command length, this caused failures reading the output of the
+command due to a random length. The fix is to initialize the output
+length string.
+
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1274,7 +1274,7 @@ static ssize_t outlen_write(struct file
+ {
+       struct mlx5_core_dev *dev = filp->private_data;
+       struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+-      char outlen_str[8];
++      char outlen_str[8] = {0};
+       int outlen;
+       void *ptr;
+       int err;
+@@ -1289,8 +1289,6 @@ static ssize_t outlen_write(struct file
+       if (copy_from_user(outlen_str, buf, count))
+               return -EFAULT;
+-      outlen_str[7] = 0;
+-
+       err = sscanf(outlen_str, "%d", &outlen);
+       if (err < 0)
+               return err;
diff --git a/queue-4.14/net-mlx5-fix-required-capability-for-manipulating-mpfs.patch b/queue-4.14/net-mlx5-fix-required-capability-for-manipulating-mpfs.patch
new file mode 100644 (file)
index 0000000..c70414b
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eli Cohen <eli@mellanox.com>
+Date: Wed, 13 Jun 2018 10:27:34 +0300
+Subject: net/mlx5: Fix required capability for manipulating MPFS
+
+From: Eli Cohen <eli@mellanox.com>
+
+[ Upstream commit f811980444ec59ad62f9e041adbb576a821132c7 ]
+
+Manipulating the MPFS requires eswitch manager capabilities.
+
+Fixes: eeb66cdb6826 ('net/mlx5: Separate between E-Switch and MPFS')
+Signed-off-by: Eli Cohen <eli@mellanox.com>
+Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+@@ -34,6 +34,7 @@
+ #include <linux/mlx5/driver.h>
+ #include <linux/mlx5/mlx5_ifc.h>
+ #include "mlx5_core.h"
++#include "eswitch.h"
+ #include "lib/mpfs.h"
+ /* HW L2 Table (MPFS) management */
+@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev
+       int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
+       struct mlx5_mpfs *mpfs;
+-      if (!MLX5_VPORT_MANAGER(dev))
++      if (!MLX5_ESWITCH_MANAGER(dev))
+               return 0;
+       mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
+@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_
+ {
+       struct mlx5_mpfs *mpfs = dev->priv.mpfs;
+-      if (!MLX5_VPORT_MANAGER(dev))
++      if (!MLX5_ESWITCH_MANAGER(dev))
+               return;
+       WARN_ON(!hlist_empty(mpfs->hash));
+@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_d
+       u32 index;
+       int err;
+-      if (!MLX5_VPORT_MANAGER(dev))
++      if (!MLX5_ESWITCH_MANAGER(dev))
+               return 0;
+       mutex_lock(&mpfs->lock);
+@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_d
+       int err = 0;
+       u32 index;
+-      if (!MLX5_VPORT_MANAGER(dev))
++      if (!MLX5_ESWITCH_MANAGER(dev))
+               return 0;
+       mutex_lock(&mpfs->lock);
diff --git a/queue-4.14/net-mlx5-fix-wrong-size-allocation-for-qos-etc-tc-regitster.patch b/queue-4.14/net-mlx5-fix-wrong-size-allocation-for-qos-etc-tc-regitster.patch
new file mode 100644 (file)
index 0000000..16e3749
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Shay Agroskin <shayag@mellanox.com>
+Date: Tue, 22 May 2018 14:14:02 +0300
+Subject: net/mlx5: Fix wrong size allocation for QoS ETC TC regitster
+
+From: Shay Agroskin <shayag@mellanox.com>
+
+[ Upstream commit d14fcb8d877caf1b8d6bd65d444bf62b21f2070c ]
+
+The driver allocates the wrong size (due to a wrong struct name) when
+issuing a query/set request to the NIC's register.
+
+Fixes: d8880795dabf ("net/mlx5e: Implement DCBNL IEEE max rate")
+Signed-off-by: Shay Agroskin <shayag@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/port.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -641,7 +641,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_t
+ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
+                                  int inlen)
+ {
+-      u32 out[MLX5_ST_SZ_DW(qtct_reg)];
++      u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+       if (!MLX5_CAP_GEN(mdev, ets))
+               return -EOPNOTSUPP;
+@@ -653,7 +653,7 @@ static int mlx5_set_port_qetcr_reg(struc
+ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
+                                    int outlen)
+ {
+-      u32 in[MLX5_ST_SZ_DW(qtct_reg)];
++      u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+       if (!MLX5_CAP_GEN(mdev, ets))
+               return -EOPNOTSUPP;
diff --git a/queue-4.14/net-mlx5e-avoid-dealing-with-vport-representors-if-not-being-e-switch-manager.patch b/queue-4.14/net-mlx5e-avoid-dealing-with-vport-representors-if-not-being-e-switch-manager.patch
new file mode 100644 (file)
index 0000000..8422f3d
--- /dev/null
@@ -0,0 +1,110 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Thu, 31 May 2018 11:32:56 +0300
+Subject: net/mlx5e: Avoid dealing with vport representors if not being e-switch manager
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 733d3e5497070d05971352ca5087bac83c197c3d ]
+
+In a smartnic environment, the host (PF) driver might not be an e-switch
+manager, hence the switchdev mode representors are running on
+the embedded CPU (EC) and not at the host.
+
+As such, we should avoid dealing with vport representors if we are
+not the e-switch manager.
+
+While here, make sure to disallow eswitch switchdev related
+setups through devlink if we are not esw managers.
+
+Fixes: cb67b832921c ('net/mlx5e: Introduce SRIOV VF representors')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reviewed-by: Eli Cohen <eli@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c          |   12 ++++++------
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c           |    2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |    4 ++--
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2626,7 +2626,7 @@ void mlx5e_activate_priv_channels(struct
+       mlx5e_activate_channels(&priv->channels);
+       netif_tx_start_all_queues(priv->netdev);
+-      if (MLX5_VPORT_MANAGER(priv->mdev))
++      if (MLX5_ESWITCH_MANAGER(priv->mdev))
+               mlx5e_add_sqs_fwd_rules(priv);
+       mlx5e_wait_channels_min_rx_wqes(&priv->channels);
+@@ -2637,7 +2637,7 @@ void mlx5e_deactivate_priv_channels(stru
+ {
+       mlx5e_redirect_rqts_to_drop(priv);
+-      if (MLX5_VPORT_MANAGER(priv->mdev))
++      if (MLX5_ESWITCH_MANAGER(priv->mdev))
+               mlx5e_remove_sqs_fwd_rules(priv);
+       /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+@@ -4127,7 +4127,7 @@ static void mlx5e_build_nic_netdev(struc
+       mlx5e_set_netdev_dev_addr(netdev);
+ #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
+-      if (MLX5_VPORT_MANAGER(mdev))
++      if (MLX5_ESWITCH_MANAGER(mdev))
+               netdev->switchdev_ops = &mlx5e_switchdev_ops;
+ #endif
+@@ -4273,7 +4273,7 @@ static void mlx5e_nic_enable(struct mlx5
+       mlx5e_enable_async_events(priv);
+-      if (MLX5_VPORT_MANAGER(priv->mdev))
++      if (MLX5_ESWITCH_MANAGER(priv->mdev))
+               mlx5e_register_vport_reps(priv);
+       if (netdev->reg_state != NETREG_REGISTERED)
+@@ -4300,7 +4300,7 @@ static void mlx5e_nic_disable(struct mlx
+       queue_work(priv->wq, &priv->set_rx_mode_work);
+-      if (MLX5_VPORT_MANAGER(priv->mdev))
++      if (MLX5_ESWITCH_MANAGER(priv->mdev))
+               mlx5e_unregister_vport_reps(priv);
+       mlx5e_disable_async_events(priv);
+@@ -4483,7 +4483,7 @@ static void *mlx5e_add(struct mlx5_core_
+               return NULL;
+ #ifdef CONFIG_MLX5_ESWITCH
+-      if (MLX5_VPORT_MANAGER(mdev)) {
++      if (MLX5_ESWITCH_MANAGER(mdev)) {
+               rpriv = mlx5e_alloc_nic_rep_priv(mdev);
+               if (!rpriv) {
+                       mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -710,7 +710,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_pr
+       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5_eswitch_rep *rep;
+-      if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
++      if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+       rep = rpriv->rep;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -912,8 +912,8 @@ static int mlx5_devlink_eswitch_check(st
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -EOPNOTSUPP;
+-      if (!MLX5_CAP_GEN(dev, vport_group_manager))
+-              return -EOPNOTSUPP;
++      if(!MLX5_ESWITCH_MANAGER(dev))
++              return -EPERM;
+       if (dev->priv.eswitch->mode == SRIOV_NONE)
+               return -EOPNOTSUPP;
diff --git a/queue-4.14/net-mlx5e-don-t-attempt-to-dereference-the-ppriv-struct-if-not-being-eswitch-manager.patch b/queue-4.14/net-mlx5e-don-t-attempt-to-dereference-the-ppriv-struct-if-not-being-eswitch-manager.patch
new file mode 100644 (file)
index 0000000..dcd7b13
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Mon, 4 Jun 2018 19:46:53 +0300
+Subject: net/mlx5e: Don't attempt to dereference the ppriv struct if not being eswitch manager
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 8ffd569aaa818f2624ca821d9a246342fa8b8c50 ]
+
+The check for cpu hit statistics was not returning false immediately for
+any non-vport-rep netdev, and hence we crashed (say, on mlx5-probed VFs)
+if a user-space tool was calling into any possible netdev in the system.
+
+Fix that by doing a proper check before dereferencing.
+
+Fixes: 1d447a39142e ('net/mlx5e: Extendable vport representor netdev private data')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Reported-by: Eli Cohen <eli@melloanox.com>
+Reviewed-by: Eli Cohen <eli@melloanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -724,8 +724,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_pr
+ static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+ {
+       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+-      struct mlx5_eswitch_rep *rep = rpriv->rep;
++      struct mlx5_eswitch_rep *rep;
++      if (!MLX5_CAP_GEN(priv->mdev, eswitch_flow_table))
++              return false;
++
++      rep = rpriv->rep;
+       if (rep && rep->vport != FDB_UPLINK_VPORT)
+               return true;
diff --git a/queue-4.14/net-mvneta-fix-the-rx-desc-dma-address-in-the-rx-path.patch b/queue-4.14/net-mvneta-fix-the-rx-desc-dma-address-in-the-rx-path.patch
new file mode 100644 (file)
index 0000000..3d5c605
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Fri, 22 Jun 2018 10:15:39 +0200
+Subject: net: mvneta: fix the Rx desc DMA address in the Rx path
+
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+
+[ Upstream commit 271f7ff5aa5a73488b7a9d8b84b5205fb5b2f7cc ]
+
+When using s/w buffer management, buffers are allocated and DMA mapped.
+When doing so on an arm64 platform, an offset correction is applied on
+the DMA address, before storing it in an Rx descriptor. The issue is
+this DMA address is then used later in the Rx path without removing the
+offset correction. Thus the DMA address is wrong, which can led to
+various issues.
+
+This patch fixes this by removing the offset correction from the DMA
+address retrieved from the Rx descriptor before using it in the Rx path.
+
+Fixes: 8d5047cf9ca2 ("net: mvneta: Convert to be 64 bits compatible")
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1959,7 +1959,7 @@ static int mvneta_rx_swbm(struct mvneta_
+               rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+               index = rx_desc - rxq->descs;
+               data = rxq->buf_virt_addr[index];
+-              phys_addr = rx_desc->buf_phys_addr;
++              phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
+               if (!mvneta_rxq_desc_is_first_last(rx_status) ||
+                   (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
diff --git a/queue-4.14/net-packet-fix-use-after-free.patch b/queue-4.14/net-packet-fix-use-after-free.patch
new file mode 100644 (file)
index 0000000..e9fd06d
--- /dev/null
@@ -0,0 +1,176 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 21 Jun 2018 14:16:02 -0700
+Subject: net/packet: fix use-after-free
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 945d015ee0c3095d2290e845565a23dedfd8027c ]
+
+We should put copy_skb in receive_queue only after
+a successful call to virtio_net_hdr_from_skb().
+
+syzbot report :
+
+BUG: KASAN: use-after-free in __skb_unlink include/linux/skbuff.h:1843 [inline]
+BUG: KASAN: use-after-free in __skb_dequeue include/linux/skbuff.h:1863 [inline]
+BUG: KASAN: use-after-free in skb_dequeue+0x16a/0x180 net/core/skbuff.c:2815
+Read of size 8 at addr ffff8801b044ecc0 by task syz-executor217/4553
+
+CPU: 0 PID: 4553 Comm: syz-executor217 Not tainted 4.18.0-rc1+ #111
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
+ print_address_description+0x6c/0x20b mm/kasan/report.c:256
+ kasan_report_error mm/kasan/report.c:354 [inline]
+ kasan_report.cold.7+0x242/0x2fe mm/kasan/report.c:412
+ __asan_report_load8_noabort+0x14/0x20 mm/kasan/report.c:433
+ __skb_unlink include/linux/skbuff.h:1843 [inline]
+ __skb_dequeue include/linux/skbuff.h:1863 [inline]
+ skb_dequeue+0x16a/0x180 net/core/skbuff.c:2815
+ skb_queue_purge+0x26/0x40 net/core/skbuff.c:2852
+ packet_set_ring+0x675/0x1da0 net/packet/af_packet.c:4331
+ packet_release+0x630/0xd90 net/packet/af_packet.c:2991
+ __sock_release+0xd7/0x260 net/socket.c:603
+ sock_close+0x19/0x20 net/socket.c:1186
+ __fput+0x35b/0x8b0 fs/file_table.c:209
+ ____fput+0x15/0x20 fs/file_table.c:243
+ task_work_run+0x1ec/0x2a0 kernel/task_work.c:113
+ exit_task_work include/linux/task_work.h:22 [inline]
+ do_exit+0x1b08/0x2750 kernel/exit.c:865
+ do_group_exit+0x177/0x440 kernel/exit.c:968
+ __do_sys_exit_group kernel/exit.c:979 [inline]
+ __se_sys_exit_group kernel/exit.c:977 [inline]
+ __x64_sys_exit_group+0x3e/0x50 kernel/exit.c:977
+ do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x4448e9
+Code: Bad RIP value.
+RSP: 002b:00007ffd5f777ca8 EFLAGS: 00000202 ORIG_RAX: 00000000000000e7
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00000000004448e9
+RDX: 00000000004448e9 RSI: 000000000000fcfb RDI: 0000000000000001
+RBP: 00000000006cf018 R08: 00007ffd0000a45b R09: 0000000000000000
+R10: 00007ffd5f777e48 R11: 0000000000000202 R12: 00000000004021f0
+R13: 0000000000402280 R14: 0000000000000000 R15: 0000000000000000
+
+Allocated by task 4553:
+ save_stack+0x43/0xd0 mm/kasan/kasan.c:448
+ set_track mm/kasan/kasan.c:460 [inline]
+ kasan_kmalloc+0xc4/0xe0 mm/kasan/kasan.c:553
+ kasan_slab_alloc+0x12/0x20 mm/kasan/kasan.c:490
+ kmem_cache_alloc+0x12e/0x760 mm/slab.c:3554
+ skb_clone+0x1f5/0x500 net/core/skbuff.c:1282
+ tpacket_rcv+0x28f7/0x3200 net/packet/af_packet.c:2221
+ deliver_skb net/core/dev.c:1925 [inline]
+ deliver_ptype_list_skb net/core/dev.c:1940 [inline]
+ __netif_receive_skb_core+0x1bfb/0x3680 net/core/dev.c:4611
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
+ netif_receive_skb_internal+0x12e/0x7d0 net/core/dev.c:4767
+ netif_receive_skb+0xbf/0x420 net/core/dev.c:4791
+ tun_rx_batched.isra.55+0x4ba/0x8c0 drivers/net/tun.c:1571
+ tun_get_user+0x2af1/0x42f0 drivers/net/tun.c:1981
+ tun_chr_write_iter+0xb9/0x154 drivers/net/tun.c:2009
+ call_write_iter include/linux/fs.h:1795 [inline]
+ new_sync_write fs/read_write.c:474 [inline]
+ __vfs_write+0x6c6/0x9f0 fs/read_write.c:487
+ vfs_write+0x1f8/0x560 fs/read_write.c:549
+ ksys_write+0x101/0x260 fs/read_write.c:598
+ __do_sys_write fs/read_write.c:610 [inline]
+ __se_sys_write fs/read_write.c:607 [inline]
+ __x64_sys_write+0x73/0xb0 fs/read_write.c:607
+ do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Freed by task 4553:
+ save_stack+0x43/0xd0 mm/kasan/kasan.c:448
+ set_track mm/kasan/kasan.c:460 [inline]
+ __kasan_slab_free+0x11a/0x170 mm/kasan/kasan.c:521
+ kasan_slab_free+0xe/0x10 mm/kasan/kasan.c:528
+ __cache_free mm/slab.c:3498 [inline]
+ kmem_cache_free+0x86/0x2d0 mm/slab.c:3756
+ kfree_skbmem+0x154/0x230 net/core/skbuff.c:582
+ __kfree_skb net/core/skbuff.c:642 [inline]
+ kfree_skb+0x1a5/0x580 net/core/skbuff.c:659
+ tpacket_rcv+0x189e/0x3200 net/packet/af_packet.c:2385
+ deliver_skb net/core/dev.c:1925 [inline]
+ deliver_ptype_list_skb net/core/dev.c:1940 [inline]
+ __netif_receive_skb_core+0x1bfb/0x3680 net/core/dev.c:4611
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
+ netif_receive_skb_internal+0x12e/0x7d0 net/core/dev.c:4767
+ netif_receive_skb+0xbf/0x420 net/core/dev.c:4791
+ tun_rx_batched.isra.55+0x4ba/0x8c0 drivers/net/tun.c:1571
+ tun_get_user+0x2af1/0x42f0 drivers/net/tun.c:1981
+ tun_chr_write_iter+0xb9/0x154 drivers/net/tun.c:2009
+ call_write_iter include/linux/fs.h:1795 [inline]
+ new_sync_write fs/read_write.c:474 [inline]
+ __vfs_write+0x6c6/0x9f0 fs/read_write.c:487
+ vfs_write+0x1f8/0x560 fs/read_write.c:549
+ ksys_write+0x101/0x260 fs/read_write.c:598
+ __do_sys_write fs/read_write.c:610 [inline]
+ __se_sys_write fs/read_write.c:607 [inline]
+ __x64_sys_write+0x73/0xb0 fs/read_write.c:607
+ do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The buggy address belongs to the object at ffff8801b044ecc0
+ which belongs to the cache skbuff_head_cache of size 232
+The buggy address is located 0 bytes inside of
+ 232-byte region [ffff8801b044ecc0, ffff8801b044eda8)
+The buggy address belongs to the page:
+page:ffffea0006c11380 count:1 mapcount:0 mapping:ffff8801d9be96c0 index:0x0
+flags: 0x2fffc0000000100(slab)
+raw: 02fffc0000000100 ffffea0006c17988 ffff8801d9bec248 ffff8801d9be96c0
+raw: 0000000000000000 ffff8801b044e040 000000010000000c 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8801b044eb80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff8801b044ec00: 00 00 00 00 00 00 00 00 00 00 00 00 00 fc fc fc
+>ffff8801b044ec80: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
+                                           ^
+ ffff8801b044ed00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8801b044ed80: fb fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc
+
+Fixes: 58d19b19cd99 ("packet: vnet_hdr support for tpacket_rcv")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c |   16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2303,6 +2303,13 @@ static int tpacket_rcv(struct sk_buff *s
+               if (po->stats.stats1.tp_drops)
+                       status |= TP_STATUS_LOSING;
+       }
++
++      if (do_vnet &&
++          virtio_net_hdr_from_skb(skb, h.raw + macoff -
++                                  sizeof(struct virtio_net_hdr),
++                                  vio_le(), true, 0))
++              goto drop_n_account;
++
+       po->stats.stats1.tp_packets++;
+       if (copy_skb) {
+               status |= TP_STATUS_COPY;
+@@ -2310,15 +2317,6 @@ static int tpacket_rcv(struct sk_buff *s
+       }
+       spin_unlock(&sk->sk_receive_queue.lock);
+-      if (do_vnet) {
+-              if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+-                                          sizeof(struct virtio_net_hdr),
+-                                          vio_le(), true, 0)) {
+-                      spin_lock(&sk->sk_receive_queue.lock);
+-                      goto drop_n_account;
+-              }
+-      }
+-
+       skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
+       if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
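
The underlying rule is worth spelling out: an skb must not be linked into a
queue that another context can purge until every step that may still fail
has succeeded.  The sketch below is a hypothetical illustration of that
rule, not the af_packet code; fill_metadata stands in for any fallible step
such as building a virtio_net header.

#include <linux/errno.h>
#include <linux/skbuff.h>

static int deliver_skb(struct sk_buff_head *queue, struct sk_buff *skb,
		       int (*fill_metadata)(struct sk_buff *skb))
{
	int err = fill_metadata(skb);	/* fallible step runs first */

	if (err)
		return err;		/* skb is still private to us */

	skb_queue_tail(queue, skb);	/* publish only after success */
	return 0;
}
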
diff --git a/queue-4.14/net-sungem-fix-rx-checksum-support.patch b/queue-4.14/net-sungem-fix-rx-checksum-support.patch
new file mode 100644 (file)
index 0000000..eb06fb6
--- /dev/null
@@ -0,0 +1,109 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Jun 2018 19:18:50 -0700
+Subject: net: sungem: fix rx checksum support
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 12b03558cef6d655d0d394f5e98a6fd07c1f6c0f ]
+
+After commit 88078d98d1bb ("net: pskb_trim_rcsum() and CHECKSUM_COMPLETE
+are friends"), sungem owners reported the infamous "eth0: hw csum failure"
+message.
+
+CHECKSUM_COMPLETE has in fact never worked for this driver, but this
+was masked by the fact that upper stacks had to strip the FCS, and
+therefore skb->ip_summed was set back to CHECKSUM_NONE before
+my recent change.
+
+Driver configures a number of bytes to skip when the chip computes
+the checksum, and for some reason only half of the Ethernet header
+was skipped.
+
+Then a second problem is that we should strip the FCS by default,
+unless the driver is updated to eventually support NETIF_F_RXFCS in
+the future.
+
+Finally, a driver should check if NETIF_F_RXCSUM feature is enabled
+or not, so that the admin can turn off rx checksum if wanted.
+
+Many thanks to Andreas Schwab and Mathieu Malaterre for their
+help in debugging this issue.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Meelis Roos <mroos@linux.ee>
+Reported-by: Mathieu Malaterre <malat@debian.org>
+Reported-by: Andreas Schwab <schwab@linux-m68k.org>
+Tested-by: Andreas Schwab <schwab@linux-m68k.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/sungem.c |   22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -59,8 +59,7 @@
+ #include <linux/sungem_phy.h>
+ #include "sungem.h"
+-/* Stripping FCS is causing problems, disabled for now */
+-#undef STRIP_FCS
++#define STRIP_FCS
+ #define DEFAULT_MSG   (NETIF_MSG_DRV          | \
+                        NETIF_MSG_PROBE        | \
+@@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *g
+       writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+-             ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++             (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+       writel(val, gp->regs + RXDMA_CFG);
+       if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+               writel(((5 & RXDMA_BLANK_IPKTS) |
+@@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int wo
+       struct net_device *dev = gp->dev;
+       int entry, drops, work_done = 0;
+       u32 done;
+-      __sum16 csum;
+       if (netif_msg_rx_status(gp))
+               printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+@@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int wo
+                       skb = copy_skb;
+               }
+-              csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+-              skb->csum = csum_unfold(csum);
+-              skb->ip_summed = CHECKSUM_COMPLETE;
++              if (likely(dev->features & NETIF_F_RXCSUM)) {
++                      __sum16 csum;
++
++                      csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
++                      skb->csum = csum_unfold(csum);
++                      skb->ip_summed = CHECKSUM_COMPLETE;
++              }
+               skb->protocol = eth_type_trans(skb, gp->dev);
+               napi_gro_receive(&gp->napi, skb);
+@@ -1760,7 +1762,7 @@ static void gem_init_dma(struct gem *gp)
+       writel(0, gp->regs + TXDMA_KICK);
+       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+-             ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++             (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+       writel(val, gp->regs + RXDMA_CFG);
+       writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+@@ -2986,8 +2988,8 @@ static int gem_init_one(struct pci_dev *
+       pci_set_drvdata(pdev, dev);
+       /* We can do scatter/gather and HW checksum */
+-      dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+-      dev->features |= dev->hw_features | NETIF_F_RXCSUM;
++      dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
++      dev->features = dev->hw_features;
+       if (pci_using_dac)
+               dev->features |= NETIF_F_HIGHDMA;
diff --git a/queue-4.14/net-tcp-fix-socket-lookups-with-so_bindtodevice.patch b/queue-4.14/net-tcp-fix-socket-lookups-with-so_bindtodevice.patch
new file mode 100644 (file)
index 0000000..5421760
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: David Ahern <dsahern@gmail.com>
+Date: Mon, 18 Jun 2018 12:30:37 -0700
+Subject: net/tcp: Fix socket lookups with SO_BINDTODEVICE
+
+From: David Ahern <dsahern@gmail.com>
+
+[ Upstream commit 8c43bd1706885ba1acfa88da02bc60a2ec16f68c ]
+
+Similar to 69678bcd4d2d ("udp: fix SO_BINDTODEVICE"), TCP socket lookups
+need to fail if dev_match is not true. Currently, a packet to a given port
+can match a socket bound to device when it should not. In the VRF case,
+this causes the lookup to hit a VRF socket and not a global socket
+resulting in a response trying to go through the VRF when it should not.
+
+Fixes: 3fa6f616a7a4d ("net: ipv4: add second dif to inet socket lookups")
+Fixes: 4297a0ef08572 ("net: ipv6: add second dif to inet6 socket lookups")
+Reported-by: Lou Berger <lberger@labn.net>
+Diagnosed-by: Renato Westphal <renato@opensourcerouting.org>
+Tested-by: Renato Westphal <renato@opensourcerouting.org>
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_hashtables.c  |    4 ++--
+ net/ipv6/inet6_hashtables.c |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -188,9 +188,9 @@ static inline int compute_score(struct s
+                       bool dev_match = (sk->sk_bound_dev_if == dif ||
+                                         sk->sk_bound_dev_if == sdif);
+-                      if (exact_dif && !dev_match)
++                      if (!dev_match)
+                               return -1;
+-                      if (sk->sk_bound_dev_if && dev_match)
++                      if (sk->sk_bound_dev_if)
+                               score += 4;
+               }
+               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -113,9 +113,9 @@ static inline int compute_score(struct s
+                       bool dev_match = (sk->sk_bound_dev_if == dif ||
+                                         sk->sk_bound_dev_if == sdif);
+-                      if (exact_dif && !dev_match)
++                      if (!dev_match)
+                               return -1;
+-                      if (sk->sk_bound_dev_if && dev_match)
++                      if (sk->sk_bound_dev_if)
+                               score++;
+               }
+               if (sk->sk_incoming_cpu == raw_smp_processor_id())
diff --git a/queue-4.14/net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch b/queue-4.14/net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch
new file mode 100644 (file)
index 0000000..6aaeb9e
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Fri, 15 Jun 2018 13:27:31 +0300
+Subject: net_sched: blackhole: tell upper qdisc about dropped packets
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+[ Upstream commit 7e85dc8cb35abf16455f1511f0670b57c1a84608 ]
+
+When blackhole is used on top of a classful qdisc like hfsc, it breaks
+the qlen and backlog counters because packets disappear without notice.
+
+In HFSC non-zero qlen while all classes are inactive triggers warning:
+WARNING: ... at net/sched/sch_hfsc.c:1393 hfsc_dequeue+0xba4/0xe90 [sch_hfsc]
+and schedules watchdog work endlessly.
+
+This patch returns __NET_XMIT_BYPASS in addition to NET_XMIT_SUCCESS;
+this flag tells the upper layer that this packet is gone and isn't queued.
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_blackhole.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/sch_blackhole.c
++++ b/net/sched/sch_blackhole.c
+@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_b
+                            struct sk_buff **to_free)
+ {
+       qdisc_drop(skb, sch, to_free);
+-      return NET_XMIT_SUCCESS;
++      return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+ static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/queue-4.14/qed-fix-setting-of-incorrect-eswitch-mode.patch b/queue-4.14/qed-fix-setting-of-incorrect-eswitch-mode.patch
new file mode 100644 (file)
index 0000000..d7e0040
--- /dev/null
@@ -0,0 +1,77 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:06 -0700
+Subject: qed: Fix setting of incorrect eswitch mode.
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit 538f8d00ba8bb417c4d9e76c61dee59d812d8287 ]
+
+By default, the driver incorrectly sets the eswitch mode to VEB (virtual
+Ethernet bridging).
+The VEB eswitch mode should be set only when SR-IOV is enabled; by default
+it should be set to NONE. The patch incorporates this change.
+
+Fixes: 0fefbfbaa ("qed*: Management firmware - notifications and defaults")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c   |    2 +-
+ drivers/net/ethernet/qlogic/qed/qed_sriov.c |   19 +++++++++++++++++--
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1782,7 +1782,7 @@ int qed_hw_init(struct qed_dev *cdev, st
+                       DP_INFO(p_hwfn, "Failed to update driver state\n");
+               rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+-                                             QED_OV_ESWITCH_VEB);
++                                             QED_OV_ESWITCH_NONE);
+               if (rc)
+                       DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+       }
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -4396,6 +4396,8 @@ static void qed_sriov_enable_qid_config(
+ static int qed_sriov_enable(struct qed_dev *cdev, int num)
+ {
+       struct qed_iov_vf_init_params params;
++      struct qed_hwfn *hwfn;
++      struct qed_ptt *ptt;
+       int i, j, rc;
+       if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+@@ -4408,8 +4410,8 @@ static int qed_sriov_enable(struct qed_d
+       /* Initialize HW for VF access */
+       for_each_hwfn(cdev, j) {
+-              struct qed_hwfn *hwfn = &cdev->hwfns[j];
+-              struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
++              hwfn = &cdev->hwfns[j];
++              ptt = qed_ptt_acquire(hwfn);
+               /* Make sure not to use more than 16 queues per VF */
+               params.num_queues = min_t(int,
+@@ -4445,6 +4447,19 @@ static int qed_sriov_enable(struct qed_d
+               goto err;
+       }
++      hwfn = QED_LEADING_HWFN(cdev);
++      ptt = qed_ptt_acquire(hwfn);
++      if (!ptt) {
++              DP_ERR(hwfn, "Failed to acquire ptt\n");
++              rc = -EBUSY;
++              goto err;
++      }
++
++      rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
++      if (rc)
++              DP_INFO(cdev, "Failed to update eswitch mode\n");
++      qed_ptt_release(hwfn, ptt);
++
+       return num;
+ err:
diff --git a/queue-4.14/qed-fix-use-of-incorrect-size-in-memcpy-call.patch b/queue-4.14/qed-fix-use-of-incorrect-size-in-memcpy-call.patch
new file mode 100644 (file)
index 0000000..0a479dc
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:07 -0700
+Subject: qed: Fix use of incorrect size in memcpy call.
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit cc9b27cdf7bd3c86df73439758ac1564bc8f5bbe ]
+
+Use the correct size value while copying chassis/port id values.
+
+Fixes: 6ad8c632e ("qed: Add support for query/config dcbx.")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dcbx.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -700,9 +700,9 @@ qed_dcbx_get_local_lldp_params(struct qe
+       p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+       memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+-             ARRAY_SIZE(p_local->local_chassis_id));
++             sizeof(p_local->local_chassis_id));
+       memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+-             ARRAY_SIZE(p_local->local_port_id));
++             sizeof(p_local->local_port_id));
+ }
+ static void
+@@ -714,9 +714,9 @@ qed_dcbx_get_remote_lldp_params(struct q
+       p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+       memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+-             ARRAY_SIZE(p_remote->peer_chassis_id));
++             sizeof(p_remote->peer_chassis_id));
+       memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+-             ARRAY_SIZE(p_remote->peer_port_id));
++             sizeof(p_remote->peer_port_id));
+ }
+ static int
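
The distinction the fix relies on, shown on a made-up structure rather than
the real qed definitions: ARRAY_SIZE() yields the element count while
sizeof() yields the byte count, so for any array of elements wider than one
byte ARRAY_SIZE() under-sizes a memcpy().

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct lldp_ids {			/* hypothetical layout */
	u32 chassis_id[4];
};

static void copy_ids(struct lldp_ids *dst, const struct lldp_ids *src)
{
	/* ARRAY_SIZE(src->chassis_id) == 4 elements, while
	 * sizeof(src->chassis_id) == 16 bytes, which is the correct length.
	 */
	memcpy(dst->chassis_id, src->chassis_id, sizeof(src->chassis_id));
}
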
diff --git a/queue-4.14/qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch b/queue-4.14/qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch
new file mode 100644 (file)
index 0000000..4019bf2
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:05 -0700
+Subject: qed: Limit msix vectors in kdump kernel to the minimum required count.
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit bb7858ba1102f82470a917e041fd23e6385c31be ]
+
+Memory size is limited in the kdump kernel environment. Allocating more
+MSI-X vectors (or queues) consumes a few tens of MBs of memory, which
+might lead to kdump kernel failure.
+This patch adds changes to limit the number of MSI-X vectors in kdump
+kernel to minimum required value (i.e., 2 per engine).
+
+Fixes: fe56b9e6a ("qed: Add module with basic common support")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -779,6 +779,14 @@ static int qed_slowpath_setup_int(struct
+       /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+       cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
++      if (is_kdump_kernel()) {
++              DP_INFO(cdev,
++                      "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
++                      cdev->int_params.in.min_msix_cnt);
++              cdev->int_params.in.num_vectors =
++                      cdev->int_params.in.min_msix_cnt;
++      }
++
+       rc = qed_set_int_mode(cdev, false);
+       if (rc)  {
+               DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/queue-4.14/qede-adverstise-software-timestamp-caps-when-phc-is-not-available.patch b/queue-4.14/qede-adverstise-software-timestamp-caps-when-phc-is-not-available.patch
new file mode 100644 (file)
index 0000000..0d664ce
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:08 -0700
+Subject: qede: Adverstise software timestamp caps when PHC is not available.
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit 82a4e71b1565dea8387f54503e806cf374e779ec ]
+
+When a PTP clock is not available for a PF (e.g., higher PFs in NPAR mode),
+the get-tsinfo() callback should return the software timestamp capabilities
+instead of returning an error.
+
+Fixes: 4c55215c ("qede: Add driver support for PTP")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_ptp.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev
+ {
+       struct qede_ptp *ptp = edev->ptp;
+-      if (!ptp)
+-              return -EIO;
++      if (!ptp) {
++              info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
++                                      SOF_TIMESTAMPING_RX_SOFTWARE |
++                                      SOF_TIMESTAMPING_SOFTWARE;
++              info->phc_index = -1;
++
++              return 0;
++      }
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/queue-4.14/qmi_wwan-add-support-for-the-dell-wireless-5821e-module.patch b/queue-4.14/qmi_wwan-add-support-for-the-dell-wireless-5821e-module.patch
new file mode 100644 (file)
index 0000000..6b0d026
--- /dev/null
@@ -0,0 +1,34 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Aleksander Morgado <aleksander@aleksander.es>
+Date: Sat, 23 Jun 2018 23:22:52 +0200
+Subject: qmi_wwan: add support for the Dell Wireless 5821e module
+
+From: Aleksander Morgado <aleksander@aleksander.es>
+
+[ Upstream commit e7e197edd09c25774b4f12cab19f9d5462f240f4 ]
+
+This module exposes two USB configurations: a QMI+AT capable setup on
+USB config #1 and an MBIM capable setup on USB config #2.
+
+By default the kernel will choose the MBIM capable configuration as
+long as the cdc_mbim driver is available. This patch adds support for
+the QMI port in the secondary configuration.
+
+Signed-off-by: Aleksander Morgado <aleksander@aleksander.es>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1245,6 +1245,7 @@ static const struct usb_device_id produc
+       {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
++      {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+       {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
+       {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
diff --git a/queue-4.14/r8152-napi-hangup-fix-after-disconnect.patch b/queue-4.14/r8152-napi-hangup-fix-after-disconnect.patch
new file mode 100644 (file)
index 0000000..acf05ed
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Mon, 25 Jun 2018 09:26:27 +0200
+Subject: r8152: napi hangup fix after disconnect
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+[ Upstream commit 0ee1f4734967af8321ecebaf9c74221ace34f2d5 ]
+
+When unplugging an r8152 adapter while the interface is UP, the NIC
+becomes unusable.  usb->disconnect (aka rtl8152_disconnect) deletes
+napi. Then, rtl8152_disconnect calls unregister_netdev and that invokes
+netdev->ndo_stop (aka rtl8152_close). rtl8152_close tries to
+napi_disable, but the napi is already deleted by disconnect above. So
+the first while loop in napi_disable never finishes. This results in
+complete deadlock of the network layer as there is rtnl_mutex held by
+unregister_netdev.
+
+So avoid the call to napi_disable in rtl8152_close when the device is
+already gone.
+
+The other calls to usb_kill_urb, cancel_delayed_work_sync,
+netif_stop_queue etc. seem to be fine. The urb and netdev is not
+destroyed yet.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Cc: linux-usb@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3959,7 +3959,8 @@ static int rtl8152_close(struct net_devi
+ #ifdef CONFIG_PM_SLEEP
+       unregister_pm_notifier(&tp->pm_notifier);
+ #endif
+-      napi_disable(&tp->napi);
++      if (!test_bit(RTL8152_UNPLUG, &tp->flags))
++              napi_disable(&tp->napi);
+       clear_bit(WORK_ENABLE, &tp->flags);
+       usb_kill_urb(tp->intr_urb);
+       cancel_delayed_work_sync(&tp->schedule);
index 3cb6125bd47ae627cefc4653729567ef94dcbca8..285e34ed3782ff0926195c04217f2a1e1e4247e3 100644 (file)
@@ -11,3 +11,38 @@ bcm63xx_enet-do-not-write-to-random-dma-channel-on-bcm6345.patch
 pci-exynos-fix-a-potential-init_clk_resources-null-pointer-dereference.patch
 crypto-crypto4xx-remove-bad-list_del.patch
 crypto-crypto4xx-fix-crypto4xx_build_pdr-crypto4xx_build_sdr-leak.patch
+alx-take-rtnl-before-calling-__alx_open-from-resume.patch
+atm-preserve-value-of-skb-truesize-when-accounting-to-vcc.patch
+atm-zatm-fix-potential-spectre-v1.patch
+hv_netvsc-split-sub-channel-setup-into-async-and-sync.patch
+ipv6-sr-fix-passing-wrong-flags-to-crypto_alloc_shash.patch
+ipvlan-fix-ifla_mtu-ignored-on-newlink.patch
+ixgbe-split-xdp_tx-tail-and-xdp_redirect-map-flushing.patch
+net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch
+net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch
+net-fix-use-after-free-in-gro-with-esp.patch
+net-macb-fix-ptp-time-adjustment-for-large-negative-delta.patch
+net-mlx5e-avoid-dealing-with-vport-representors-if-not-being-e-switch-manager.patch
+net-mlx5e-don-t-attempt-to-dereference-the-ppriv-struct-if-not-being-eswitch-manager.patch
+net-mlx5-e-switch-avoid-setup-attempt-if-not-being-e-switch-manager.patch
+net-mlx5-fix-command-interface-race-in-polling-mode.patch
+net-mlx5-fix-incorrect-raw-command-length-parsing.patch
+net-mlx5-fix-required-capability-for-manipulating-mpfs.patch
+net-mlx5-fix-wrong-size-allocation-for-qos-etc-tc-regitster.patch
+net-mvneta-fix-the-rx-desc-dma-address-in-the-rx-path.patch
+net-packet-fix-use-after-free.patch
+net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch
+net-sungem-fix-rx-checksum-support.patch
+net-tcp-fix-socket-lookups-with-so_bindtodevice.patch
+qede-adverstise-software-timestamp-caps-when-phc-is-not-available.patch
+qed-fix-setting-of-incorrect-eswitch-mode.patch
+qed-fix-use-of-incorrect-size-in-memcpy-call.patch
+qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch
+qmi_wwan-add-support-for-the-dell-wireless-5821e-module.patch
+r8152-napi-hangup-fix-after-disconnect.patch
+stmmac-fix-dma-channel-hang-in-half-duplex-mode.patch
+strparser-remove-early-eaten-to-fix-full-tcp-receive-buffer-stall.patch
+tcp-fix-fast-open-key-endianness.patch
+tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch
+vhost_net-validate-sock-before-trying-to-put-its-fd.patch
+vsock-fix-loopback-on-big-endian-systems.patch
diff --git a/queue-4.14/stmmac-fix-dma-channel-hang-in-half-duplex-mode.patch b/queue-4.14/stmmac-fix-dma-channel-hang-in-half-duplex-mode.patch
new file mode 100644 (file)
index 0000000..5c860fe
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Bhadram Varka <vbhadram@nvidia.com>
+Date: Sun, 17 Jun 2018 20:02:05 +0530
+Subject: stmmac: fix DMA channel hang in half-duplex mode
+
+From: Bhadram Varka <vbhadram@nvidia.com>
+
+[ Upstream commit b6cfffa7ad923c73f317ea50fd4ebcb3b4b6669c ]
+
+HW does not support half-duplex mode in a multi-queue
+scenario. Fix it by not advertising half-duplex mode
+if multi-queue is enabled.
+
+Signed-off-by: Bhadram Varka <vbhadram@nvidia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -914,6 +914,7 @@ static void stmmac_check_pcs_mode(struct
+ static int stmmac_init_phy(struct net_device *dev)
+ {
+       struct stmmac_priv *priv = netdev_priv(dev);
++      u32 tx_cnt = priv->plat->tx_queues_to_use;
+       struct phy_device *phydev;
+       char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+       char bus_id[MII_BUS_ID_SIZE];
+@@ -955,6 +956,15 @@ static int stmmac_init_phy(struct net_de
+                                        SUPPORTED_1000baseT_Full);
+       /*
++       * Half-duplex mode not supported with multiqueue
++       * half-duplex can only works with single queue
++       */
++      if (tx_cnt > 1)
++              phydev->supported &= ~(SUPPORTED_1000baseT_Half |
++                                     SUPPORTED_100baseT_Half |
++                                     SUPPORTED_10baseT_Half);
++
++      /*
+        * Broken HW is sometimes missing the pull-up resistor on the
+        * MDIO line, which results in reads to non-existent devices returning
+        * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
diff --git a/queue-4.14/strparser-remove-early-eaten-to-fix-full-tcp-receive-buffer-stall.patch b/queue-4.14/strparser-remove-early-eaten-to-fix-full-tcp-receive-buffer-stall.patch
new file mode 100644 (file)
index 0000000..c6ef329
--- /dev/null
@@ -0,0 +1,73 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Doron Roberts-Kedes <doronrk@fb.com>
+Date: Tue, 26 Jun 2018 18:33:33 -0700
+Subject: strparser: Remove early eaten to fix full tcp receive buffer stall
+
+From: Doron Roberts-Kedes <doronrk@fb.com>
+
+[ Upstream commit 977c7114ebda2e746a114840d3a875e0cdb826fb ]
+
+On receiving an incomplete message, the existing code stores the
+remaining length of the cloned skb in the early_eaten field instead of
+incrementing the value returned by __strp_recv. This defers invocation
+of sock_rfree for the current skb until the next invocation of
+__strp_recv, which returns early_eaten if early_eaten is non-zero.
+
+This behavior causes a stall when the current message occupies the very
+tail end of a massive skb, and strp_peek/need_bytes indicates that the
+remainder of the current message has yet to arrive on the socket. The
+TCP receive buffer is totally full, causing the TCP window to go to
+zero, so the remainder of the message will never arrive.
+
+Incrementing the value returned by __strp_recv by the amount otherwise
+stored in early_eaten prevents stalls of this nature.
+
+Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/strparser/strparser.c |   17 +----------------
+ 1 file changed, 1 insertion(+), 16 deletions(-)
+
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -35,7 +35,6 @@ struct _strp_msg {
+        */
+       struct strp_msg strp;
+       int accum_len;
+-      int early_eaten;
+ };
+ static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
+@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t
+       head = strp->skb_head;
+       if (head) {
+               /* Message already in progress */
+-
+-              stm = _strp_msg(head);
+-              if (unlikely(stm->early_eaten)) {
+-                      /* Already some number of bytes on the receive sock
+-                       * data saved in skb_head, just indicate they
+-                       * are consumed.
+-                       */
+-                      eaten = orig_len <= stm->early_eaten ?
+-                              orig_len : stm->early_eaten;
+-                      stm->early_eaten -= eaten;
+-
+-                      return eaten;
+-              }
+-
+               if (unlikely(orig_offset)) {
+                       /* Getting data with a non-zero offset when a message is
+                        * in progress is not expected. If it does happen, we
+@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t
+                               }
+                               stm->accum_len += cand_len;
++                              eaten += cand_len;
+                               strp->need_bytes = stm->strp.full_len -
+                                                      stm->accum_len;
+-                              stm->early_eaten = cand_len;
+                               STRP_STATS_ADD(strp->stats.bytes, cand_len);
+                               desc->count = 0; /* Stop reading socket */
+                               break;
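
In other words, a receive actor that keeps part of a message for later must
still report those bytes as consumed in the same call; deferring the
accounting keeps them charged against the receive buffer and, once it
fills, the window drops to zero and the rest of the message can never
arrive.  A rough, hypothetical sketch of that rule (the bookkeeping helper
is a stand-in, not strparser code):

#include <linux/fs.h>
#include <linux/skbuff.h>

/* Stand-in for whatever bookkeeping saves the incomplete message. */
static size_t stash_partial_message(struct sk_buff *skb,
				    unsigned int offset, size_t len)
{
	return len;	/* pretend everything offered was saved */
}

static int parser_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	size_t used = stash_partial_message(skb, offset, len);

	desc->count = 0;	/* stop reading until more data is needed */
	return used;		/* report every consumed byte in this call */
}
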
diff --git a/queue-4.14/tcp-fix-fast-open-key-endianness.patch b/queue-4.14/tcp-fix-fast-open-key-endianness.patch
new file mode 100644 (file)
index 0000000..6ca11db
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Yuchung Cheng <ycheng@google.com>
+Date: Wed, 27 Jun 2018 16:04:48 -0700
+Subject: tcp: fix Fast Open key endianness
+
+From: Yuchung Cheng <ycheng@google.com>
+
+[ Upstream commit c860e997e9170a6d68f9d1e6e2cf61f572191aaf ]
+
+Fast Open key could be stored in different endian based on the CPU.
+Previously hosts in different endianness in a server farm using
+the same key config (sysctl value) would produce different cookies.
+This patch fixes it by always storing it as little endian to keep
+same API for LE hosts.
+
+Reported-by: Daniele Iamartino <danielei@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/sysctl_net_ipv4.c |   18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -258,8 +258,9 @@ static int proc_tcp_fastopen_key(struct
+ {
+       struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       struct tcp_fastopen_context *ctxt;
+-      int ret;
+       u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
++      __le32 key[4];
++      int ret, i;
+       tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+       if (!tbl.data)
+@@ -268,11 +269,14 @@ static int proc_tcp_fastopen_key(struct
+       rcu_read_lock();
+       ctxt = rcu_dereference(tcp_fastopen_ctx);
+       if (ctxt)
+-              memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
++              memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+       else
+-              memset(user_key, 0, sizeof(user_key));
++              memset(key, 0, sizeof(key));
+       rcu_read_unlock();
++      for (i = 0; i < ARRAY_SIZE(key); i++)
++              user_key[i] = le32_to_cpu(key[i]);
++
+       snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+               user_key[0], user_key[1], user_key[2], user_key[3]);
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+@@ -288,12 +292,16 @@ static int proc_tcp_fastopen_key(struct
+                * first invocation of tcp_fastopen_cookie_gen
+                */
+               tcp_fastopen_init_key_once(false);
+-              tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
++
++              for (i = 0; i < ARRAY_SIZE(user_key); i++)
++                      key[i] = cpu_to_le32(user_key[i]);
++
++              tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
+       }
+ bad_key:
+       pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+-             user_key[0], user_key[1], user_key[2], user_key[3],
++               user_key[0], user_key[1], user_key[2], user_key[3],
+              (char *)tbl.data, ret);
+       kfree(tbl.data);
+       return ret;
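
The conversion pattern used by the fix, shown on a hypothetical key rather
than the TCP code: keep the stored copy in a fixed byte order (__le32) and
convert at the boundary with cpu_to_le32()/le32_to_cpu(), so LE and BE
hosts configured with the same sysctl string end up with the same key
bytes.

#include <linux/types.h>
#include <asm/byteorder.h>

#define KEY_WORDS 4

/* Hypothetical helpers: the stored copy is always little endian. */
static void key_store(__le32 stored[KEY_WORDS], const u32 host[KEY_WORDS])
{
	int i;

	for (i = 0; i < KEY_WORDS; i++)
		stored[i] = cpu_to_le32(host[i]);
}

static void key_load(u32 host[KEY_WORDS], const __le32 stored[KEY_WORDS])
{
	int i;

	for (i = 0; i < KEY_WORDS; i++)
		host[i] = le32_to_cpu(stored[i]);
}
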
diff --git a/queue-4.14/tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch b/queue-4.14/tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch
new file mode 100644 (file)
index 0000000..c459e5a
--- /dev/null
@@ -0,0 +1,64 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: "Ilpo Järvinen" <ilpo.jarvinen@helsinki.fi>
+Date: Fri, 29 Jun 2018 13:07:53 +0300
+Subject: tcp: prevent bogus FRTO undos with non-SACK flows
+
+From: "Ilpo Järvinen" <ilpo.jarvinen@helsinki.fi>
+
+[ Upstream commit 1236f22fbae15df3736ab4a984c64c0c6ee6254c ]
+
+If SACK is not enabled and the first cumulative ACK after the RTO
+retransmission covers more than the retransmitted skb, a spurious
+FRTO undo will trigger (assuming FRTO is enabled for that RTO).
+The reason is that any non-retransmitted segment acknowledged will
+set FLAG_ORIG_SACK_ACKED in tcp_clean_rtx_queue even if there is
+no indication that it would have been delivered for real (the
+scoreboard is not kept with TCPCB_SACKED_ACKED bits in the non-SACK
+case so the check for that bit won't help like it does with SACK).
+Having FLAG_ORIG_SACK_ACKED set results in the spurious FRTO undo
+in tcp_process_loss.
+
+We need to use more strict condition for non-SACK case and check
+that none of the cumulatively ACKed segments were retransmitted
+to prove that progress is due to original transmissions. Only then
+keep FLAG_ORIG_SACK_ACKED set, allowing FRTO undo to proceed in
+non-SACK case.
+
+(FLAG_ORIG_SACK_ACKED is planned to be renamed to FLAG_ORIG_PROGRESS
+to better indicate its purpose but to keep this change minimal, it
+will be done in another patch).
+
+Besides burstiness and congestion control violations, this problem
+can result in an RTO loop: when the loss recovery is prematurely
+undone, only new data will be transmitted (if available) and
+the next retransmission can occur only after a new RTO which in case
+of multiple losses (that are not for consecutive packets) requires
+one RTO per loss to recover.
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Tested-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3194,6 +3194,15 @@ static int tcp_clean_rtx_queue(struct so
+               if (tcp_is_reno(tp)) {
+                       tcp_remove_reno_sacks(sk, pkts_acked);
++
++                      /* If any of the cumulatively ACKed segments was
++                       * retransmitted, non-SACK case cannot confirm that
++                       * progress was due to original transmission due to
++                       * lack of TCPCB_SACKED_ACKED bits even if some of
++                       * the packets may have been never retransmitted.
++                       */
++                      if (flag & FLAG_RETRANS_DATA_ACKED)
++                              flag &= ~FLAG_ORIG_SACK_ACKED;
+               } else {
+                       int delta;
diff --git a/queue-4.14/vhost_net-validate-sock-before-trying-to-put-its-fd.patch b/queue-4.14/vhost_net-validate-sock-before-trying-to-put-its-fd.patch
new file mode 100644 (file)
index 0000000..01aea5c
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 21 Jun 2018 13:11:31 +0800
+Subject: vhost_net: validate sock before trying to put its fd
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit b8f1f65882f07913157c44673af7ec0b308d03eb ]
+
+Sock will be NULL if we pass -1 to vhost_net_set_backend(), but when
+we hit an error during ubuf allocation the code does not check for
+NULL before calling sockfd_put(), which leads to a NULL pointer
+dereference. Fix this by checking the sock pointer first.
+
+Fixes: bab632d69ee4 ("vhost: vhost TX zero-copy support")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -1186,7 +1186,8 @@ err_used:
+       if (ubufs)
+               vhost_net_ubuf_put_wait_and_free(ubufs);
+ err_ubufs:
+-      sockfd_put(sock);
++      if (sock)
++              sockfd_put(sock);
+ err_vq:
+       mutex_unlock(&vq->mutex);
+ err:
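The pattern behind the fix is the usual rule for a shared error label: only release what was actually acquired. A minimal stand-alone sketch of that pattern follows; the types and helpers are invented for illustration and are not the vhost API (sockfd_put() itself does not tolerate a NULL argument, which is the crash being fixed).

#include <stdio.h>
#include <stdlib.h>

struct sock_ref { int fd; };             /* stand-in for the held socket reference */

static struct sock_ref *backend_get(int fd)
{
        if (fd == -1)
                return NULL;             /* -1 means "detach backend": nothing to hold */
        struct sock_ref *s = malloc(sizeof(*s));
        if (s)
                s->fd = fd;
        return s;
}

static void backend_put(struct sock_ref *s)
{
        printf("putting fd %d\n", s->fd);  /* would dereference NULL if s were NULL */
        free(s);
}

static int set_backend(int fd, int fail_ubufs)
{
        struct sock_ref *sock = backend_get(fd);

        if (fail_ubufs)                  /* stand-in for the ubuf allocation failing */
                goto err_ubufs;
        /* on success the reference stays attached to the backend */
        return 0;

err_ubufs:
        if (sock)                        /* the fix: sock is legitimately NULL for fd == -1 */
                backend_put(sock);
        return -1;
}

int main(void)
{
        return set_backend(-1, 1);       /* error path with no socket held: must not crash */
}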
diff --git a/queue-4.14/vsock-fix-loopback-on-big-endian-systems.patch b/queue-4.14/vsock-fix-loopback-on-big-endian-systems.patch
new file mode 100644 (file)
index 0000000..b684432
--- /dev/null
@@ -0,0 +1,36 @@
+From foo@baz Thu Jul 19 08:32:33 CEST 2018
+From: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 15:51:51 +0200
+Subject: VSOCK: fix loopback on big-endian systems
+
+From: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
+
+[ Upstream commit e5ab564c9ebee77794842ca7d7476147b83d6a27 ]
+
+The dst_cid and src_cid are 64 bits, therefore 64 bit accessors should be
+used, and in fact in virtio_transport_common.c only 64 bit accessors are
+used. Using 32 bit accessors for 64 bit values breaks big endian systems.
+
+This patch fixes a wrong use of le32_to_cpu in virtio_transport_send_pkt.
+
+Fixes: b9116823189e85ccf384 ("VSOCK: add loopback to virtio_transport")
+
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/virtio_transport.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_
+               return -ENODEV;
+       }
+-      if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
++      if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+               return virtio_transport_send_pkt_loopback(vsock, pkt);
+       if (pkt->reply)