--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Fri, 29 Jun 2018 13:28:07 -0500
+Subject: atm: zatm: Fix potential Spectre v1
+
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+
+[ Upstream commit ced9e191501e52b95e1b57b8e0db00943869eed0 ]
+
+pool can be indirectly controlled by user-space, hence leading to
+a potential exploitation of the Spectre variant 1 vulnerability.
+
+This issue was detected with the help of Smatch:
+
+drivers/atm/zatm.c:1491 zatm_ioctl() warn: potential spectre issue
+'zatm_dev->pool_info' (local cap)
+
+Fix this by sanitizing pool before using it to index
+zatm_dev->pool_info
+
+Notice that given that speculation windows are large, the policy is
+to kill the speculation on the first load and not worry if it can be
+completed with a dependent load/store [1].
+
+[1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/atm/zatm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -1481,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *de
+ return -EFAULT;
+ if (pool < 0 || pool > ZATM_LAST_POOL)
+ return -EINVAL;
++ pool = array_index_nospec(pool,
++ ZATM_LAST_POOL + 1);
+ if (copy_from_user(&info,
+ &((struct zatm_pool_req __user *) arg)->info,
+ sizeof(info))) return -EFAULT;
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 22 Jun 2018 06:44:14 -0700
+Subject: net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 74174fe5634ffbf645a7ca5a261571f700b2f332 ]
+
+On fast hosts or malicious bots, we trigger a DCCP_BUG() which
+seems excessive.
+
+syzbot reported :
+
+BUG: delta (-6195) <= 0 at net/dccp/ccids/ccid3.c:628/ccid3_hc_rx_send_feedback()
+CPU: 1 PID: 18 Comm: ksoftirqd/1 Not tainted 4.18.0-rc1+ #112
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113
+ ccid3_hc_rx_send_feedback net/dccp/ccids/ccid3.c:628 [inline]
+ ccid3_hc_rx_packet_recv.cold.16+0x38/0x71 net/dccp/ccids/ccid3.c:793
+ ccid_hc_rx_packet_recv net/dccp/ccid.h:185 [inline]
+ dccp_deliver_input_to_ccids+0xf0/0x280 net/dccp/input.c:180
+ dccp_rcv_established+0x87/0xb0 net/dccp/input.c:378
+ dccp_v4_do_rcv+0x153/0x180 net/dccp/ipv4.c:654
+ sk_backlog_rcv include/net/sock.h:914 [inline]
+ __sk_receive_skb+0x3ba/0xd80 net/core/sock.c:517
+ dccp_v4_rcv+0x10f9/0x1f58 net/dccp/ipv4.c:875
+ ip_local_deliver_finish+0x2eb/0xda0 net/ipv4/ip_input.c:215
+ NF_HOOK include/linux/netfilter.h:287 [inline]
+ ip_local_deliver+0x1e9/0x750 net/ipv4/ip_input.c:256
+ dst_input include/net/dst.h:450 [inline]
+ ip_rcv_finish+0x823/0x2220 net/ipv4/ip_input.c:396
+ NF_HOOK include/linux/netfilter.h:287 [inline]
+ ip_rcv+0xa18/0x1284 net/ipv4/ip_input.c:492
+ __netif_receive_skb_core+0x2488/0x3680 net/core/dev.c:4628
+ __netif_receive_skb+0x2c/0x1e0 net/core/dev.c:4693
+ process_backlog+0x219/0x760 net/core/dev.c:5373
+ napi_poll net/core/dev.c:5771 [inline]
+ net_rx_action+0x7da/0x1980 net/core/dev.c:5837
+ __do_softirq+0x2e8/0xb17 kernel/softirq.c:284
+ run_ksoftirqd+0x86/0x100 kernel/softirq.c:645
+ smpboot_thread_fn+0x417/0x870 kernel/smpboot.c:164
+ kthread+0x345/0x410 kernel/kthread.c:240
+ ret_from_fork+0x3a/0x50 arch/x86/entry/entry_64.S:412
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
+Cc: dccp@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ccids/ccid3.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -624,9 +624,8 @@ static void ccid3_hc_rx_send_feedback(st
+ case CCID3_FBACK_PERIODIC:
+ delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
+ if (delta <= 0)
+- DCCP_BUG("delta (%ld) <= 0", (long)delta);
+- else
+- hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
++ delta = 1;
++ hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ break;
+ default:
+ return;
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 22 Jun 2018 06:44:15 -0700
+Subject: net: dccp: switch rx_tstamp_last_feedback to monotonic clock
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 0ce4e70ff00662ad7490e545ba0cd8c1fa179fca ]
+
+To compute delays, better not use time of the day which can
+be changed by admins or malicious programs.
+
+Also change ccid3_first_li() to use s64 type for delta variable
+to avoid potential overflows.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Gerrit Renker <gerrit@erg.abdn.ac.uk>
+Cc: dccp@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dccp/ccids/ccid3.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(st
+ {
+ struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+ struct dccp_sock *dp = dccp_sk(sk);
+- ktime_t now = ktime_get_real();
++ ktime_t now = ktime_get();
+ s64 delta = 0;
+
+ switch (fbtype) {
+@@ -631,7 +631,7 @@ static void ccid3_hc_rx_send_feedback(st
+ return;
+ }
+
+- ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
++ ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
+ hc->rx_x_recv, hc->rx_pinv);
+
+ hc->rx_tstamp_last_feedback = now;
+@@ -678,7 +678,8 @@ static int ccid3_hc_rx_insert_options(st
+ static u32 ccid3_first_li(struct sock *sk)
+ {
+ struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+- u32 x_recv, p, delta;
++ u32 x_recv, p;
++ s64 delta;
+ u64 fval;
+
+ if (hc->rx_rtt == 0) {
+@@ -686,7 +687,9 @@ static u32 ccid3_first_li(struct sock *s
+ hc->rx_rtt = DCCP_FALLBACK_RTT;
+ }
+
+- delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
++ delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
++ if (delta <= 0)
++ delta = 1;
+ x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ if (x_recv == 0) { /* would also trigger divide-by-zero */
+ DCCP_WARN("X_recv==0\n");
--- /dev/null
+From foo@baz Thu Jul 19 09:11:13 CEST 2018
+From: Alex Vesker <valex@mellanox.com>
+Date: Tue, 12 Jun 2018 16:14:31 +0300
+Subject: net/mlx5: Fix command interface race in polling mode
+
+From: Alex Vesker <valex@mellanox.com>
+
+[ Upstream commit d412c31dae053bf30a1bc15582a9990df297a660 ]
+
+The command interface can work in two modes: Events and Polling.
+In the general case, each time we invoke a command, a work is
+queued to handle it.
+
+When working in events, the interrupt handler completes the
+command execution. On the other hand, when working in polling
+mode, the work itself completes it.
+
+Due to a bug in the work handler, a command could have been
+completed by the interrupt handler while the work handler
+hadn't finished yet, causing it to be completed once again
+if the command interface mode was changed from Events to
+Polling after the interrupt handler was called.
+
+mlx5_unload_one()
+ mlx5_stop_eqs()
+ // Destroy the EQ before cmd EQ
+ ...cmd_work_handler()
+ write_doorbell()
+ --> EVENT_TYPE_CMD
+ mlx5_cmd_comp_handler() // First free
+ free_ent(cmd, ent->idx)
+ complete(&ent->done)
+
+ <-- mlx5_stop_eqs //cmd was complete
+ // move to polling before destroying the last cmd EQ
+ mlx5_cmd_use_polling()
+ cmd->mode = POLL;
+
+ --> cmd_work_handler (continues)
+ if (cmd->mode == POLL)
+ mlx5_cmd_comp_handler() // Double free
+
+The solution is to store the cmd->mode before writing the doorbell.
+
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -643,6 +643,7 @@ static void cmd_work_handler(struct work
+ struct semaphore *sem;
+ unsigned long flags;
+ int alloc_ret;
++ int cmd_mode;
+
+ sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ down(sem);
+@@ -688,6 +689,7 @@ static void cmd_work_handler(struct work
+ set_signature(ent, !cmd->checksum_disabled);
+ dump_command(dev, ent, 1);
+ ent->ts1 = ktime_get_ns();
++ cmd_mode = cmd->mode;
+
+ /* ring doorbell after the descriptor is valid */
+ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
+@@ -695,7 +697,7 @@ static void cmd_work_handler(struct work
+ iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
+ mmiowb();
+ /* if not in polling don't use ent after this point */
+- if (cmd->mode == CMD_MODE_POLLING) {
++ if (cmd_mode == CMD_MODE_POLLING) {
+ poll_timeout(ent);
+ /* make sure we read the descriptor after ownership is SW */
+ rmb();
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Alex Vesker <valex@mellanox.com>
+Date: Fri, 25 May 2018 20:25:59 +0300
+Subject: net/mlx5: Fix incorrect raw command length parsing
+
+From: Alex Vesker <valex@mellanox.com>
+
+[ Upstream commit 603b7bcff824740500ddfa001d7a7168b0b38542 ]
+
+The NULL character was not set correctly for the string containing
+the command length, this caused failures reading the output of the
+command due to a random length. The fix is to initialize the output
+length string.
+
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1126,7 +1126,7 @@ static ssize_t outlen_write(struct file
+ {
+ struct mlx5_core_dev *dev = filp->private_data;
+ struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+- char outlen_str[8];
++ char outlen_str[8] = {0};
+ int outlen;
+ void *ptr;
+ int err;
+@@ -1141,8 +1141,6 @@ static ssize_t outlen_write(struct file
+ if (copy_from_user(outlen_str, buf, count))
+ return -EFAULT;
+
+- outlen_str[7] = 0;
+-
+ err = sscanf(outlen_str, "%d", &outlen);
+ if (err < 0)
+ return err;
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Jun 2018 19:18:50 -0700
+Subject: net: sungem: fix rx checksum support
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 12b03558cef6d655d0d394f5e98a6fd07c1f6c0f ]
+
+After commit 88078d98d1bb ("net: pskb_trim_rcsum() and CHECKSUM_COMPLETE
+are friends"), sungem owners reported the infamous "eth0: hw csum failure"
+message.
+
+CHECKSUM_COMPLETE has in fact never worked for this driver, but this
+was masked by the fact that upper stacks had to strip the FCS, and
+therefore skb->ip_summed was set back to CHECKSUM_NONE before
+my recent change.
+
+Driver configures a number of bytes to skip when the chip computes
+the checksum, and for some reason only half of the Ethernet header
+was skipped.
+
+Then a second problem is that we should strip the FCS by default,
+unless the driver is updated to eventually support NETIF_F_RXFCS in
+the future.
+
+Finally, a driver should check if NETIF_F_RXCSUM feature is enabled
+or not, so that the admin can turn off rx checksum if wanted.
+
+Many thanks to Andreas Schwab and Mathieu Malaterre for their
+help in debugging this issue.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Meelis Roos <mroos@linux.ee>
+Reported-by: Mathieu Malaterre <malat@debian.org>
+Reported-by: Andreas Schwab <schwab@linux-m68k.org>
+Tested-by: Andreas Schwab <schwab@linux-m68k.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/sungem.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -60,8 +60,7 @@
+ #include <linux/sungem_phy.h>
+ #include "sungem.h"
+
+-/* Stripping FCS is causing problems, disabled for now */
+-#undef STRIP_FCS
++#define STRIP_FCS
+
+ #define DEFAULT_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *g
+ writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+ if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int wo
+ struct net_device *dev = gp->dev;
+ int entry, drops, work_done = 0;
+ u32 done;
+- __sum16 csum;
+
+ if (netif_msg_rx_status(gp))
+ printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int wo
+ skb = copy_skb;
+ }
+
+- csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+- skb->csum = csum_unfold(csum);
+- skb->ip_summed = CHECKSUM_COMPLETE;
++ if (likely(dev->features & NETIF_F_RXCSUM)) {
++ __sum16 csum;
++
++ csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
++ skb->csum = csum_unfold(csum);
++ skb->ip_summed = CHECKSUM_COMPLETE;
++ }
+ skb->protocol = eth_type_trans(skb, gp->dev);
+
+ napi_gro_receive(&gp->napi, skb);
+@@ -1755,7 +1757,7 @@ static void gem_init_dma(struct gem *gp)
+ writel(0, gp->regs + TXDMA_KICK);
+
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+
+ writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+@@ -2973,8 +2975,8 @@ static int gem_init_one(struct pci_dev *
+ pci_set_drvdata(pdev, dev);
+
+ /* We can do scatter/gather and HW checksum */
+- dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+- dev->features |= dev->hw_features | NETIF_F_RXCSUM;
++ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
++ dev->features = dev->hw_features;
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Fri, 15 Jun 2018 13:27:31 +0300
+Subject: net_sched: blackhole: tell upper qdisc about dropped packets
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+[ Upstream commit 7e85dc8cb35abf16455f1511f0670b57c1a84608 ]
+
+When blackhole is used on top of a classful qdisc like hfsc, it breaks
+the qlen and backlog counters because packets disappear without notice.
+
+In HFSC non-zero qlen while all classes are inactive triggers warning:
+WARNING: ... at net/sched/sch_hfsc.c:1393 hfsc_dequeue+0xba4/0xe90 [sch_hfsc]
+and schedules watchdog work endlessly.
+
+This patch returns __NET_XMIT_BYPASS in addition to NET_XMIT_SUCCESS;
+this flag tells the upper layer: this packet is gone and isn't queued.
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_blackhole.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/sch_blackhole.c
++++ b/net/sched/sch_blackhole.c
+@@ -20,7 +20,7 @@
+ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ qdisc_drop(skb, sch);
+- return NET_XMIT_SUCCESS;
++ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+
+ static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
--- /dev/null
+From foo@baz Thu Jul 19 09:11:13 CEST 2018
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Sun, 1 Jul 2018 20:03:05 -0700
+Subject: qed: Limit msix vectors in kdump kernel to the minimum required count.
+
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+
+[ Upstream commit bb7858ba1102f82470a917e041fd23e6385c31be ]
+
+Memory size is limited in the kdump kernel environment. Allocation of more
+msix-vectors (or queues) consumes a few tens of MBs of memory, which might
+lead to the kdump kernel failure.
+This patch adds changes to limit the number of MSI-X vectors in kdump
+kernel to minimum required value (i.e., 2 per engine).
+
+Fixes: fe56b9e6a ("qed: Add module with basic common support")
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -22,6 +22,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/vmalloc.h>
+ #include <linux/qed/qed_if.h>
++#include <linux/crash_dump.h>
+
+ #include "qed.h"
+ #include "qed_sp.h"
+@@ -634,6 +635,14 @@ static int qed_slowpath_setup_int(struct
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
++ if (is_kdump_kernel()) {
++ DP_INFO(cdev,
++ "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
++ cdev->int_params.in.min_msix_cnt);
++ cdev->int_params.in.num_vectors =
++ cdev->int_params.in.min_msix_cnt;
++ }
++
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Mon, 25 Jun 2018 09:26:27 +0200
+Subject: r8152: napi hangup fix after disconnect
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+[ Upstream commit 0ee1f4734967af8321ecebaf9c74221ace34f2d5 ]
+
+When unplugging an r8152 adapter while the interface is UP, the NIC
+becomes unusable. usb->disconnect (aka rtl8152_disconnect) deletes
+napi. Then, rtl8152_disconnect calls unregister_netdev and that invokes
+netdev->ndo_stop (aka rtl8152_close). rtl8152_close tries to
+napi_disable, but the napi is already deleted by disconnect above. So
+the first while loop in napi_disable never finishes. This results in
+complete deadlock of the network layer as there is rtnl_mutex held by
+unregister_netdev.
+
+So avoid the call to napi_disable in rtl8152_close when the device is
+already gone.
+
+The other calls to usb_kill_urb, cancel_delayed_work_sync,
+netif_stop_queue etc. seem to be fine. The urb and netdev is not
+destroyed yet.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Cc: linux-usb@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/r8152.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3139,7 +3139,8 @@ static int rtl8152_close(struct net_devi
+ #ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&tp->pm_notifier);
+ #endif
+- napi_disable(&tp->napi);
++ if (!test_bit(RTL8152_UNPLUG, &tp->flags))
++ napi_disable(&tp->napi);
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
kbuild-fix-escaping-in-.cmd-files-for-future-make.patch
x86-cpu-probe-cpuid-leaf-6-even-when-cpuid_level-6.patch
perf-tools-move-syscall-number-fallbacks-from-perf-sys.h-to-tools-arch-x86-include-asm.patch
+atm-zatm-fix-potential-spectre-v1.patch
+net-dccp-avoid-crash-in-ccid3_hc_rx_send_feedback.patch
+net-dccp-switch-rx_tstamp_last_feedback-to-monotonic-clock.patch
+net-mlx5-fix-incorrect-raw-command-length-parsing.patch
+net-sungem-fix-rx-checksum-support.patch
+qed-limit-msix-vectors-in-kdump-kernel-to-the-minimum-required-count.patch
+r8152-napi-hangup-fix-after-disconnect.patch
+tcp-fix-fast-open-key-endianness.patch
+tcp-prevent-bogus-frto-undos-with-non-sack-flows.patch
+vhost_net-validate-sock-before-trying-to-put-its-fd.patch
+net_sched-blackhole-tell-upper-qdisc-about-dropped-packets.patch
+net-mlx5-fix-command-interface-race-in-polling-mode.patch
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Yuchung Cheng <ycheng@google.com>
+Date: Wed, 27 Jun 2018 16:04:48 -0700
+Subject: tcp: fix Fast Open key endianness
+
+From: Yuchung Cheng <ycheng@google.com>
+
+[ Upstream commit c860e997e9170a6d68f9d1e6e2cf61f572191aaf ]
+
+Fast Open key could be stored in different endian based on the CPU.
+Previously hosts in different endianness in a server farm using
+the same key config (sysctl value) would produce different cookies.
+This patch fixes it by always storing it as little endian to keep
+same API for LE hosts.
+
+Reported-by: Daniele Iamartino <danielei@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/sysctl_net_ipv4.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -213,8 +213,9 @@ static int proc_tcp_fastopen_key(struct
+ {
+ struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+ struct tcp_fastopen_context *ctxt;
+- int ret;
+ u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
++ __le32 key[4];
++ int ret, i;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+ if (!tbl.data)
+@@ -223,11 +224,14 @@ static int proc_tcp_fastopen_key(struct
+ rcu_read_lock();
+ ctxt = rcu_dereference(tcp_fastopen_ctx);
+ if (ctxt)
+- memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
++ memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+ else
+- memset(user_key, 0, sizeof(user_key));
++ memset(key, 0, sizeof(key));
+ rcu_read_unlock();
+
++ for (i = 0; i < ARRAY_SIZE(key); i++)
++ user_key[i] = le32_to_cpu(key[i]);
++
+ snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+ user_key[0], user_key[1], user_key[2], user_key[3]);
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+@@ -243,12 +247,16 @@ static int proc_tcp_fastopen_key(struct
+ * first invocation of tcp_fastopen_cookie_gen
+ */
+ tcp_fastopen_init_key_once(false);
+- tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
++
++ for (i = 0; i < ARRAY_SIZE(user_key); i++)
++ key[i] = cpu_to_le32(user_key[i]);
++
++ tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
+ }
+
+ bad_key:
+ pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+- user_key[0], user_key[1], user_key[2], user_key[3],
++ user_key[0], user_key[1], user_key[2], user_key[3],
+ (char *)tbl.data, ret);
+ kfree(tbl.data);
+ return ret;
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: "Ilpo Järvinen" <ilpo.jarvinen@helsinki.fi>
+Date: Fri, 29 Jun 2018 13:07:53 +0300
+Subject: tcp: prevent bogus FRTO undos with non-SACK flows
+
+From: "Ilpo Järvinen" <ilpo.jarvinen@helsinki.fi>
+
+[ Upstream commit 1236f22fbae15df3736ab4a984c64c0c6ee6254c ]
+
+If SACK is not enabled and the first cumulative ACK after the RTO
+retransmission covers more than the retransmitted skb, a spurious
+FRTO undo will trigger (assuming FRTO is enabled for that RTO).
+The reason is that any non-retransmitted segment acknowledged will
+set FLAG_ORIG_SACK_ACKED in tcp_clean_rtx_queue even if there is
+no indication that it would have been delivered for real (the
+scoreboard is not kept with TCPCB_SACKED_ACKED bits in the non-SACK
+case so the check for that bit won't help like it does with SACK).
+Having FLAG_ORIG_SACK_ACKED set results in the spurious FRTO undo
+in tcp_process_loss.
+
+We need to use more strict condition for non-SACK case and check
+that none of the cumulatively ACKed segments were retransmitted
+to prove that progress is due to original transmissions. Only then
+keep FLAG_ORIG_SACK_ACKED set, allowing FRTO undo to proceed in
+non-SACK case.
+
+(FLAG_ORIG_SACK_ACKED is planned to be renamed to FLAG_ORIG_PROGRESS
+to better indicate its purpose but to keep this change minimal, it
+will be done in another patch).
+
+Besides burstiness and congestion control violations, this problem
+can result in RTO loop: When the loss recovery is prematurely
+undoed, only new data will be transmitted (if available) and
+the next retransmission can occur only after a new RTO which in case
+of multiple losses (that are not for consecutive packets) requires
+one RTO per loss to recover.
+
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
+Tested-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3218,6 +3218,15 @@ static int tcp_clean_rtx_queue(struct so
+
+ if (tcp_is_reno(tp)) {
+ tcp_remove_reno_sacks(sk, pkts_acked);
++
++ /* If any of the cumulatively ACKed segments was
++ * retransmitted, non-SACK case cannot confirm that
++ * progress was due to original transmission due to
++ * lack of TCPCB_SACKED_ACKED bits even if some of
++ * the packets may have been never retransmitted.
++ */
++ if (flag & FLAG_RETRANS_DATA_ACKED)
++ flag &= ~FLAG_ORIG_SACK_ACKED;
+ } else {
+ int delta;
+
--- /dev/null
+From foo@baz Thu Jul 19 10:08:15 CEST 2018
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 21 Jun 2018 13:11:31 +0800
+Subject: vhost_net: validate sock before trying to put its fd
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit b8f1f65882f07913157c44673af7ec0b308d03eb ]
+
+Sock will be NULL if we pass -1 to vhost_net_set_backend(), but when
+we meet errors during ubuf allocation, the code does not check for
+NULL before calling sockfd_put(); this will lead to a NULL
+dereference. Fix this by checking the sock pointer beforehand.
+
+Fixes: bab632d69ee4 ("vhost: vhost TX zero-copy support")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -955,7 +955,8 @@ err_used:
+ if (ubufs)
+ vhost_net_ubuf_put_wait_and_free(ubufs);
+ err_ubufs:
+- sockfd_put(sock);
++ if (sock)
++ sockfd_put(sock);
+ err_vq:
+ mutex_unlock(&vq->mutex);
+ err: