--- /dev/null
+From sean.j.christopherson@intel.com Tue Jan 29 10:42:00 2019
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Mon, 28 Jan 2019 12:51:02 -0800
+Subject: KVM: x86: Fix a 4.14 backport regression related to userspace/guest FPU
+To: Sasha Levin <sashal@kernel.org>
+Cc: kvm@vger.kernel.org, stable@vger.kernel.org, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Peter Xu" <peterx@redhat.com>, "Rik van Riel" <riel@redhat.com>, "Paolo Bonzini" <pbonzini@redhat.com>, "Radim Krčmář" <rkrcmar@redhat.com>, "Thomas Lindroth" <thomas.lindroth@gmail.com>
+Message-ID: <20190128205102.29393-1-sean.j.christopherson@intel.com>
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+Upstream commit:
+
+ f775b13eedee ("x86,kvm: move qemu/guest FPU switching out to vcpu_run")
+
+introduced a bug, which was later fixed by upstream commit:
+
+ 5663d8f9bbe4 ("kvm: x86: fix WARN due to uninitialized guest FPU state")
+
+For reasons unknown, both commits were initially passed-over for
+inclusion in the 4.14 stable branch despite being tagged for stable.
+Eventually, someone noticed that the fixup, commit 5663d8f9bbe4, was
+missing from stable[1], and so it was queued up for 4.14 and included in
+release v4.14.79.
+
+Even later, the original buggy patch, commit f775b13eedee, was also
+applied to the 4.14 stable branch. Through an unlucky coincidence, the
+incorrect ordering did not generate a conflict between the two patches,
+and led to v4.14.94 and later releases containing a spurious call to
+kvm_load_guest_fpu() in kvm_arch_vcpu_ioctl_run(). As a result, KVM may
+reload stale guest FPU state, e.g. after accepting an INIT event. This
+can manifest as crashes during boot, segfaults, failed checksums and so
+on and so forth.
+
+Remove the unwanted kvm_{load,put}_guest_fpu() calls, i.e. make
+kvm_arch_vcpu_ioctl_run() look like commit 5663d8f9bbe4 was backported
+after commit f775b13eedee.
+
+[1] https://www.spinics.net/lists/stable/msg263931.html
+
+Fixes: 4124a4cff344 ("x86,kvm: move qemu/guest FPU switching out to vcpu_run")
+Cc: stable@vger.kernel.org
+Cc: Sasha Levin <sashal@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Reported-by: Roman Mamedov
+Reported-by: Thomas Lindroth <thomas.lindroth@gmail.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7422,14 +7422,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ }
+ }
+
+- kvm_load_guest_fpu(vcpu);
+-
+ if (unlikely(vcpu->arch.complete_userspace_io)) {
+ int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
+ vcpu->arch.complete_userspace_io = NULL;
+ r = cui(vcpu);
+ if (r <= 0)
+- goto out_fpu;
++ goto out;
+ } else
+ WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
+
+@@ -7438,8 +7436,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ else
+ r = vcpu_run(vcpu);
+
+-out_fpu:
+- kvm_put_guest_fpu(vcpu);
+ out:
+ kvm_put_guest_fpu(vcpu);
+ post_kvm_run_save(vcpu);
--- /dev/null
+From 52a76235d0c4dd259cd0df503afed4757c04ba1d Mon Sep 17 00:00:00 2001
+From: Jose Abreu <Jose.Abreu@synopsys.com>
+Date: Fri, 13 Oct 2017 10:58:36 +0100
+Subject: net: stmmac: Use correct values in TQS/RQS fields
+
+From: Jose Abreu <Jose.Abreu@synopsys.com>
+
+commit 52a76235d0c4dd259cd0df503afed4757c04ba1d upstream.
+
+Currently we are using all the available fifo size in RQS and
+TQS fields. This will not work correctly in multi-queue IPs
+because the total fifo size must be split among the enabled queues.
+
+Correct this by computing the available fifo size per queue and
+setting the right value in TQS and RQS fields.
+
+Signed-off-by: Jose Abreu <joabreu@synopsys.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Joao Pinto <jpinto@synopsys.com>
+Cc: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+Cc: Alexandre Torgue <alexandre.torgue@st.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Niklas Cassel <niklas.cassel@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/stmicro/stmmac/common.h | 3 ++-
+ drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 15 +++++++++------
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22 ++++++++++++++++++++--
+ 3 files changed, 31 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -444,7 +444,8 @@ struct stmmac_dma_ops {
+ int rxfifosz);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz);
+- void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
++ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
++ int fifosz);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -271,9 +271,10 @@ static void dwmac4_dma_rx_chan_op_mode(v
+ }
+
+ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+- u32 channel)
++ u32 channel, int fifosz)
+ {
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
++ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+@@ -306,12 +307,14 @@ static void dwmac4_dma_tx_chan_op_mode(v
+ * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+ * with reset values: TXQEN off, TQS 256 bytes.
+ *
+- * Write the bits in both cases, since it will have no effect when RO.
+- * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
+- * be RO, however, writing the whole TQS field will result in a value
+- * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
++ * TXQEN must be written for multi-channel operation and TQS must
++ * reflect the available fifo size per queue (total fifo size / number
++ * of enabled queues).
+ */
+- mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
++ mtl_tx_op |= MTL_OP_MODE_TXQEN;
++ mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
++ mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
++
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ }
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1765,12 +1765,19 @@ static void stmmac_dma_operation_mode(st
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
++ int txfifosz = priv->plat->tx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
++ if (txfifosz == 0)
++ txfifosz = priv->dma_cap.tx_fifo_size;
++
++ /* Adjust for real per queue fifo size */
++ rxfifosz /= rx_channels_count;
++ txfifosz /= tx_channels_count;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+@@ -1798,7 +1805,8 @@ static void stmmac_dma_operation_mode(st
+ rxfifosz);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
++ txfifosz);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
+@@ -1967,15 +1975,25 @@ static void stmmac_tx_err(struct stmmac_
+ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+ {
++ u32 rx_channels_count = priv->plat->rx_queues_to_use;
++ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
++ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
++ if (txfifosz == 0)
++ txfifosz = priv->dma_cap.tx_fifo_size;
++
++ /* Adjust for real per queue fifo size */
++ rxfifosz /= rx_channels_count;
++ txfifosz /= tx_channels_count;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
+ rxfifosz);
+- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
++ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
++ txfifosz);
+ } else {
+ priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
+ rxfifosz);
--- /dev/null
+From ad1f824948e4ed886529219cf7cd717d078c630d Mon Sep 17 00:00:00 2001
+From: Israel Rukshin <israelr@mellanox.com>
+Date: Mon, 19 Nov 2018 10:58:51 +0000
+Subject: nvmet-rdma: Add unlikely for response allocated check
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+commit ad1f824948e4ed886529219cf7cd717d078c630d upstream.
+
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Cc: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -189,7 +189,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+ {
+ unsigned long flags;
+
+- if (rsp->allocated) {
++ if (unlikely(rsp->allocated)) {
+ kfree(rsp);
+ return;
+ }
--- /dev/null
+From 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc Mon Sep 17 00:00:00 2001
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Thu, 3 Jan 2019 23:05:31 +0530
+Subject: nvmet-rdma: fix null dereference under heavy load
+
+From: Raju Rangoju <rajur@chelsio.com>
+
+commit 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc upstream.
+
+Under heavy load if we don't have any pre-allocated rsps left, we
+dynamically allocate a rsp, but we are not actually allocating memory
+for nvme_completion (rsp->req.rsp). In such a case, accessing pointer
+fields (req->rsp->status) in nvmet_req_init() will result in crash.
+
+To fix this, allocate the memory for nvme_completion by calling
+nvmet_rdma_alloc_rsp()
+
+Fixes: 8407879c("nvmet-rdma:fix possible bogus dereference under heavy load")
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/target/rdma.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct
+ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
++static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
++ struct nvmet_rdma_rsp *r);
++static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
++ struct nvmet_rdma_rsp *r);
+
+ static struct nvmet_fabrics_ops nvmet_rdma_ops;
+
+@@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_que
+ spin_unlock_irqrestore(&queue->rsps_lock, flags);
+
+ if (unlikely(!rsp)) {
+- rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
++ int ret;
++
++ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (unlikely(!rsp))
+ return NULL;
++ ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
++ if (unlikely(ret)) {
++ kfree(rsp);
++ return NULL;
++ }
++
+ rsp->allocated = true;
+ }
+
+@@ -190,6 +202,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp
+ unsigned long flags;
+
+ if (unlikely(rsp->allocated)) {
++ nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
+ kfree(rsp);
+ return;
+ }
--- /dev/null
+From 60f1bf29c0b2519989927cae640cd1f50f59dc7f Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 11 Jan 2019 15:18:22 +0100
+Subject: s390/smp: Fix calling smp_call_ipl_cpu() from ipl CPU
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 60f1bf29c0b2519989927cae640cd1f50f59dc7f upstream.
+
+When calling smp_call_ipl_cpu() from the IPL CPU, we will try to read
+from pcpu_devices->lowcore. However, due to prefixing, that will result
+in reading from absolute address 0 on that CPU. We have to go via the
+actual lowcore instead.
+
+This means that right now, we will read lc->nodat_stack == 0 and
+therefore work on a very wrong stack.
+
+This BUG essentially broke rebooting under QEMU TCG (which will report
+a low address protection exception). And checking under KVM, it is
+also broken under KVM. With 1 VCPU it can be easily triggered.
+
+:/# echo 1 > /proc/sys/kernel/sysrq
+:/# echo b > /proc/sysrq-trigger
+[ 28.476745] sysrq: SysRq : Resetting
+[ 28.476793] Kernel stack overflow.
+[ 28.476817] CPU: 0 PID: 424 Comm: sh Not tainted 5.0.0-rc1+ #13
+[ 28.476820] Hardware name: IBM 2964 NE1 716 (KVM/Linux)
+[ 28.476826] Krnl PSW : 0400c00180000000 0000000000115c0c (pcpu_delegate+0x12c/0x140)
+[ 28.476861] R:0 T:1 IO:0 EX:0 Key:0 M:0 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3
+[ 28.476863] Krnl GPRS: ffffffffffffffff 0000000000000000 000000000010dff8 0000000000000000
+[ 28.476864] 0000000000000000 0000000000000000 0000000000ab7090 000003e0006efbf0
+[ 28.476864] 000000000010dff8 0000000000000000 0000000000000000 0000000000000000
+[ 28.476865] 000000007fffc000 0000000000730408 000003e0006efc58 0000000000000000
+[ 28.476887] Krnl Code: 0000000000115bfe: 4170f000 la %r7,0(%r15)
+[ 28.476887] 0000000000115c02: 41f0a000 la %r15,0(%r10)
+[ 28.476887] #0000000000115c06: e370f0980024 stg %r7,152(%r15)
+[ 28.476887] >0000000000115c0c: c0e5fffff86e brasl %r14,114ce8
+[ 28.476887] 0000000000115c12: 41f07000 la %r15,0(%r7)
+[ 28.476887] 0000000000115c16: a7f4ffa8 brc 15,115b66
+[ 28.476887] 0000000000115c1a: 0707 bcr 0,%r7
+[ 28.476887] 0000000000115c1c: 0707 bcr 0,%r7
+[ 28.476901] Call Trace:
+[ 28.476902] Last Breaking-Event-Address:
+[ 28.476920] [<0000000000a01c4a>] arch_call_rest_init+0x22/0x80
+[ 28.476927] Kernel panic - not syncing: Corrupt kernel stack, can't continue.
+[ 28.476930] CPU: 0 PID: 424 Comm: sh Not tainted 5.0.0-rc1+ #13
+[ 28.476932] Hardware name: IBM 2964 NE1 716 (KVM/Linux)
+[ 28.476932] Call Trace:
+
+Fixes: 2f859d0dad81 ("s390/smp: reduce size of struct pcpu")
+Cc: stable@vger.kernel.org # 4.0+
+Reported-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/s390/kernel/smp.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -387,9 +387,13 @@ void smp_call_online_cpu(void (*func)(vo
+ */
+ void smp_call_ipl_cpu(void (*func)(void *), void *data)
+ {
++ struct lowcore *lc = pcpu_devices->lowcore;
++
++ if (pcpu_devices[0].address == stap())
++ lc = &S390_lowcore;
++
+ pcpu_delegate(&pcpu_devices[0], func, data,
+- pcpu_devices->lowcore->panic_stack -
+- PANIC_FRAME_OFFSET + PAGE_SIZE);
++ lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
+ }
+
+ int smp_find_processor_id(u16 address)
perf-unwind-unwind-with-libdw-doesn-t-take-symfs-int.patch
perf-unwind-take-pgoff-into-account-when-reporting-e.patch
revert-seccomp-add-a-selftest-for-get_metadata.patch
+net-stmmac-use-correct-values-in-tqs-rqs-fields.patch
+kvm-x86-fix-a-4.14-backport-regression-related-to-userspace-guest-fpu.patch
+s390-smp-fix-calling-smp_call_ipl_cpu-from-ipl-cpu.patch
+nvmet-rdma-add-unlikely-for-response-allocated-check.patch
+nvmet-rdma-fix-null-dereference-under-heavy-load.patch