--- /dev/null
+From 33cc2c9667561b224215e6dfb5bf98e8fa17914e Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 5 Jul 2018 14:58:49 -0700
+Subject: acpi, nfit: Fix scrub idle detection
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 33cc2c9667561b224215e6dfb5bf98e8fa17914e upstream.
+
+The notification of scrub completion happens within the scrub workqueue.
+That can clearly race someone running scrub_show() and work_busy()
+before the workqueue has a chance to flush the recently completed work.
+Add a flag to reliably indicate the idle vs busy state. Without this
+change applications using poll(2) to wait for scrub-completion may
+falsely wakeup and read ARS as being busy even though the thread is
+going idle and then hang indefinitely.
+
+Fixes: bc6ba8085842 ("nfit, address-range-scrub: rework and simplify ARS...")
+Cc: <stable@vger.kernel.org>
+Reported-by: Vishal Verma <vishal.l.verma@intel.com>
+Tested-by: Vishal Verma <vishal.l.verma@intel.com>
+Reported-by: Lukasz Dorau <lukasz.dorau@intel.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/nfit/core.c | 44 +++++++++++++++++++++++++++++++++-----------
+ drivers/acpi/nfit/nfit.h | 1 +
+ 2 files changed, 34 insertions(+), 11 deletions(-)
+
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1272,7 +1272,7 @@ static ssize_t scrub_show(struct device
+
+ mutex_lock(&acpi_desc->init_mutex);
+ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
+- work_busy(&acpi_desc->dwork.work)
++ acpi_desc->scrub_busy
+ && !acpi_desc->cancel ? "+\n" : "\n");
+ mutex_unlock(&acpi_desc->init_mutex);
+ }
+@@ -2949,6 +2949,32 @@ static unsigned int __acpi_nfit_scrub(st
+ return 0;
+ }
+
++static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
++{
++ lockdep_assert_held(&acpi_desc->init_mutex);
++
++ acpi_desc->scrub_busy = 1;
++ /* note this should only be set from within the workqueue */
++ if (tmo)
++ acpi_desc->scrub_tmo = tmo;
++ queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
++}
++
++static void sched_ars(struct acpi_nfit_desc *acpi_desc)
++{
++ __sched_ars(acpi_desc, 0);
++}
++
++static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
++{
++ lockdep_assert_held(&acpi_desc->init_mutex);
++
++ acpi_desc->scrub_busy = 0;
++ acpi_desc->scrub_count++;
++ if (acpi_desc->scrub_count_state)
++ sysfs_notify_dirent(acpi_desc->scrub_count_state);
++}
++
+ static void acpi_nfit_scrub(struct work_struct *work)
+ {
+ struct acpi_nfit_desc *acpi_desc;
+@@ -2959,14 +2985,10 @@ static void acpi_nfit_scrub(struct work_
+ mutex_lock(&acpi_desc->init_mutex);
+ query_rc = acpi_nfit_query_poison(acpi_desc);
+ tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
+- if (tmo) {
+- queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+- acpi_desc->scrub_tmo = tmo;
+- } else {
+- acpi_desc->scrub_count++;
+- if (acpi_desc->scrub_count_state)
+- sysfs_notify_dirent(acpi_desc->scrub_count_state);
+- }
++ if (tmo)
++ __sched_ars(acpi_desc, tmo);
++ else
++ notify_ars_done(acpi_desc);
+ memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+ mutex_unlock(&acpi_desc->init_mutex);
+ }
+@@ -3047,7 +3069,7 @@ static int acpi_nfit_register_regions(st
+ break;
+ }
+
+- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
++ sched_ars(acpi_desc);
+ return 0;
+ }
+
+@@ -3249,7 +3271,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfi
+ }
+ }
+ if (scheduled) {
+- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
++ sched_ars(acpi_desc);
+ dev_dbg(dev, "ars_scan triggered\n");
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
+ unsigned int max_ars;
+ unsigned int scrub_count;
+ unsigned int scrub_mode;
++ unsigned int scrub_busy:1;
+ unsigned int cancel:1;
+ unsigned long dimm_cmd_force_en;
+ unsigned long bus_cmd_force_en;
--- /dev/null
+From 2fd8eb4ad87104c54800ef3cea498c92eb15c78a Mon Sep 17 00:00:00 2001
+From: Yandong Zhao <yandong77520@gmail.com>
+Date: Wed, 11 Jul 2018 19:06:28 +0800
+Subject: arm64: neon: Fix function may_use_simd() return error status
+
+From: Yandong Zhao <yandong77520@gmail.com>
+
+commit 2fd8eb4ad87104c54800ef3cea498c92eb15c78a upstream.
+
+It does not matter if the caller of may_use_simd() migrates to
+another cpu after the call, but it is still important that the
+kernel_neon_busy percpu instance that is read matches the cpu the
+task is running on at the time of the read.
+
+This means that raw_cpu_read() is not sufficient. kernel_neon_busy
+may appear true if the caller migrates during the execution of
+raw_cpu_read() and the next task to be scheduled in on the initial
+cpu calls kernel_neon_begin().
+
+This patch replaces raw_cpu_read() with this_cpu_read() to protect
+against this race.
+
+Cc: <stable@vger.kernel.org>
+Fixes: cb84d11e1625 ("arm64: neon: Remove support for nested or hardirq kernel-mode NEON")
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Yandong Zhao <yandong77520@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/simd.h | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/include/asm/simd.h
++++ b/arch/arm64/include/asm/simd.h
+@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
+ static __must_check inline bool may_use_simd(void)
+ {
+ /*
+- * The raw_cpu_read() is racy if called with preemption enabled.
+- * This is not a bug: kernel_neon_busy is only set when
+- * preemption is disabled, so we cannot migrate to another CPU
+- * while it is set, nor can we migrate to a CPU where it is set.
+- * So, if we find it clear on some CPU then we're guaranteed to
+- * find it clear on any CPU we could migrate to.
+- *
+- * If we are in between kernel_neon_begin()...kernel_neon_end(),
+- * the flag will be set, but preemption is also disabled, so we
+- * can't migrate to another CPU and spuriously see it become
+- * false.
++ * kernel_neon_busy is only set while preemption is disabled,
++ * and is clear whenever preemption is enabled. Since
++ * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
++ * cannot change under our feet -- if it's set we cannot be
++ * migrated, and if it's clear we cannot be migrated to a CPU
++ * where it is set.
+ */
+ return !in_irq() && !irqs_disabled() && !in_nmi() &&
+- !raw_cpu_read(kernel_neon_busy);
++ !this_cpu_read(kernel_neon_busy);
+ }
+
+ #else /* ! CONFIG_KERNEL_MODE_NEON */
--- /dev/null
+From 70dbcc2254fa2a9add74a122b9dac954c4736e01 Mon Sep 17 00:00:00 2001
+From: Tony Battersby <tonyb@cybernetics.com>
+Date: Wed, 11 Jul 2018 10:46:03 -0400
+Subject: bsg: fix bogus EINVAL on non-data commands
+
+From: Tony Battersby <tonyb@cybernetics.com>
+
+commit 70dbcc2254fa2a9add74a122b9dac954c4736e01 upstream.
+
+Fix a regression introduced in Linux kernel 4.17 where sending a SCSI
+command that does not transfer data (such as TEST UNIT READY) via
+/dev/bsg/* results in EINVAL.
+
+Fixes: 17cb960f29c2 ("bsg: split handling of SCSI CDBs vs transport requeues")
+Cc: <stable@vger.kernel.org> # 4.17+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bsg.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -268,8 +268,6 @@ bsg_map_hdr(struct request_queue *q, str
+ } else if (hdr->din_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
+ hdr->din_xfer_len, GFP_KERNEL);
+- } else {
+- ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
+ }
+
+ if (ret)
--- /dev/null
+From abe41184abac487264a4904bfcff2d5500dccce6 Mon Sep 17 00:00:00 2001
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Date: Tue, 10 Jul 2018 23:42:15 +0200
+Subject: i2c: recovery: if possible send STOP with recovery pulses
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+commit abe41184abac487264a4904bfcff2d5500dccce6 upstream.
+
+I2C clients may misunderstand recovery pulses if they can't read SDA to
+bail out early. In the worst case, as a write operation. To avoid that
+and if we can write SDA, try to send STOP to avoid the
+misinterpretation.
+
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Peter Rosin <peda@axentia.se>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/i2c-core-base.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_
+
+ val = !val;
+ bri->set_scl(adap, val);
+- ndelay(RECOVERY_NDELAY);
++
++ /*
++ * If we can set SDA, we will always create STOP here to ensure
++ * the additional pulses will do no harm. This is achieved by
++ * letting SDA follow SCL half a cycle later.
++ */
++ ndelay(RECOVERY_NDELAY / 2);
++ if (bri->set_sda)
++ bri->set_sda(adap, val);
++ ndelay(RECOVERY_NDELAY / 2);
+ }
+
+ /* check if recovery actually succeeded */
--- /dev/null
+From 54836e2d03e76d80aec3399368ffaf5b7caadd1b Mon Sep 17 00:00:00 2001
+From: Jon Hunter <jonathanh@nvidia.com>
+Date: Tue, 3 Jul 2018 09:55:43 +0100
+Subject: i2c: tegra: Fix NACK error handling
+
+From: Jon Hunter <jonathanh@nvidia.com>
+
+commit 54836e2d03e76d80aec3399368ffaf5b7caadd1b upstream.
+
+On Tegra30 Cardhu the PCA9546 I2C mux is not ACK'ing I2C commands on
+resume from suspend (which is caused by the reset signal for the I2C
+mux not being configured correctly). However, this NACK is causing the
+Tegra30 to hang on resuming from suspend which is not expected as we
+detect NACKs and handle them. The hang observed appears to occur when
+resetting the I2C controller to recover from the NACK.
+
+Commit 77821b4678f9 ("i2c: tegra: proper handling of error cases") added
+additional error handling for some error cases including NACK, however,
+it appears that this change conflicts with an early fix by commit
+f70893d08338 ("i2c: tegra: Add delay before resetting the controller
+after NACK"). After commit 77821b4678f9 was made we now disable 'packet
+mode' before the delay from commit f70893d08338 happens. Testing shows
+that moving the delay to before disabling 'packet mode' fixes the hang
+observed on Tegra30. The delay was added to give the I2C controller
+chance to send a stop condition and so it makes sense to move this to
+before we disable packet mode. Please note that packet mode is always
+enabled for Tegra.
+
+Fixes: 77821b4678f9 ("i2c: tegra: proper handling of error cases")
+Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-tegra.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -547,6 +547,14 @@ static int tegra_i2c_disable_packet_mode
+ {
+ u32 cnfg;
+
++ /*
++ * NACK interrupt is generated before the I2C controller generates
++ * the STOP condition on the bus. So wait for 2 clock periods
++ * before disabling the controller so that the STOP condition has
++ * been delivered properly.
++ */
++ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
++
+ cnfg = i2c_readl(i2c_dev, I2C_CNFG);
+ if (cnfg & I2C_CNFG_PACKET_MODE_EN)
+ i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
+@@ -708,15 +716,6 @@ static int tegra_i2c_xfer_msg(struct teg
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+- /*
+- * NACK interrupt is generated before the I2C controller generates
+- * the STOP condition on the bus. So wait for 2 clock periods
+- * before resetting the controller so that the STOP condition has
+- * been delivered properly.
+- */
+- if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
+- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+-
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
--- /dev/null
+From b697d7d8c741f27b728a878fc55852b06d0f6f5e Mon Sep 17 00:00:00 2001
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Wed, 20 Jun 2018 09:29:08 -0700
+Subject: IB/hfi1: Fix incorrect mixing of ERR_PTR and NULL return values
+
+From: Michael J. Ruhl <michael.j.ruhl@intel.com>
+
+commit b697d7d8c741f27b728a878fc55852b06d0f6f5e upstream.
+
+The __get_txreq() function can return a pointer, ERR_PTR(-EBUSY), or NULL.
+All of the relevant call sites look for IS_ERR, so the NULL return would
+lead to a NULL pointer exception.
+
+Do not use the ERR_PTR mechanism for this function.
+
+Update all call sites to handle the return value correctly.
+
+Clean up error paths to reflect return value.
+
+Fixes: 45842abbb292 ("staging/rdma/hfi1: move txreq header code")
+Cc: <stable@vger.kernel.org> # 4.9.x+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Kamenee Arumugam <kamenee.arumugam@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/rc.c | 2 +-
+ drivers/infiniband/hw/hfi1/uc.c | 4 ++--
+ drivers/infiniband/hw/hfi1/ud.c | 4 ++--
+ drivers/infiniband/hw/hfi1/verbs_txreq.c | 4 ++--
+ drivers/infiniband/hw/hfi1/verbs_txreq.h | 4 ++--
+ 5 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp,
+
+ lockdep_assert_held(&qp->s_lock);
+ ps->s_txreq = get_txreq(ps->dev, qp);
+- if (IS_ERR(ps->s_txreq))
++ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+--- a/drivers/infiniband/hw/hfi1/uc.c
++++ b/drivers/infiniband/hw/hfi1/uc.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015, 2016 Intel Corporation.
++ * Copyright(c) 2015 - 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp,
+ int middle = 0;
+
+ ps->s_txreq = get_txreq(ps->dev, qp);
+- if (IS_ERR(ps->s_txreq))
++ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015, 2016 Intel Corporation.
++ * Copyright(c) 2015 - 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -482,7 +482,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp,
+ u32 lid;
+
+ ps->s_txreq = get_txreq(ps->dev, qp);
+- if (IS_ERR(ps->s_txreq))
++ if (!ps->s_txreq)
+ goto bail_no_tx;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
++++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2016 - 2017 Intel Corporation.
++ * Copyright(c) 2016 - 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct h
+ struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
+ {
+- struct verbs_txreq *tx = ERR_PTR(-EBUSY);
++ struct verbs_txreq *tx = NULL;
+
+ write_seqlock(&dev->txwait_lock);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
++++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2016 Intel Corporation.
++ * Copyright(c) 2016 - 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_tx
+ if (unlikely(!tx)) {
+ /* call slow path to get the lock */
+ tx = __get_txreq(dev, qp);
+- if (IS_ERR(tx))
++ if (!tx)
+ return tx;
+ }
+ tx->qp = qp;
--- /dev/null
+From 7b72717a20bba8bdd01b14c0460be7d15061cd6b Mon Sep 17 00:00:00 2001
+From: Steve Wise <swise@opengridcomputing.com>
+Date: Thu, 21 Jun 2018 07:43:21 -0700
+Subject: iw_cxgb4: correctly enforce the max reg_mr depth
+
+From: Steve Wise <swise@opengridcomputing.com>
+
+commit 7b72717a20bba8bdd01b14c0460be7d15061cd6b upstream.
+
+The code was mistakenly using the length of the page array memory instead
+of the depth of the page array.
+
+This would cause MR creation to fail in some cases.
+
+Fixes: 8376b86de7d3 ("iw_cxgb4: Support the new memory registration API")
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/cxgb4/mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *i
+ {
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+
+- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
++ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
+ return -ENOMEM;
+
+ mhp->mpl[mhp->mpl_len++] = addr;
mm-do-not-bug_on-on-incorrect-length-in-__mm_populate.patch
tracing-reorder-display-of-tgid-to-be-after-pid.patch
kbuild-delete-install_fw_path-from-kbuild-documentation.patch
+acpi-nfit-fix-scrub-idle-detection.patch
+arm64-neon-fix-function-may_use_simd-return-error-status.patch
+tools-build-fix-escaping-in-.cmd-files-for-future-make.patch
+ib-hfi1-fix-incorrect-mixing-of-err_ptr-and-null-return-values.patch
+i2c-tegra-fix-nack-error-handling.patch
+i2c-recovery-if-possible-send-stop-with-recovery-pulses.patch
+iw_cxgb4-correctly-enforce-the-max-reg_mr-depth.patch
+xen-remove-global-bit-from-__default_kernel_pte_mask-for-pv-guests.patch
+xen-setup-pv-irq-ops-vector-earlier.patch
+bsg-fix-bogus-einval-on-non-data-commands.patch
--- /dev/null
+From 9feeb638cde083c737e295c0547f1b4f28e99583 Mon Sep 17 00:00:00 2001
+From: Paul Menzel <pmenzel@molgen.mpg.de>
+Date: Tue, 5 Jun 2018 19:00:22 +0200
+Subject: tools build: fix # escaping in .cmd files for future Make
+
+From: Paul Menzel <pmenzel@molgen.mpg.de>
+
+commit 9feeb638cde083c737e295c0547f1b4f28e99583 upstream.
+
+In 2016 GNU Make made a backwards incompatible change to the way '#'
+characters were handled in Makefiles when used inside functions or
+macros:
+
+http://git.savannah.gnu.org/cgit/make.git/commit/?id=c6966b323811c37acedff05b57
+
+Due to this change, when attempting to run `make prepare' I get a
+spurious make syntax error:
+
+ /home/earnest/linux/tools/objtool/.fixdep.o.cmd:1: *** missing separator. Stop.
+
+When inspecting `.fixdep.o.cmd' it includes two lines which use
+unescaped comment characters at the top:
+
+ \# cannot find fixdep (/home/earnest/linux/tools/objtool//fixdep)
+ \# using basic dep data
+
+This is because `tools/build/Build.include' prints these '\#'
+characters:
+
+ printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+ printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
+
+This completes commit 9564a8cf422d ("Kbuild: fix # escaping in .cmd files
+for future Make").
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=197847
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/build/Build.include | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/build/Build.include
++++ b/tools/build/Build.include
+@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
+ $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
+ rm -f $(depfile); \
+ mv -f $(dot-target).tmp $(dot-target).cmd, \
+- printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+- printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
++ printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
++ printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
+ cat $(depfile) >> $(dot-target).cmd; \
+ printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
+
--- /dev/null
+From e69b5d308da72cbf4e7911c3979f9a46d28532af Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Mon, 2 Jul 2018 12:00:18 +0200
+Subject: xen: remove global bit from __default_kernel_pte_mask for pv guests
+
+From: Juergen Gross <jgross@suse.com>
+
+commit e69b5d308da72cbf4e7911c3979f9a46d28532af upstream.
+
+When removing the global bit from __supported_pte_mask do the same for
+__default_kernel_pte_mask in order to avoid the WARN_ONCE() in
+check_pgprot() when setting a kernel pte before having called
+init_mem_mapping().
+
+Cc: <stable@vger.kernel.org> # 4.17
+Reported-by: Michael Young <m.a.young@durham.ac.uk>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/enlighten_pv.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1228,6 +1228,7 @@ asmlinkage __visible void __init xen_sta
+
+ /* Prevent unwanted bits from being set in PTEs. */
+ __supported_pte_mask &= ~_PAGE_GLOBAL;
++ __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
+
+ /*
+ * Prevent page tables from being allocated in highmem, even
--- /dev/null
+From 0ce0bba4e5e0eb9b753bb821785de5d23c494392 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 12 Jul 2018 17:40:34 +0200
+Subject: xen: setup pv irq ops vector earlier
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 0ce0bba4e5e0eb9b753bb821785de5d23c494392 upstream.
+
+Setting pv_irq_ops for Xen PV domains should be done as early as
+possible in order to support e.g. very early printk() usage.
+
+The same applies to xen_vcpu_info_reset(0), as it is needed for the
+pv irq ops.
+
+Move the call of xen_setup_machphys_mapping() after initializing the
+pv functions as it contains a WARN_ON(), too.
+
+Remove the no longer necessary conditional in xen_init_irq_ops()
+from PVH V1 times to make clear this is a PV only function.
+
+Cc: <stable@vger.kernel.org> # 4.14
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/enlighten_pv.c | 24 +++++++++++-------------
+ arch/x86/xen/irq.c | 4 +---
+ 2 files changed, 12 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1206,12 +1206,20 @@ asmlinkage __visible void __init xen_sta
+
+ xen_setup_features();
+
+- xen_setup_machphys_mapping();
+-
+ /* Install Xen paravirt ops */
+ pv_info = xen_info;
+ pv_init_ops.patch = paravirt_patch_default;
+ pv_cpu_ops = xen_cpu_ops;
++ xen_init_irq_ops();
++
++ /*
++ * Setup xen_vcpu early because it is needed for
++ * local_irq_disable(), irqs_disabled(), e.g. in printk().
++ *
++ * Don't do the full vcpu_info placement stuff until we have
++ * the cpu_possible_mask and a non-dummy shared_info.
++ */
++ xen_vcpu_info_reset(0);
+
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+
+@@ -1224,6 +1232,7 @@ asmlinkage __visible void __init xen_sta
+ * Set up some pagetable state before starting to set any ptes.
+ */
+
++ xen_setup_machphys_mapping();
+ xen_init_mmu_ops();
+
+ /* Prevent unwanted bits from being set in PTEs. */
+@@ -1249,20 +1258,9 @@ asmlinkage __visible void __init xen_sta
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
+- xen_init_irq_ops();
+-
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
+- /*
+- * Setup xen_vcpu early because idt_setup_early_handler needs it for
+- * local_irq_disable(), irqs_disabled().
+- *
+- * Don't do the full vcpu_info placement stuff until we have
+- * the cpu_possible_mask and a non-dummy shared_info.
+- */
+- xen_vcpu_info_reset(0);
+-
+ idt_setup_early_handler();
+
+ xen_init_capabilities();
+--- a/arch/x86/xen/irq.c
++++ b/arch/x86/xen/irq.c
+@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_o
+
+ void __init xen_init_irq_ops(void)
+ {
+- /* For PVH we use default pv_irq_ops settings. */
+- if (!xen_feature(XENFEAT_hvm_callback_vector))
+- pv_irq_ops = xen_irq_ops;
++ pv_irq_ops = xen_irq_ops;
+ x86_init.irqs.intr_init = xen_init_IRQ;
+ }