--- /dev/null
+From e46daee53bb50bde38805f1823a182979724c229 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 30 Oct 2018 22:12:56 +0100
+Subject: ARM: 8806/1: kprobes: Fix false positive with FORTIFY_SOURCE
+
+From: Kees Cook <keescook@chromium.org>
+
+commit e46daee53bb50bde38805f1823a182979724c229 upstream.
+
+The arm compiler internally interprets an inline assembly label
+as an unsigned long value, not a pointer. As a result, under
+CONFIG_FORTIFY_SOURCE, the address of a label has a size of 4 bytes,
+which was tripping the runtime checks. Instead, we can just cast the label
+(as done with the size calculations earlier).
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1639397
+
+Reported-by: William Cohen <wcohen@redhat.com>
+Fixes: 6974f0c4555e ("include/linux/string.h: add the option of fortified string.h functions")
+Cc: stable@vger.kernel.org
+Acked-by: Laura Abbott <labbott@redhat.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Tested-by: William Cohen <wcohen@redhat.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/probes/kprobes/opt-arm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct
+ }
+
+ /* Copy arch-dep-instance from template. */
+- memcpy(code, &optprobe_template_entry,
++ memcpy(code, (unsigned char *)optprobe_template_entry,
+ TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+ /* Adjust buffer according to instruction. */
--- /dev/null
+From b4aecf78083d8c6424657c1746c7c3de6e61669f Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 7 Dec 2018 12:47:10 +0000
+Subject: arm64: hibernate: Avoid sending cross-calling with interrupts disabled
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit b4aecf78083d8c6424657c1746c7c3de6e61669f upstream.
+
+Since commit 3b8c9f1cdfc50 ("arm64: IPI each CPU after invalidating the
+I-cache for kernel mappings"), a call to flush_icache_range() will use
+an IPI to cross-call other online CPUs so that any stale instructions
+are flushed from their pipelines. This triggers a WARN during the
+hibernation resume path, where flush_icache_range() is called with
+interrupts disabled and is therefore prone to deadlock:
+
+ | Disabling non-boot CPUs ...
+ | CPU1: shutdown
+ | psci: CPU1 killed.
+ | CPU2: shutdown
+ | psci: CPU2 killed.
+ | CPU3: shutdown
+ | psci: CPU3 killed.
+ | WARNING: CPU: 0 PID: 1 at ../kernel/smp.c:416 smp_call_function_many+0xd4/0x350
+ | Modules linked in:
+ | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.20.0-rc4 #1
+
+Since all secondary CPUs have been taken offline prior to invalidating
+the I-cache, there's actually no need for an IPI and we can simply call
+__flush_icache_range() instead, which performs the same cache maintenance
+without the cross-call.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 3b8c9f1cdfc50 ("arm64: IPI each CPU after invalidating the I-cache for kernel mappings")
+Reported-by: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+Tested-by: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+Tested-by: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/hibernate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *s
+ }
+
+ memcpy((void *)dst, src_start, length);
+- flush_icache_range(dst, dst + length);
++ __flush_icache_range(dst, dst + length);
+
+ pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+ if (pgd_none(READ_ONCE(*pgdp))) {
--- /dev/null
+From 59861547ec9a9736e7882f6fb0c096a720ff811a Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Mon, 12 Nov 2018 09:43:22 -0600
+Subject: dmaengine: cppi41: delete channel from pending list when stop channel
+
+From: Bin Liu <b-liu@ti.com>
+
+commit 59861547ec9a9736e7882f6fb0c096a720ff811a upstream.
+
+The driver defines three states for a cppi channel.
+- idle: .chan_busy == 0 && not in .pending list
+- pending: .chan_busy == 0 && in .pending list
+- busy: .chan_busy == 1 && not in .pending list
+
+There are cases in which the cppi channel could be in the pending state
+when cppi41_dma_issue_pending() is called after cppi41_runtime_suspend()
+is called.
+
+In these cases cppi41_stop_chan() fails to return the channel to the idle
+state: it only checks the .chan_busy flag, but not the .pending list, so
+later, when cppi41_runtime_resume() is called, the channels still on the
+.pending list will be transitioned to the busy state.
+
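+As a rough illustration of the failing sequence (using the state names
+listed above):
+
+  cppi41_runtime_suspend()    -> channels are idle
+  cppi41_dma_issue_pending()  -> channel is put on the .pending list (pending)
+  cppi41_stop_chan()          -> sees .chan_busy == 0 and returns, the channel
+                                 stays on the .pending list
+  cppi41_runtime_resume()     -> the queued channel is pushed to the hardware
+                                 (busy), although the client has stopped it
+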
+Removing channels from the .pending list solves the problem.
+
+Fixes: 975faaeb9985 ("dma: cppi41: start tear down only if channel is busy")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Bin Liu <b-liu@ti.com>
+Reviewed-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/ti/cppi41.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/ti/cppi41.c
++++ b/drivers/dma/ti/cppi41.c
+@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_c
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+- if (!cdd->chan_busy[desc_num])
++ if (!cdd->chan_busy[desc_num]) {
++ struct cppi41_channel *cc, *_ct;
++
++ /*
++ * channels might still be in the pendling list if
++ * cppi41_dma_issue_pending() is called after
++ * cppi41_runtime_suspend() is called
++ */
++ list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
++ if (cc != c)
++ continue;
++ list_del(&cc->node);
++ break;
++ }
+ return 0;
++ }
+
+ ret = cppi41_tear_down_chan(c);
+ if (ret)
--- /dev/null
+From ffe843b18211301ad25893eba09f402c19d12304 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 5 Dec 2018 18:33:59 +0200
+Subject: dmaengine: dw: Fix FIFO size for Intel Merrifield
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit ffe843b18211301ad25893eba09f402c19d12304 upstream.
+
+Intel Merrifield has a reduced FIFO size in its iDMA 32-bit controller,
+i.e. 512 bytes instead of 1024.
+
+Fix this by partitioning it as 64 bytes per channel.
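+(The controller has eight channels, so this amounts to 8 x 64 = 512 bytes
+in total, whereas the previous 128 bytes per channel assumed 8 x 128 = 1024.)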
+
+Note, in the future we might switch to a 'fifo-size' property instead of
+the hard-coded value.
+
+Fixes: 199244d69458 ("dmaengine: dw: add support of iDMA 32-bit hardware")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dw/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma
+ /*
+ * Program FIFO size of channels.
+ *
+- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
++ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
+ * slice FIFO on equal parts between channels.
+ */
+ static void idma32_fifo_partition(struct dw_dma *dw)
+ {
+- u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
++ u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
+ IDMA32C_FP_UPDATE;
+ u64 fifo_partition = 0;
+
+@@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct
+ /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ fifo_partition |= value << 32;
+
+- /* Program FIFO Partition registers - 128 bytes for each channel */
++ /* Program FIFO Partition registers - 64 bytes per channel */
+ idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+ }
--- /dev/null
+From b8603d2a5795c42f78998e70dc792336e0dc20c9 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 6 Nov 2018 03:40:33 +0000
+Subject: dmaengine: imx-sdma: implement channel termination via worker
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit b8603d2a5795c42f78998e70dc792336e0dc20c9 upstream.
+
+The dmaengine documentation states that device_terminate_all may be
+asynchronous and need not wait for the active transfers to stop.
+
+This allows us to move most of the functionality currently implemented
+in the sdma channel termination function into a worker, outside of any
+atomic context. Moving this out of atomic context has two benefits: we
+can now sleep while waiting for the channel to terminate instead of busy
+waiting, and the freeing of the DMA descriptors happens with IRQs
+enabled, getting rid of a warning in the DMA mapping code.
+
+As the termination is now async, we need to implement the
+device_synchronize dmaengine callback, which simply waits for the
+worker to finish its execution.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Robin Gong <yibin.gong@nxp.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
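+
+A minimal sketch of the client-side pattern this enables (generic helpers
+from <linux/dmaengine.h>, shown only for illustration):
+
+	dmaengine_terminate_async(chan); /* -> sdma_disable_channel_async() */
+	/* ... teardown that does not touch the DMA buffers ... */
+	dmaengine_synchronize(chan);     /* -> sdma_channel_synchronize(),
+	                                  *    flushes terminate_worker */
+
+	/* or, combining both steps: */
+	dmaengine_terminate_sync(chan);
+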
+ drivers/dma/imx-sdma.c | 51 ++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 38 insertions(+), 13 deletions(-)
+
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -32,6 +32,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/of_dma.h>
++#include <linux/workqueue.h>
+
+ #include <asm/irq.h>
+ #include <linux/platform_data/dma-imx-sdma.h>
+@@ -375,6 +376,7 @@ struct sdma_channel {
+ u32 shp_addr, per_addr;
+ enum dma_status status;
+ struct imx_dma_data data;
++ struct work_struct terminate_worker;
+ };
+
+ #define IMX_DMA_SG_LOOP BIT(0)
+@@ -1025,31 +1027,49 @@ static int sdma_disable_channel(struct d
+
+ return 0;
+ }
+-
+-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
++static void sdma_channel_terminate_work(struct work_struct *work)
+ {
+- struct sdma_channel *sdmac = to_sdma_chan(chan);
++ struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
++ terminate_worker);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+- sdma_disable_channel(chan);
+- spin_lock_irqsave(&sdmac->vc.lock, flags);
+- vchan_get_all_descriptors(&sdmac->vc, &head);
+- sdmac->desc = NULL;
+- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+- vchan_dma_desc_free_list(&sdmac->vc, &head);
+-
+ /*
+ * According to NXP R&D team a delay of one BD SDMA cost time
+ * (maximum is 1ms) should be added after disable of the channel
+ * bit, to ensure SDMA core has really been stopped after SDMA
+ * clients call .device_terminate_all.
+ */
+- mdelay(1);
++ usleep_range(1000, 2000);
++
++ spin_lock_irqsave(&sdmac->vc.lock, flags);
++ vchan_get_all_descriptors(&sdmac->vc, &head);
++ sdmac->desc = NULL;
++ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
++ vchan_dma_desc_free_list(&sdmac->vc, &head);
++}
++
++static int sdma_disable_channel_async(struct dma_chan *chan)
++{
++ struct sdma_channel *sdmac = to_sdma_chan(chan);
++
++ sdma_disable_channel(chan);
++
++ if (sdmac->desc)
++ schedule_work(&sdmac->terminate_worker);
+
+ return 0;
+ }
+
++static void sdma_channel_synchronize(struct dma_chan *chan)
++{
++ struct sdma_channel *sdmac = to_sdma_chan(chan);
++
++ vchan_synchronize(&sdmac->vc);
++
++ flush_work(&sdmac->terminate_worker);
++}
++
+ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+ {
+ struct sdma_engine *sdma = sdmac->sdma;
+@@ -1287,7 +1307,9 @@ static void sdma_free_chan_resources(str
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+
+- sdma_disable_channel_with_delay(chan);
++ sdma_disable_channel_async(chan);
++
++ sdma_channel_synchronize(chan);
+
+ if (sdmac->event_id0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+@@ -1993,6 +2015,8 @@ static int sdma_probe(struct platform_de
+
+ sdmac->channel = i;
+ sdmac->vc.desc_free = sdma_desc_free;
++ INIT_WORK(&sdmac->terminate_worker,
++ sdma_channel_terminate_work);
+ /*
+ * Add the channel to the DMAC list. Do not add channel 0 though
+ * because we need it internally in the SDMA driver. This also means
+@@ -2044,7 +2068,8 @@ static int sdma_probe(struct platform_de
+ sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+ sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
+ sdma->dma_device.device_config = sdma_config;
+- sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
++ sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
++ sdma->dma_device.device_synchronize = sdma_channel_synchronize;
+ sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
--- /dev/null
+From 64068853bc77786d1a28abb4087d6a3e93aedbe2 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 6 Nov 2018 03:40:37 +0000
+Subject: dmaengine: imx-sdma: use GFP_NOWAIT for dma descriptor allocations
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit 64068853bc77786d1a28abb4087d6a3e93aedbe2 upstream.
+
+DMA buffer descriptors aren't allocated from atomic context, so they
+can use the less heavyweight GFP_NOWAIT, which, unlike GFP_ATOMIC, does
+not dip into the emergency memory reserves.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Robin Gong <yibin.gong@nxp.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/imx-sdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1214,7 +1214,7 @@ static int sdma_alloc_bd(struct sdma_des
+ int ret = 0;
+
+ desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+- GFP_ATOMIC);
++ GFP_NOWAIT);
+ if (!desc->bd) {
+ ret = -ENOMEM;
+ goto out;
--- /dev/null
+From ebb853b1bd5f659b92c71dc6a9de44cfc37c78c0 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 6 Nov 2018 03:40:28 +0000
+Subject: Revert "dmaengine: imx-sdma: alloclate bd memory from dma pool"
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit ebb853b1bd5f659b92c71dc6a9de44cfc37c78c0 upstream.
+
+This reverts commit fe5b85c656bc. The SDMA engine needs the descriptors to
+be contiguous in memory. As the dma pool API is only able to provide a
+single descriptor per alloc invocation, there is no guarantee that multiple
+descriptors satisfy this requirement. Also, the code in question is broken,
+as it only allocates memory for a single descriptor, without looking at the
+number of descriptors required for the transfer, leading to out-of-bounds
+accesses when the descriptors are written.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Robin Gong <yibin.gong@nxp.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/imx-sdma.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -24,7 +24,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
+-#include <linux/dmapool.h>
+ #include <linux/firmware.h>
+ #include <linux/slab.h>
+ #include <linux/platform_device.h>
+@@ -376,7 +375,6 @@ struct sdma_channel {
+ u32 shp_addr, per_addr;
+ enum dma_status status;
+ struct imx_dma_data data;
+- struct dma_pool *bd_pool;
+ };
+
+ #define IMX_DMA_SG_LOOP BIT(0)
+@@ -1192,10 +1190,11 @@ out:
+
+ static int sdma_alloc_bd(struct sdma_desc *desc)
+ {
++ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ int ret = 0;
+
+- desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
+- &desc->bd_phys);
++ desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
++ GFP_ATOMIC);
+ if (!desc->bd) {
+ ret = -ENOMEM;
+ goto out;
+@@ -1206,7 +1205,9 @@ out:
+
+ static void sdma_free_bd(struct sdma_desc *desc)
+ {
+- dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
++ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
++
++ dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
+ }
+
+ static void sdma_desc_free(struct virt_dma_desc *vd)
+@@ -1272,10 +1273,6 @@ static int sdma_alloc_chan_resources(str
+ if (ret)
+ goto disable_clk_ahb;
+
+- sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+- sizeof(struct sdma_buffer_descriptor),
+- 32, 0);
+-
+ return 0;
+
+ disable_clk_ahb:
+@@ -1304,9 +1301,6 @@ static void sdma_free_chan_resources(str
+
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+-
+- dma_pool_destroy(sdmac->bd_pool);
+- sdmac->bd_pool = NULL;
+ }
+
+ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
--- /dev/null
+From c06abca69218ac42fa58d1ba7a7b0d9bab5f1b18 Mon Sep 17 00:00:00 2001
+From: Lucas Stach <l.stach@pengutronix.de>
+Date: Tue, 6 Nov 2018 03:40:25 +0000
+Subject: Revert "dmaengine: imx-sdma: Use GFP_NOWAIT for dma allocations"
+
+From: Lucas Stach <l.stach@pengutronix.de>
+
+commit c06abca69218ac42fa58d1ba7a7b0d9bab5f1b18 upstream.
+
+This reverts commit c1199875d327, as it depends on another commit
+that is going to be reverted.
+
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Robin Gong <yibin.gong@nxp.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/imx-sdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1194,8 +1194,8 @@ static int sdma_alloc_bd(struct sdma_des
+ {
+ int ret = 0;
+
+- desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+- &desc->bd_phys);
++ desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
++ &desc->bd_phys);
+ if (!desc->bd) {
+ ret = -ENOMEM;
+ goto out;
media-dvb-pll-don-t-re-validate-tuner-frequencies.patch
revert-mfd-cros_ec-use-devm_kzalloc-for-private-data.patch
parisc-enable-ffunction-sections-for-modules-on-32-bit-kernel.patch
+virtio-s390-avoid-race-on-vcdev-config.patch
+virtio-s390-fix-race-in-ccw_io_helper.patch
+vhost-vsock-fix-use-after-free-in-network-stack-callers.patch
+arm64-hibernate-avoid-sending-cross-calling-with-interrupts-disabled.patch
+sunrpc-fix-leak-of-krb5p-encode-pages.patch
+dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch
+revert-dmaengine-imx-sdma-use-gfp_nowait-for-dma-allocations.patch
+revert-dmaengine-imx-sdma-alloclate-bd-memory-from-dma-pool.patch
+dmaengine-imx-sdma-implement-channel-termination-via-worker.patch
+dmaengine-imx-sdma-use-gfp_nowait-for-dma-descriptor-allocations.patch
+dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
+arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch
+xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch
+xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
--- /dev/null
+From 8dae5398ab1ac107b1517e8195ed043d5f422bd0 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Fri, 30 Nov 2018 15:39:57 -0500
+Subject: SUNRPC: Fix leak of krb5p encode pages
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 8dae5398ab1ac107b1517e8195ed043d5f422bd0 upstream.
+
+call_encode can be invoked more than once per RPC call. Ensure that
+each call to gss_wrap_req_priv does not overwrite pointers to
+previously allocated memory.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Cc: stable@kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/auth_gss.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1768,6 +1768,7 @@ priv_release_snd_buf(struct rpc_rqst *rq
+ for (i=0; i < rqstp->rq_enc_pages_num; i++)
+ __free_page(rqstp->rq_enc_pages[i]);
+ kfree(rqstp->rq_enc_pages);
++ rqstp->rq_release_snd_buf = NULL;
+ }
+
+ static int
+@@ -1776,6 +1777,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
+ struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+ int first, last, i;
+
++ if (rqstp->rq_release_snd_buf)
++ rqstp->rq_release_snd_buf(rqstp);
++
+ if (snd_buf->page_len == 0) {
+ rqstp->rq_enc_pages_num = 0;
+ return 0;
--- /dev/null
+From 834e772c8db0c6a275d75315d90aba4ebbb1e249 Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi <stefanha@redhat.com>
+Date: Mon, 5 Nov 2018 10:35:47 +0000
+Subject: vhost/vsock: fix use-after-free in network stack callers
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+
+commit 834e772c8db0c6a275d75315d90aba4ebbb1e249 upstream.
+
+If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
+a struct vhost_vsock use-after-free is possible. This occurs because
+.release() does not wait for other CPUs to stop using struct
+vhost_vsock.
+
+Switch to an RCU-enabled hashtable (indexed by guest CID) so that
+.release() can wait for other CPUs by calling synchronize_rcu(). This
+also eliminates vhost_vsock_lock acquisition in the data path so it
+could have a positive effect on performance.
+
+This is CVE-2018-14625 "kernel: use-after-free Read in vhost_transport_send_pkt".
+
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
+Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
+Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
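+
+The lifetime rule the patch relies on, sketched from the hunks below:
+
+	/* data path: send_pkt / cancel_pkt */
+	rcu_read_lock();
+	vsock = vhost_vsock_get(cid);	/* hash_for_each_possible_rcu() */
+	/* ... queue or cancel packets on vsock ... */
+	rcu_read_unlock();
+
+	/* .release() */
+	spin_lock_bh(&vhost_vsock_lock);
+	hash_del_rcu(&vsock->hash);
+	spin_unlock_bh(&vhost_vsock_lock);
+	synchronize_rcu();	/* waits for all data-path readers to finish */
+	/* only now is it safe to tear the vhost_vsock down */
+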
+ drivers/vhost/vsock.c | 57 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 33 insertions(+), 24 deletions(-)
+
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -15,6 +15,7 @@
+ #include <net/sock.h>
+ #include <linux/virtio_vsock.h>
+ #include <linux/vhost.h>
++#include <linux/hashtable.h>
+
+ #include <net/af_vsock.h>
+ #include "vhost.h"
+@@ -27,14 +28,14 @@ enum {
+
+ /* Used to track all the vhost_vsock instances on the system. */
+ static DEFINE_SPINLOCK(vhost_vsock_lock);
+-static LIST_HEAD(vhost_vsock_list);
++static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+
+ struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+- /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+- struct list_head list;
++ /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
++ struct hlist_node hash;
+
+ struct vhost_work send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+ }
+
+-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
++/* Callers that dereference the return value must hold vhost_vsock_lock or the
++ * RCU read lock.
++ */
++static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+ {
+ struct vhost_vsock *vsock;
+
+- list_for_each_entry(vsock, &vhost_vsock_list, list) {
++ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock
+ return NULL;
+ }
+
+-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+-{
+- struct vhost_vsock *vsock;
+-
+- spin_lock_bh(&vhost_vsock_lock);
+- vsock = __vhost_vsock_get(guest_cid);
+- spin_unlock_bh(&vhost_vsock_lock);
+-
+- return vsock;
+-}
+-
+ static void
+ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_v
+ struct vhost_vsock *vsock;
+ int len = pkt->len;
+
++ rcu_read_lock();
++
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ if (!vsock) {
++ rcu_read_unlock();
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_v
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
++
++ rcu_read_unlock();
+ return len;
+ }
+
+@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_
+ struct vhost_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
++ int ret = -ENODEV;
+ LIST_HEAD(freeme);
+
++ rcu_read_lock();
++
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ if (!vsock)
+- return -ENODEV;
++ goto out;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_
+ vhost_poll_queue(&tx_vq->poll);
+ }
+
+- return 0;
++ ret = 0;
++out:
++ rcu_read_unlock();
++ return ret;
+ }
+
+ static struct virtio_vsock_pkt *
+@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct i
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+-
+- spin_lock_bh(&vhost_vsock_lock);
+- list_add_tail(&vsock->list, &vhost_vsock_list);
+- spin_unlock_bh(&vhost_vsock_lock);
+ return 0;
+
+ out:
+@@ -577,9 +577,13 @@ static int vhost_vsock_dev_release(struc
+ struct vhost_vsock *vsock = file->private_data;
+
+ spin_lock_bh(&vhost_vsock_lock);
+- list_del(&vsock->list);
++ if (vsock->guest_cid)
++ hash_del_rcu(&vsock->hash);
+ spin_unlock_bh(&vhost_vsock_lock);
+
++ /* Wait for other CPUs to finish using vsock */
++ synchronize_rcu();
++
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+@@ -620,12 +624,17 @@ static int vhost_vsock_set_cid(struct vh
+
+ /* Refuse if CID is already in use */
+ spin_lock_bh(&vhost_vsock_lock);
+- other = __vhost_vsock_get(guest_cid);
++ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return -EADDRINUSE;
+ }
++
++ if (vsock->guest_cid)
++ hash_del_rcu(&vsock->hash);
++
+ vsock->guest_cid = guest_cid;
++ hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return 0;
--- /dev/null
+From 2448a299ec416a80f699940a86f4a6d9a4f643b1 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Wed, 26 Sep 2018 18:48:29 +0200
+Subject: virtio/s390: avoid race on vcdev->config
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit 2448a299ec416a80f699940a86f4a6d9a4f643b1 upstream.
+
+Currently we have a race on vcdev->config in virtio_ccw_get_config() and
+in virtio_ccw_set_config().
+
+This normally does not cause problems, as these are usually infrequent
+operations. However, for some devices writing to/reading from the config
+space can be triggered through sysfs attributes. For these, userspace can
+force the race by increasing the frequency.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-2-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
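+
+A rough sketch of the race being closed (pre-patch code from the hunks
+below): both config paths touch the cached vcdev->config without a lock,
+
+	/* virtio_ccw_get_config() */
+	memcpy(vcdev->config, config_area, offset + len);
+	memcpy(buf, &vcdev->config[offset], len);
+
+	/* virtio_ccw_set_config() */
+	memcpy(&vcdev->config[offset], buf, len);
+	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+
+so a concurrent get/set can return to the caller, or write to the host, a
+torn mix of old and new config bytes. The patch serializes the
+vcdev->config accesses with vcdev->lock.
+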
+ drivers/s390/virtio/virtio_ccw.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -828,6 +828,7 @@ static void virtio_ccw_get_config(struct
+ int ret;
+ struct ccw1 *ccw;
+ void *config_area;
++ unsigned long flags;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+@@ -846,11 +847,13 @@ static void virtio_ccw_get_config(struct
+ if (ret)
+ goto out_free;
+
++ spin_lock_irqsave(&vcdev->lock, flags);
+ memcpy(vcdev->config, config_area, offset + len);
+- if (buf)
+- memcpy(buf, &vcdev->config[offset], len);
+ if (vcdev->config_ready < offset + len)
+ vcdev->config_ready = offset + len;
++ spin_unlock_irqrestore(&vcdev->lock, flags);
++ if (buf)
++ memcpy(buf, config_area + offset, len);
+
+ out_free:
+ kfree(config_area);
+@@ -864,6 +867,7 @@ static void virtio_ccw_set_config(struct
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+ void *config_area;
++ unsigned long flags;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+@@ -876,9 +880,11 @@ static void virtio_ccw_set_config(struct
+ /* Make sure we don't overwrite fields. */
+ if (vcdev->config_ready < offset)
+ virtio_ccw_get_config(vdev, 0, NULL, offset);
++ spin_lock_irqsave(&vcdev->lock, flags);
+ memcpy(&vcdev->config[offset], buf, len);
+ /* Write the config area to the host. */
+ memcpy(config_area, vcdev->config, sizeof(vcdev->config));
++ spin_unlock_irqrestore(&vcdev->lock, flags);
+ ccw->cmd_code = CCW_CMD_WRITE_CONF;
+ ccw->flags = 0;
+ ccw->count = offset + len;
--- /dev/null
+From 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Wed, 26 Sep 2018 18:48:30 +0200
+Subject: virtio/s390: fix race in ccw_io_helper()
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 upstream.
+
+While ccw_io_helper() seems to be intended to be exclusive, in the sense
+that it is supposed to facilitate I/O for at most one thread at any given
+time, there is actually nothing ensuring that threads won't pile up at
+vcdev->wait_q. If they do, all threads get woken up and see the status
+that belongs to some other request than their own. This can lead to bugs.
+For an example see:
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1788432
+
+This race normally does not cause any problems. The operations provided
+by struct virtio_config_ops are usually invoked in a well-defined
+sequence, normally don't fail, and are normally used quite infrequently
+too.
+
+Yet, if some of these operations are directly triggered via sysfs
+attributes, like in the case described by the referenced bug, userspace
+is given an opportunity to force races by increasing the frequency of the
+given operations.
+
+Let us fix the problem by ensuring that, for each device, we finish
+processing the previous request before starting a new one.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-3-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/virtio/virtio_ccw.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -56,6 +56,7 @@ struct virtio_ccw_device {
+ unsigned int revision; /* Transport revision */
+ wait_queue_head_t wait_q;
+ spinlock_t lock;
++ struct mutex io_lock; /* Serializes I/O requests */
+ struct list_head virtqueues;
+ unsigned long indicators;
+ unsigned long indicators2;
+@@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_c
+ unsigned long flags;
+ int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
++ mutex_lock(&vcdev->io_lock);
+ do {
+ spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+@@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_c
+ cpu_relax();
+ } while (ret == -EBUSY);
+ wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+- return ret ? ret : vcdev->err;
++ ret = ret ? ret : vcdev->err;
++ mutex_unlock(&vcdev->io_lock);
++ return ret;
+ }
+
+ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+@@ -1253,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_
+ init_waitqueue_head(&vcdev->wait_q);
+ INIT_LIST_HEAD(&vcdev->virtqueues);
+ spin_lock_init(&vcdev->lock);
++ mutex_init(&vcdev->io_lock);
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, vcdev);
--- /dev/null
+From 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Wed, 5 Dec 2018 14:22:39 +0200
+Subject: xhci: Prevent U1/U2 link pm states if exit latency is too long
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b upstream.
+
+Don't allow USB3 U1 or U2 if the latency to wake up from the U-state
+reaches the service interval for a periodic endpoint.
+
+This is according to xhci 1.1 specification section 4.23.5.2 extra note:
+
+"Software shall ensure that a device is prevented from entering a U-state
+ where its worst case exit latency approaches the ESIT."
+
+Allowing too long exit latencies for a periodic endpoint confuses the xHC's
+internal scheduling, and new devices may fail to enumerate with a
+"Not enough bandwidth for new device state" error from the host.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4514,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(str
+ {
+ unsigned long long timeout_ns;
+
++ /* Prevent U1 if service interval is shorter than U1 exit latency */
++ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
++ dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
++ return USB3_LPM_DISABLED;
++ }
++ }
++
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+@@ -4570,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(str
+ {
+ unsigned long long timeout_ns;
+
++ /* Prevent U2 if service interval is shorter than U2 exit latency */
++ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
++ dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
++ return USB3_LPM_DISABLED;
++ }
++ }
++
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
--- /dev/null
+From a7d57abcc8a5bdeb53bbf8e87558e8e0a2c2a29d Mon Sep 17 00:00:00 2001
+From: Sandeep Singh <sandeep.singh@amd.com>
+Date: Wed, 5 Dec 2018 14:22:38 +0200
+Subject: xhci: workaround CSS timeout on AMD SNPS 3.0 xHC
+
+From: Sandeep Singh <sandeep.singh@amd.com>
+
+commit a7d57abcc8a5bdeb53bbf8e87558e8e0a2c2a29d upstream.
+
+Occasionally the AMD SNPS 3.0 xHC does not respond to CSS when it is set,
+and it also does not flag anything on SRE and HCE to indicate internal xHC
+errors in the USBSTS register. This stalls the entire system-wide suspend,
+and there is no point in stalling just because the xHC CSS is not
+responding.
+
+To work around this problem, if the xHC does not flag anything on SRE and
+HCE, we can skip the CSS timeout and allow the system to continue the
+suspend. Once the system resumes, we can internally reset the controller
+using the XHCI_RESET_ON_RESUME quirk.
+
+Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+Signed-off-by: Sandeep Singh <Sandeep.Singh@amd.com>
+cc: Nehal Shah <Nehal-bakulchandra.Shah@amd.com>
+Cc: <stable@vger.kernel.org>
+Tested-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-pci.c | 4 ++++
+ drivers/usb/host/xhci.c | 26 ++++++++++++++++++++++----
+ drivers/usb/host/xhci.h | 3 +++
+ 3 files changed, 29 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -132,6 +132,10 @@ static void xhci_pci_quirks(struct devic
+ pdev->device == 0x43bb))
+ xhci->quirks |= XHCI_SUSPEND_DELAY;
+
++ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++ (pdev->device == 0x15e0 || pdev->device == 0x15e1))
++ xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
++
+ if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -968,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci,
+ unsigned int delay = XHCI_MAX_HALT_USEC;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
++ u32 res;
+
+ if (!hcd->state)
+ return 0;
+@@ -1021,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci,
+ command = readl(&xhci->op_regs->command);
+ command |= CMD_CSS;
+ writel(command, &xhci->op_regs->command);
++ xhci->broken_suspend = 0;
+ if (xhci_handshake(&xhci->op_regs->status,
+ STS_SAVE, 0, 10 * 1000)) {
+- xhci_warn(xhci, "WARN: xHC save state timeout\n");
+- spin_unlock_irq(&xhci->lock);
+- return -ETIMEDOUT;
++ /*
++ * AMD SNPS xHC 3.0 occasionally does not clear the
++ * SSS bit of USBSTS and when driver tries to poll
++ * to see if the xHC clears BIT(8) which never happens
++ * and driver assumes that controller is not responding
++ * and times out. To workaround this, its good to check
++ * if SRE and HCE bits are not set (as per xhci
++ * Section 5.4.2) and bypass the timeout.
++ */
++ res = readl(&xhci->op_regs->status);
++ if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
++ (((res & STS_SRE) == 0) &&
++ ((res & STS_HCE) == 0))) {
++ xhci->broken_suspend = 1;
++ } else {
++ xhci_warn(xhci, "WARN: xHC save state timeout\n");
++ spin_unlock_irq(&xhci->lock);
++ return -ETIMEDOUT;
++ }
+ }
+ spin_unlock_irq(&xhci->lock);
+
+@@ -1078,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, b
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
+ spin_lock_irq(&xhci->lock);
+- if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
+ hibernated = true;
+
+ if (!hibernated) {
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1847,6 +1847,7 @@ struct xhci_hcd {
+ #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
+ #define XHCI_ZERO_64B_REGS BIT_ULL(32)
+ #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
++#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+@@ -1876,6 +1877,8 @@ struct xhci_hcd {
+ void *dbc;
+ /* platform-specific data -- must come last */
+ unsigned long priv[0] __aligned(sizeof(s64));
++ /* Broken Suspend flag for SNPS Suspend resume issue */
++ u8 broken_suspend;
+ };
+
+ /* Platform specific overrides to generic XHCI hc_driver ops */