--- /dev/null
+From b8c9c6fa2002b8fd4a9710f76f80f99c6046d48c Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@ti.com>
+Date: Tue, 31 Oct 2017 15:26:00 +0200
+Subject: ARM: dts: dra7: Disable USB metastability workaround for USB2
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit b8c9c6fa2002b8fd4a9710f76f80f99c6046d48c upstream.
+
+The metastability workaround causes Erratic errors [1]
+on the HighSpeed USB PHY which can cause up to 2 seconds
+delay in enumerating to a USB host while in Gadget mode.
+
+Disable the Run/Stop metastability workaround to avoid this
+ill effect. We are aware that this opens up the opportunity
+for Run/Stop metastability, however this issue has never been
+observed in TI releases so we think that Run/Stop metastability
+is a lesser evil than the PHY Erratic errors. So disable it.
+
+[1] USB controller trace during gadget enumeration
+ irq/90-dwc3-969 [000] d... 52.323145: dwc3_event: event
+ (00000901): Erratic Error [U0]
+ irq/90-dwc3-969 [000] d... 52.560646: dwc3_event: event
+ (00000901): Erratic Error [U0]
+ irq/90-dwc3-969 [000] d... 52.798144: dwc3_event: event
+ (00000901): Erratic Error [U0]
+
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Acked-by: Felipe Balbi <felipe.balbi@linux.intel.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/dra7.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -1540,6 +1540,7 @@
+ dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
++ snps,dis_metastability_quirk;
+ };
+ };
+
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:56 -0600
+Subject: ASoC: davinci: Kill BUG_ON() usage
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-16-mathieu.poirier@linaro.org>
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit befff4fbc27e19b14b343eb4a65d8f75d38b6230 upstream
+
+Don't use BUG_ON() for a non-critical sanity check on production
+systems. This patch replaces with a softer WARN_ON() and an error
+path.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/davinci/davinci-mcasp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -1748,7 +1748,8 @@ static int davinci_mcasp_get_dma_type(st
+ PTR_ERR(chan));
+ return PTR_ERR(chan);
+ }
+- BUG_ON(!chan->device || !chan->device->dev);
++ if (WARN_ON(!chan->device || !chan->device->dev))
++ return -EINVAL;
+
+ if (chan->device->dev->of_node)
+ ret = of_property_read_string(chan->device->dev->of_node,
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:57 -0600
+Subject: ASoC: davinci-mcasp: Fix an error handling path in 'davinci_mcasp_probe()'
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-17-mathieu.poirier@linaro.org>
+
+From: Christophe Jaillet <christophe.jaillet@wanadoo.fr>
+
+commit 1b8b68b05d1868404316d32e20782b00442aba90 upstream
+
+All error handling paths in this function 'goto err' except this one.
+
+If one of the 2 previous memory allocations fails, we should go through
+the existing error handling path. Otherwise there is an unbalanced
+pm_runtime_enable()/pm_runtime_disable().
+
+Fixes: dd55ff8346a9 ("ASoC: davinci-mcasp: Add set_tdm_slots() support")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/davinci/davinci-mcasp.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -2022,8 +2022,10 @@ static int davinci_mcasp_probe(struct pl
+ GFP_KERNEL);
+
+ if (!mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list ||
+- !mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list)
+- return -ENOMEM;
++ !mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list) {
++ ret = -ENOMEM;
++ goto err;
++ }
+
+ ret = davinci_mcasp_set_ch_constraints(mcasp);
+ if (ret)
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:55 -0600
+Subject: ASoC: davinci-mcasp: Handle return value of devm_kasprintf
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-15-mathieu.poirier@linaro.org>
+
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+
+commit 0c8b794c4a10aaf7ac0d4a49be2b2638e2038adb upstream
+
+devm_kasprintf() can fail here and we must check its return value.
+
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Acked-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/davinci/davinci-mcasp.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -1894,6 +1894,10 @@ static int davinci_mcasp_probe(struct pl
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
+ dev_name(&pdev->dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_common_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+@@ -1911,6 +1915,10 @@ static int davinci_mcasp_probe(struct pl
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
+ dev_name(&pdev->dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_rx_irq_handler,
+ IRQF_ONESHOT, irq_name, mcasp);
+@@ -1926,6 +1934,10 @@ static int davinci_mcasp_probe(struct pl
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
+ dev_name(&pdev->dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err;
++ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ davinci_mcasp_tx_irq_handler,
+ IRQF_ONESHOT, irq_name, mcasp);
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:47 -0600
+Subject: ASoC: tlv320aic31xx: Handle inverted BCLK in non-DSP modes
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-7-mathieu.poirier@linaro.org>
+
+From: "Andrew F. Davis" <afd@ti.com>
+
+commit dcb407b257af06fa58b0544ec01ec9e0d3927e02 upstream
+
+Currently BCLK inverting is only handled when the DAI format is
+DSP, but the BCLK may be inverted in any supported mode. Without
+this using this CODEC in any other mode than DSP with the BCLK
+inverted leads to bad sampling timing and very poor audio quality.
+
+Signed-off-by: Andrew F. Davis <afd@ti.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/tlv320aic31xx.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/sound/soc/codecs/tlv320aic31xx.c
++++ b/sound/soc/codecs/tlv320aic31xx.c
+@@ -924,6 +924,18 @@ static int aic31xx_set_dai_fmt(struct sn
+ return -EINVAL;
+ }
+
++ /* signal polarity */
++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++ case SND_SOC_DAIFMT_NB_NF:
++ break;
++ case SND_SOC_DAIFMT_IB_NF:
++ iface_reg2 |= AIC31XX_BCLKINV_MASK;
++ break;
++ default:
++ dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
++ return -EINVAL;
++ }
++
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+@@ -931,16 +943,12 @@ static int aic31xx_set_dai_fmt(struct sn
+ case SND_SOC_DAIFMT_DSP_A:
+ dsp_a_val = 0x1;
+ case SND_SOC_DAIFMT_DSP_B:
+- /* NOTE: BCLKINV bit value 1 equas NB and 0 equals IB */
+- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+- case SND_SOC_DAIFMT_NB_NF:
+- iface_reg2 |= AIC31XX_BCLKINV_MASK;
+- break;
+- case SND_SOC_DAIFMT_IB_NF:
+- break;
+- default:
+- return -EINVAL;
+- }
++ /*
++ * NOTE: This CODEC samples on the falling edge of BCLK in
++ * DSP mode, this is inverted compared to what most DAIs
++ * expect, so we invert for this mode
++ */
++ iface_reg2 ^= AIC31XX_BCLKINV_MASK;
+ iface_reg1 |= (AIC31XX_DSP_MODE <<
+ AIC31XX_IFACE1_DATATYPE_SHIFT);
+ break;
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:54 -0600
+Subject: ASoC: tlv320dac31xx: mark expected switch fall-through
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-14-mathieu.poirier@linaro.org>
+
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+
+commit 09fc38c1af4cb888255e9ecf267bf9757c12885d upstream
+
+In preparation to enabling -Wimplicit-fallthrough, mark switch cases
+where we are expecting to fall through.
+
+Addresses-Coverity-ID: 1195220
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/tlv320aic31xx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/tlv320aic31xx.c
++++ b/sound/soc/codecs/tlv320aic31xx.c
+@@ -941,7 +941,7 @@ static int aic31xx_set_dai_fmt(struct sn
+ case SND_SOC_DAIFMT_I2S:
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+- dsp_a_val = 0x1;
++ dsp_a_val = 0x1; /* fall through */
+ case SND_SOC_DAIFMT_DSP_B:
+ /*
+ * NOTE: This CODEC samples on the falling edge of BCLK in
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:59 -0600
+Subject: cpufreq: ti-cpufreq: add missing of_node_put()
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-19-mathieu.poirier@linaro.org>
+
+From: Zumeng Chen <zumeng.chen@gmail.com>
+
+commit 248aefdcc3a7e0cfbd014946b4dead63e750e71b upstream
+
+call of_node_put to release the refcount of np.
+
+Signed-off-by: Zumeng Chen <zumeng.chen@gmail.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/ti-cpufreq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -205,6 +205,7 @@ static int ti_cpufreq_init(void)
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(ti_cpufreq_of_match, np);
++ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:58 -0600
+Subject: i2c: omap: Trigger bus recovery in lockup case
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-18-mathieu.poirier@linaro.org>
+
+From: Claudio Foellmi <claudio.foellmi@ergon.ch>
+
+commit 93367bfca98f36cece57c01dbce6ea1b4ac58245 upstream
+
+A very conservative check for bus activity (to prevent interference
+in multimaster setups) prevented the bus recovery methods from being
+triggered in the case that SDA or SCL was stuck low.
+This defeats the purpose of the recovery mechanism, which was introduced
+for exactly this situation (a slave device keeping SDA pulled down).
+
+Also added a check to make sure SDA is low before attempting recovery.
+If SDA is not stuck low, recovery will not help, so we can skip it.
+
+Note that bus lockups can persist across reboots. The only other options
+are to reset or power cycle the offending slave device, and many i2c
+slaves do not even have a reset pin.
+
+If we see that one of the lines is low for the entire timeout duration,
+we can actually be sure that there is no other master driving the bus.
+It is therefore safe for us to attempt a bus recovery.
+
+Signed-off-by: Claudio Foellmi <claudio.foellmi@ergon.ch>
+Tested-by: Vignesh R <vigneshr@ti.com>
+Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
+[wsa: fixed one return code to -EBUSY]
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-omap.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -487,6 +487,22 @@ static int omap_i2c_init(struct omap_i2c
+ }
+
+ /*
++ * Try bus recovery, but only if SDA is actually low.
++ */
++static int omap_i2c_recover_bus(struct omap_i2c_dev *omap)
++{
++ u16 systest;
++
++ systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG);
++ if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
++ (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC))
++ return 0; /* bus seems to already be fine */
++ if (!(systest & OMAP_I2C_SYSTEST_SCL_I_FUNC))
++ return -EBUSY; /* recovery would not fix SCL */
++ return i2c_recover_bus(&omap->adapter);
++}
++
++/*
+ * Waiting on Bus Busy
+ */
+ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
+@@ -496,7 +512,7 @@ static int omap_i2c_wait_for_bb(struct o
+ timeout = jiffies + OMAP_I2C_TIMEOUT;
+ while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
+ if (time_after(jiffies, timeout))
+- return i2c_recover_bus(&omap->adapter);
++ return omap_i2c_recover_bus(omap);
+ msleep(1);
+ }
+
+@@ -577,8 +593,13 @@ static int omap_i2c_wait_for_bb_valid(st
+ }
+
+ if (time_after(jiffies, timeout)) {
++ /*
++ * SDA or SCL were low for the entire timeout without
++ * any activity detected. Most likely, a slave is
++ * locking up the bus with no master driving the clock.
++ */
+ dev_warn(omap->dev, "timeout waiting for bus ready\n");
+- return -ETIMEDOUT;
++ return omap_i2c_recover_bus(omap);
+ }
+
+ msleep(1);
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:53 -0600
+Subject: mailbox: reset txdone_method TXDONE_BY_POLL if client knows_txdone
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-13-mathieu.poirier@linaro.org>
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+commit 33cd7123ac0ba5360656ae27db453de5b9aa711f upstream
+
+Currently the mailbox framework sets txdone_method to TXDONE_BY_POLL if
+the controller sets txdone_by_poll. However some clients can have a
+mechanism to do TXDONE_BY_ACK which they can specify by knows_txdone.
+However, we end up setting both TXDONE_BY_POLL and TXDONE_BY_ACK in that
+case. In such scenario, we may end up with below warnings as the tx
+ticker is run both by mailbox framework and the client.
+
+WARNING: CPU: 1 PID: 0 at kernel/time/hrtimer.c:805 hrtimer_forward+0x88/0xd8
+CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.12.0-rc5 #242
+Hardware name: ARM LTD ARM Juno Development Platform
+task: ffff8009768ca700 task.stack: ffff8009768f8000
+PC is at hrtimer_forward+0x88/0xd8
+LR is at txdone_hrtimer+0xd4/0xf8
+Call trace:
+ hrtimer_forward+0x88/0xd8
+ __hrtimer_run_queues+0xe4/0x158
+ hrtimer_interrupt+0xa4/0x220
+ arch_timer_handler_phys+0x30/0x40
+ handle_percpu_devid_irq+0x78/0x130
+ generic_handle_irq+0x24/0x38
+ __handle_domain_irq+0x5c/0xb8
+ gic_handle_irq+0x54/0xa8
+
+This patch fixes the issue by resetting TXDONE_BY_POLL if client has set
+knows_txdone.
+
+Cc: Alexey Klimov <alexey.klimov@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mailbox/mailbox.c | 4 ++--
+ drivers/mailbox/pcc.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(s
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+- chan->txdone_method |= TXDONE_BY_ACK;
++ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+@@ -420,7 +420,7 @@ void mbox_free_channel(struct mbox_chan
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
++ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->mbox->dev->driver->owner);
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -266,7 +266,7 @@ struct mbox_chan *pcc_mbox_request_chann
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+- chan->txdone_method |= TXDONE_BY_ACK;
++ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+@@ -312,7 +312,7 @@ void pcc_mbox_free_channel(struct mbox_c
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
++ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:46 -0600
+Subject: mfd: palmas: Assign the right powerhold mask for tps65917
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-6-mathieu.poirier@linaro.org>
+
+From: Keerthy <j-keerthy@ti.com>
+
+commit 572ff4d560be3784205b224cd67d6715620092d7 upstream
+
+The powerhold mask for TPS65917 is different when compared to
+the other palmas versions. Hence assign the right mask that enables
+power off of tps65917 pmic correctly.
+
+Signed-off-by: Keerthy <j-keerthy@ti.com>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/palmas.c | 10 +++++++++-
+ include/linux/mfd/palmas.h | 3 +++
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/mfd/palmas.c
++++ b/drivers/mfd/palmas.c
+@@ -430,6 +430,7 @@ static void palmas_power_off(void)
+ {
+ unsigned int addr;
+ int ret, slave;
++ u8 powerhold_mask;
+ struct device_node *np = palmas_dev->dev->of_node;
+
+ if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
+@@ -437,8 +438,15 @@ static void palmas_power_off(void)
+ PALMAS_PRIMARY_SECONDARY_PAD2);
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+
++ if (of_device_is_compatible(np, "ti,tps65917"))
++ powerhold_mask =
++ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
++ else
++ powerhold_mask =
++ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
++
+ ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
+- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
++ powerhold_mask, 0);
+ if (ret)
+ dev_err(palmas_dev->dev,
+ "Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
+--- a/include/linux/mfd/palmas.h
++++ b/include/linux/mfd/palmas.h
+@@ -3733,6 +3733,9 @@ enum usb_irq_events {
+ #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
+ #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
++/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
++#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
++
+ /* Registers for function RESOURCE */
+ #define TPS65917_REGEN1_CTRL 0x2
+ #define TPS65917_PLLEN_CTRL 0x3
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:52 -0600
+Subject: misc: pci_endpoint_test: Fix BUG_ON error during pci_disable_msi()
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-12-mathieu.poirier@linaro.org>
+
+From: Kishon Vijay Abraham I <kishon@ti.com>
+
+commit b7636e816adcb52bc96b6fb7bc9d141cbfd17ddb upstream
+
+pci_disable_msi() throws a Kernel BUG if the driver has successfully
+requested an IRQ and not released it. Fix it here by freeing IRQs before
+invoking pci_disable_msi().
+
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/pci_endpoint_test.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -92,6 +92,7 @@ struct pci_endpoint_test {
+ void __iomem *bar[6];
+ struct completion irq_raised;
+ int last_irq;
++ int num_irqs;
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
+ struct miscdevice miscdev;
+@@ -514,6 +515,7 @@ static int pci_endpoint_test_probe(struc
+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (irq < 0)
+ dev_err(dev, "failed to get MSI interrupts\n");
++ test->num_irqs = irq;
+ }
+
+ err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
+@@ -581,6 +583,9 @@ err_iounmap:
+ pci_iounmap(pdev, test->bar[bar]);
+ }
+
++ for (i = 0; i < irq; i++)
++ devm_free_irq(dev, pdev->irq + i, test);
++
+ err_disable_msi:
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+@@ -594,6 +599,7 @@ err_disable_pdev:
+ static void pci_endpoint_test_remove(struct pci_dev *pdev)
+ {
+ int id;
++ int i;
+ enum pci_barno bar;
+ struct pci_endpoint_test *test = pci_get_drvdata(pdev);
+ struct miscdevice *misc_device = &test->miscdev;
+@@ -609,6 +615,8 @@ static void pci_endpoint_test_remove(str
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
++ for (i = 0; i < test->num_irqs; i++)
++ devm_free_irq(&pdev->dev, pdev->irq + i, test);
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:50 -0600
+Subject: misc: pci_endpoint_test: Prevent some integer overflows
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-10-mathieu.poirier@linaro.org>
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 378f79cab12b669928f3a4037f023837ead2ce0c upstream
+
+"size + max" can have an arithmetic overflow when we're allocating:
+
+ orig_src_addr = dma_alloc_coherent(dev, size + alignment, ...
+
+I've added a few checks to prevent that.
+
+Fixes: 13107c60681f ("misc: pci_endpoint_test: Add support to provide aligned buffer addresses")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/pci_endpoint_test.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -226,6 +226,9 @@ static bool pci_endpoint_test_copy(struc
+ u32 src_crc32;
+ u32 dst_crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
+ orig_src_addr = dma_alloc_coherent(dev, size + alignment,
+ &orig_src_phys_addr, GFP_KERNEL);
+ if (!orig_src_addr) {
+@@ -311,6 +314,9 @@ static bool pci_endpoint_test_write(stru
+ size_t alignment = test->alignment;
+ u32 crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
+ orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
+ GFP_KERNEL);
+ if (!orig_addr) {
+@@ -369,6 +375,9 @@ static bool pci_endpoint_test_read(struc
+ size_t alignment = test->alignment;
+ u32 crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
+ orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
+ GFP_KERNEL);
+ if (!orig_addr) {
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:49 -0600
+Subject: mtd: spi-nor: cadence-quadspi: add a delay in write sequence
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-9-mathieu.poirier@linaro.org>
+
+From: Vignesh R <vigneshr@ti.com>
+
+commit 61dc8493bae9ba82a1c72edbc6c6065f6a94456a upstream
+
+As per 66AK2G02 TRM[1] SPRUHY8F section 11.15.5.3 Indirect Access
+Controller programming sequence, a delay equal to couple of QSPI master
+clock(~5ns) is required after setting CQSPI_REG_INDIRECTWR_START bit and
+writing data to the flash. Introduce a quirk flag CQSPI_NEEDS_WR_DELAY
+to handle this and set this flag for TI 66AK2G SoC.
+
+[1]http://www.ti.com/lit/ug/spruhy8f/spruhy8f.pdf
+
+Signed-off-by: Vignesh R <vigneshr@ti.com>
+Acked-by: Marek Vasut <marek.vasut@gmail.com>
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@wedev4u.fr>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/cadence-quadspi.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -38,6 +38,9 @@
+ #define CQSPI_NAME "cadence-qspi"
+ #define CQSPI_MAX_CHIPSELECT 16
+
++/* Quirks */
++#define CQSPI_NEEDS_WR_DELAY BIT(0)
++
+ struct cqspi_st;
+
+ struct cqspi_flash_pdata {
+@@ -76,6 +79,7 @@ struct cqspi_st {
+ u32 fifo_depth;
+ u32 fifo_width;
+ u32 trigger_address;
++ u32 wr_delay;
+ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+ };
+
+@@ -623,6 +627,15 @@ static int cqspi_indirect_write_execute(
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
++ /*
++ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
++ * Controller programming sequence, couple of cycles of
++ * QSPI_REF_CLK delay is required for the above bit to
++ * be internally synchronized by the QSPI module. Provide 5
++ * cycles of delay.
++ */
++ if (cqspi->wr_delay)
++ ndelay(cqspi->wr_delay);
+
+ while (remaining > 0) {
+ size_t write_words, mod_bytes;
+@@ -1184,6 +1197,7 @@ static int cqspi_probe(struct platform_d
+ struct cqspi_st *cqspi;
+ struct resource *res;
+ struct resource *res_ahb;
++ unsigned long data;
+ int ret;
+ int irq;
+
+@@ -1241,6 +1255,10 @@ static int cqspi_probe(struct platform_d
+ }
+
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
++ data = (unsigned long)of_device_get_match_data(dev);
++ if (data & CQSPI_NEEDS_WR_DELAY)
++ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
++ cqspi->master_ref_clk_hz);
+
+ ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+ pdev->name, cqspi);
+@@ -1312,7 +1330,14 @@ static const struct dev_pm_ops cqspi__de
+ #endif
+
+ static const struct of_device_id cqspi_dt_ids[] = {
+- {.compatible = "cdns,qspi-nor",},
++ {
++ .compatible = "cdns,qspi-nor",
++ .data = (void *)0,
++ },
++ {
++ .compatible = "ti,k2g-qspi",
++ .data = (void *)CQSPI_NEEDS_WR_DELAY,
++ },
+ { /* end of table */ }
+ };
+
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:48 -0600
+Subject: mtd: spi-nor: enable 4B opcodes for mx66l51235l
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-8-mathieu.poirier@linaro.org>
+
+From: Roman Yeryomin <leroi.lists@gmail.com>
+
+commit d342b6a973af459f6104cad6effc8efc71a0558d upstream
+
+Signed-off-by: Roman Yeryomin <roman@advem.lv>
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@wedev4u.fr>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/spi-nor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1030,7 +1030,7 @@ static const struct flash_info spi_nor_i
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:51 -0600
+Subject: PCI: dra7xx: Add shutdown handler to cleanly turn off clocks
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-11-mathieu.poirier@linaro.org>
+
+From: Keerthy <j-keerthy@ti.com>
+
+commit 9c049bea083fea21373b8baf51fe49acbe24e105 upstream
+
+Add shutdown handler to cleanly turn off clocks. This will help in cases of
+kexec wherein a new kernel can boot abruptly.
+
+Signed-off-by: Keerthy <j-keerthy@ti.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/dwc/pci-dra7xx.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/drivers/pci/dwc/pci-dra7xx.c
++++ b/drivers/pci/dwc/pci-dra7xx.c
+@@ -817,6 +817,22 @@ static int dra7xx_pcie_resume_noirq(stru
+ }
+ #endif
+
++void dra7xx_pcie_shutdown(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
++ int ret;
++
++ dra7xx_pcie_stop_link(dra7xx->pci);
++
++ ret = pm_runtime_put_sync(dev);
++ if (ret < 0)
++ dev_dbg(dev, "pm_runtime_put_sync failed\n");
++
++ pm_runtime_disable(dev);
++ dra7xx_pcie_disable_phy(dra7xx);
++}
++
+ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+@@ -830,5 +846,6 @@ static struct platform_driver dra7xx_pci
+ .suppress_bind_attrs = true,
+ .pm = &dra7xx_pcie_pm_ops,
+ },
++ .shutdown = dra7xx_pcie_shutdown,
+ };
+ builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
--- /dev/null
+From de53fd7aedb100f03e5d2231cfce0e4993282425 Mon Sep 17 00:00:00 2001
+From: Dave Chiluk <chiluk+linux@indeed.com>
+Date: Tue, 23 Jul 2019 11:44:26 -0500
+Subject: sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
+
+From: Dave Chiluk <chiluk+linux@indeed.com>
+
+commit de53fd7aedb100f03e5d2231cfce0e4993282425 upstream.
+
+It has been observed, that highly-threaded, non-cpu-bound applications
+running under cpu.cfs_quota_us constraints can hit a high percentage of
+periods throttled while simultaneously not consuming the allocated
+amount of quota. This use case is typical of user-interactive non-cpu
+bound applications, such as those running in kubernetes or mesos when
+run on multiple cpu cores.
+
+This has been root caused to cpu-local run queue being allocated per cpu
+bandwidth slices, and then not fully using that slice within the period.
+At which point the slice and quota expires. This expiration of unused
+slice results in applications not being able to utilize the quota for
+which they are allocated.
+
+The non-expiration of per-cpu slices was recently fixed by
+'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
+condition")'. Prior to that it appears that this had been broken since
+at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
+cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
+added the following conditional which resulted in slices never being
+expired.
+
+if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
+ /* extend local deadline, drift is bounded above by 2 ticks */
+ cfs_rq->runtime_expires += TICK_NSEC;
+
+Because this was broken for nearly 5 years, and has recently been fixed
+and is now being noticed by many users running kubernetes
+(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
+that the mechanisms around expiring runtime should be removed
+altogether.
+
+This allows quota already allocated to per-cpu run-queues to live longer
+than the period boundary. This allows threads on runqueues that do not
+use much CPU to continue to use their remaining slice over a longer
+period of time than cpu.cfs_period_us. However, this helps prevent the
+above condition of hitting throttling while also not fully utilizing
+your cpu quota.
+
+This theoretically allows a machine to use slightly more than its
+allotted quota in some periods. This overflow would be bounded by the
+remaining quota left on each per-cpu runqueue. This is typically no
+more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
+change nothing, as they should theoretically fully utilize all of their
+quota in each period. For user-interactive tasks as described above this
+provides a much better user/application experience as their cpu
+utilization will more closely match the amount they requested when they
+hit throttling. This means that cpu limits no longer strictly apply per
+period for non-cpu bound applications, but that they are still accurate
+over longer timeframes.
+
+This greatly improves performance of high-thread-count, non-cpu bound
+applications with low cfs_quota_us allocation on high-core-count
+machines. In the case of an artificial testcase (10ms/100ms of quota on
+80 CPU machine), this commit resulted in almost 30x performance
+improvement, while still maintaining correct cpu quota restrictions.
+That testcase is available at https://github.com/indeedeng/fibtest.
+
+Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
+Signed-off-by: Dave Chiluk <chiluk+linux@indeed.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: John Hammond <jhammond@indeed.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Kyle Anderson <kwa@yelp.com>
+Cc: Gabriel Munos <gmunoz@netflix.com>
+Cc: Peter Oskolkov <posk@posk.io>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Cc: Brendan Gregg <bgregg@netflix.com>
+Link: https://lkml.kernel.org/r/1563900266-19734-2-git-send-email-chiluk+linux@indeed.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/scheduler/sched-bwc.txt | 45 +++++++++++++++++++++
+ kernel/sched/fair.c | 70 +++-------------------------------
+ kernel/sched/sched.h | 4 -
+ 3 files changed, 52 insertions(+), 67 deletions(-)
+
+--- a/Documentation/scheduler/sched-bwc.txt
++++ b/Documentation/scheduler/sched-bwc.txt
+@@ -90,6 +90,51 @@ There are two ways in which a group may
+ In case b) above, even though the child may have runtime remaining it will not
+ be allowed to until the parent's runtime is refreshed.
+
++CFS Bandwidth Quota Caveats
++---------------------------
++Once a slice is assigned to a cpu it does not expire. However all but 1ms of
++the slice may be returned to the global pool if all threads on that cpu become
++unrunnable. This is configured at compile time by the min_cfs_rq_runtime
++variable. This is a performance tweak that helps prevent added contention on
++the global lock.
++
++The fact that cpu-local slices do not expire results in some interesting corner
++cases that should be understood.
++
++For cgroup cpu constrained applications that are cpu limited this is a
++relatively moot point because they will naturally consume the entirety of their
++quota as well as the entirety of each cpu-local slice in each period. As a
++result it is expected that nr_periods roughly equal nr_throttled, and that
++cpuacct.usage will increase roughly equal to cfs_quota_us in each period.
++
++For highly-threaded, non-cpu bound applications this non-expiration nuance
++allows applications to briefly burst past their quota limits by the amount of
++unused slice on each cpu that the task group is running on (typically at most
++1ms per cpu or as defined by min_cfs_rq_runtime). This slight burst only
++applies if quota had been assigned to a cpu and then not fully used or returned
++in previous periods. This burst amount will not be transferred between cores.
++As a result, this mechanism still strictly limits the task group to quota
++average usage, albeit over a longer time window than a single period. This
++also limits the burst ability to no more than 1ms per cpu. This provides
++better more predictable user experience for highly threaded applications with
++small quota limits on high core count machines. It also eliminates the
++propensity to throttle these applications while simultanously using less than
++quota amounts of cpu. Another way to say this, is that by allowing the unused
++portion of a slice to remain valid across periods we have decreased the
++possibility of wastefully expiring quota on cpu-local silos that don't need a
++full slice's amount of cpu time.
++
++The interaction between cpu-bound and non-cpu-bound-interactive applications
++should also be considered, especially when single core usage hits 100%. If you
++gave each of these applications half of a cpu-core and they both got scheduled
++on the same CPU it is theoretically possible that the non-cpu bound application
++will use up to 1ms additional quota in some periods, thereby preventing the
++cpu-bound application from fully using its quota by that same amount. In these
++instances it will be up to the CFS algorithm (see sched-design-CFS.rst) to
++decide which application is chosen to run, as they will both be runnable and
++have remaining quota. This runtime discrepancy will be made up in the following
++periods when the interactive application idles.
++
+ Examples
+ --------
+ 1. Limit a group to 1 CPU worth of runtime.
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4106,8 +4106,6 @@ void __refill_cfs_bandwidth_runtime(stru
+
+ now = sched_clock_cpu(smp_processor_id());
+ cfs_b->runtime = cfs_b->quota;
+- cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
+- cfs_b->expires_seq++;
+ }
+
+ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+@@ -4129,8 +4127,7 @@ static int assign_cfs_rq_runtime(struct
+ {
+ struct task_group *tg = cfs_rq->tg;
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+- u64 amount = 0, min_amount, expires;
+- int expires_seq;
++ u64 amount = 0, min_amount;
+
+ /* note: this is a positive sum as runtime_remaining <= 0 */
+ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
+@@ -4147,61 +4144,17 @@ static int assign_cfs_rq_runtime(struct
+ cfs_b->idle = 0;
+ }
+ }
+- expires_seq = cfs_b->expires_seq;
+- expires = cfs_b->runtime_expires;
+ raw_spin_unlock(&cfs_b->lock);
+
+ cfs_rq->runtime_remaining += amount;
+- /*
+- * we may have advanced our local expiration to account for allowed
+- * spread between our sched_clock and the one on which runtime was
+- * issued.
+- */
+- if (cfs_rq->expires_seq != expires_seq) {
+- cfs_rq->expires_seq = expires_seq;
+- cfs_rq->runtime_expires = expires;
+- }
+
+ return cfs_rq->runtime_remaining > 0;
+ }
+
+-/*
+- * Note: This depends on the synchronization provided by sched_clock and the
+- * fact that rq->clock snapshots this value.
+- */
+-static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+-{
+- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+-
+- /* if the deadline is ahead of our clock, nothing to do */
+- if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
+- return;
+-
+- if (cfs_rq->runtime_remaining < 0)
+- return;
+-
+- /*
+- * If the local deadline has passed we have to consider the
+- * possibility that our sched_clock is 'fast' and the global deadline
+- * has not truly expired.
+- *
+- * Fortunately we can check determine whether this the case by checking
+- * whether the global deadline(cfs_b->expires_seq) has advanced.
+- */
+- if (cfs_rq->expires_seq == cfs_b->expires_seq) {
+- /* extend local deadline, drift is bounded above by 2 ticks */
+- cfs_rq->runtime_expires += TICK_NSEC;
+- } else {
+- /* global deadline is ahead, expiration has passed */
+- cfs_rq->runtime_remaining = 0;
+- }
+-}
+-
+ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+ {
+ /* dock delta_exec before expiring quota (as it could span periods) */
+ cfs_rq->runtime_remaining -= delta_exec;
+- expire_cfs_rq_runtime(cfs_rq);
+
+ if (likely(cfs_rq->runtime_remaining > 0))
+ return;
+@@ -4387,8 +4340,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cf
+ resched_curr(rq);
+ }
+
+-static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
+- u64 remaining, u64 expires)
++static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+ {
+ struct cfs_rq *cfs_rq;
+ u64 runtime;
+@@ -4413,7 +4365,6 @@ static u64 distribute_cfs_runtime(struct
+ remaining -= runtime;
+
+ cfs_rq->runtime_remaining += runtime;
+- cfs_rq->runtime_expires = expires;
+
+ /* we check whether we're throttled above */
+ if (cfs_rq->runtime_remaining > 0)
+@@ -4438,7 +4389,7 @@ next:
+ */
+ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+ {
+- u64 runtime, runtime_expires;
++ u64 runtime;
+ int throttled;
+
+ /* no need to continue the timer with no bandwidth constraint */
+@@ -4466,8 +4417,6 @@ static int do_sched_cfs_period_timer(str
+ /* account preceding periods in which throttling occurred */
+ cfs_b->nr_throttled += overrun;
+
+- runtime_expires = cfs_b->runtime_expires;
+-
+ /*
+ * This check is repeated as we are holding onto the new bandwidth while
+ * we unthrottle. This can potentially race with an unthrottled group
+@@ -4480,8 +4429,7 @@ static int do_sched_cfs_period_timer(str
+ cfs_b->distribute_running = 1;
+ raw_spin_unlock(&cfs_b->lock);
+ /* we can't nest cfs_b->lock while distributing bandwidth */
+- runtime = distribute_cfs_runtime(cfs_b, runtime,
+- runtime_expires);
++ runtime = distribute_cfs_runtime(cfs_b, runtime);
+ raw_spin_lock(&cfs_b->lock);
+
+ cfs_b->distribute_running = 0;
+@@ -4558,8 +4506,7 @@ static void __return_cfs_rq_runtime(stru
+ return;
+
+ raw_spin_lock(&cfs_b->lock);
+- if (cfs_b->quota != RUNTIME_INF &&
+- cfs_rq->runtime_expires == cfs_b->runtime_expires) {
++ if (cfs_b->quota != RUNTIME_INF) {
+ cfs_b->runtime += slack_runtime;
+
+ /* we are under rq->lock, defer unthrottling using a timer */
+@@ -4591,7 +4538,6 @@ static __always_inline void return_cfs_r
+ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
+ {
+ u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+- u64 expires;
+
+ /* confirm we're still not at a refresh boundary */
+ raw_spin_lock(&cfs_b->lock);
+@@ -4608,7 +4554,6 @@ static void do_sched_cfs_slack_timer(str
+ if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
+ runtime = cfs_b->runtime;
+
+- expires = cfs_b->runtime_expires;
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
+@@ -4617,11 +4562,10 @@ static void do_sched_cfs_slack_timer(str
+ if (!runtime)
+ return;
+
+- runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
++ runtime = distribute_cfs_runtime(cfs_b, runtime);
+
+ raw_spin_lock(&cfs_b->lock);
+- if (expires == cfs_b->runtime_expires)
+- cfs_b->runtime -= min(runtime, cfs_b->runtime);
++ cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
+ raw_spin_unlock(&cfs_b->lock);
+ }
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -280,8 +280,6 @@ struct cfs_bandwidth {
+ ktime_t period;
+ u64 quota, runtime;
+ s64 hierarchical_quota;
+- u64 runtime_expires;
+- int expires_seq;
+
+ short idle, period_active;
+ struct hrtimer period_timer, slack_timer;
+@@ -489,8 +487,6 @@ struct cfs_rq {
+
+ #ifdef CONFIG_CFS_BANDWIDTH
+ int runtime_enabled;
+- int expires_seq;
+- u64 runtime_expires;
+ s64 runtime_remaining;
+
+ u64 throttled_clock, throttled_clock_task;
--- /dev/null
+From 763a9ec06c409dcde2a761aac4bb83ff3938e0b3 Mon Sep 17 00:00:00 2001
+From: Qian Cai <cai@lca.pw>
+Date: Tue, 20 Aug 2019 14:40:55 -0400
+Subject: sched/fair: Fix -Wunused-but-set-variable warnings
+
+From: Qian Cai <cai@lca.pw>
+
+commit 763a9ec06c409dcde2a761aac4bb83ff3938e0b3 upstream.
+
+Commit:
+
+ de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices")
+
+introduced a few compilation warnings:
+
+ kernel/sched/fair.c: In function '__refill_cfs_bandwidth_runtime':
+ kernel/sched/fair.c:4365:6: warning: variable 'now' set but not used [-Wunused-but-set-variable]
+ kernel/sched/fair.c: In function 'start_cfs_bandwidth':
+ kernel/sched/fair.c:4992:6: warning: variable 'overrun' set but not used [-Wunused-but-set-variable]
+
+Also, __refill_cfs_bandwidth_runtime() does no longer update the
+expiration time, so fix the comments accordingly.
+
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Reviewed-by: Dave Chiluk <chiluk+linux@indeed.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: pauld@redhat.com
+Fixes: de53fd7aedb1 ("sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices")
+Link: https://lkml.kernel.org/r/1566326455-8038-1-git-send-email-cai@lca.pw
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4091,21 +4091,16 @@ static inline u64 sched_cfs_bandwidth_sl
+ }
+
+ /*
+- * Replenish runtime according to assigned quota and update expiration time.
+- * We use sched_clock_cpu directly instead of rq->clock to avoid adding
+- * additional synchronization around rq->lock.
++ * Replenish runtime according to assigned quota. We use sched_clock_cpu
++ * directly instead of rq->clock to avoid adding additional synchronization
++ * around rq->lock.
+ *
+ * requires cfs_b->lock
+ */
+ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
+ {
+- u64 now;
+-
+- if (cfs_b->quota == RUNTIME_INF)
+- return;
+-
+- now = sched_clock_cpu(smp_processor_id());
+- cfs_b->runtime = cfs_b->quota;
++ if (cfs_b->quota != RUNTIME_INF)
++ cfs_b->runtime = cfs_b->quota;
+ }
+
+ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
configfs-new-object-reprsenting-tree-fragments.patch
configfs-provide-exclusion-between-io-and-removals.patch
configfs-fix-a-deadlock-in-configfs_symlink.patch
+usb-dwc3-allow-disabling-of-metastability-workaround.patch
+mfd-palmas-assign-the-right-powerhold-mask-for-tps65917.patch
+asoc-tlv320aic31xx-handle-inverted-bclk-in-non-dsp-modes.patch
+mtd-spi-nor-enable-4b-opcodes-for-mx66l51235l.patch
+mtd-spi-nor-cadence-quadspi-add-a-delay-in-write-sequence.patch
+misc-pci_endpoint_test-prevent-some-integer-overflows.patch
+pci-dra7xx-add-shutdown-handler-to-cleanly-turn-off-clocks.patch
+misc-pci_endpoint_test-fix-bug_on-error-during-pci_disable_msi.patch
+mailbox-reset-txdone_method-txdone_by_poll-if-client-knows_txdone.patch
+asoc-tlv320dac31xx-mark-expected-switch-fall-through.patch
+asoc-davinci-mcasp-handle-return-value-of-devm_kasprintf.patch
+asoc-davinci-kill-bug_on-usage.patch
+asoc-davinci-mcasp-fix-an-error-handling-path-in-davinci_mcasp_probe.patch
+i2c-omap-trigger-bus-recovery-in-lockup-case.patch
+cpufreq-ti-cpufreq-add-missing-of_node_put.patch
+arm-dts-dra7-disable-usb-metastability-workaround-for-usb2.patch
+sched-fair-fix-low-cpu-usage-with-high-throttling-by-removing-expiration-of-cpu-local-slices.patch
+sched-fair-fix-wunused-but-set-variable-warnings.patch
+usbip-fix-vhci_urb_enqueue-urb-null-transfer-buffer-error-path.patch
+usbip-stub_rx-fix-static-checker-warning-on-unnecessary-checks.patch
+usbip-implement-sg-support-to-vhci-hcd-and-stub-driver.patch
--- /dev/null
+From foo@baz Mon 11 Nov 2019 10:07:22 AM CET
+From: Mathieu Poirier <mathieu.poirier@linaro.org>
+Date: Thu, 5 Sep 2019 10:17:45 -0600
+Subject: usb: dwc3: Allow disabling of metastability workaround
+To: stable@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, linux-pm@vger.kernel.org, dri-devel@lists.freedesktop.org, linux-omap@vger.kernel.org, linux-i2c@vger.kernel.org, linux-pci@vger.kernel.org, linux-mtd@lists.infradead.org
+Message-ID: <20190905161759.28036-5-mathieu.poirier@linaro.org>
+
+From: Roger Quadros <rogerq@ti.com>
+
+commit 42bf02ec6e420e541af9a47437d0bdf961ca2972 upstream
+
+Some platforms (e.g. TI's DRA7 USB2 instance) have more trouble
+with the metastability workaround as it supports only
+a High-Speed PHY and the PHY can enter into an Erratic state [1]
+when the controller is set in SuperSpeed mode as part of
+the metastability workaround.
+
+This causes upto 2 seconds delay in enumeration on DRA7's USB2
+instance in gadget mode.
+
+If these platforms can be better off without the workaround,
+provide a device tree property to suggest that so the workaround
+is avoided.
+
+[1] Device mode enumeration trace showing PHY Erratic Error.
+ irq/90-dwc3-969 [000] d... 52.323145: dwc3_event: event (00000901): Erratic Error [U0]
+ irq/90-dwc3-969 [000] d... 52.560646: dwc3_event: event (00000901): Erratic Error [U0]
+ irq/90-dwc3-969 [000] d... 52.798144: dwc3_event: event (00000901): Erratic Error [U0]
+
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/usb/dwc3.txt | 2 ++
+ drivers/usb/dwc3/core.c | 3 +++
+ drivers/usb/dwc3/core.h | 3 +++
+ drivers/usb/dwc3/gadget.c | 6 ++++--
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+--- a/Documentation/devicetree/bindings/usb/dwc3.txt
++++ b/Documentation/devicetree/bindings/usb/dwc3.txt
+@@ -47,6 +47,8 @@ Optional properties:
+ from P0 to P1/P2/P3 without delay.
+ - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
+ during HS transmit.
++ - snps,dis_metastability_quirk: when set, disable metastability workaround.
++ CAUTION: use only if you are absolutely sure of it.
+ - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
+ utmi_l1_suspend_n, false when asserts utmi_sleep_n
+ - snps,hird-threshold: HIRD threshold
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1115,6 +1115,9 @@ static void dwc3_get_properties(struct d
+ device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
+ &dwc->fladj);
+
++ dwc->dis_metastability_quirk = device_property_read_bool(dev,
++ "snps,dis_metastability_quirk");
++
+ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ dwc->tx_de_emphasis = tx_de_emphasis;
+
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -869,6 +869,7 @@ struct dwc3_scratchpad_array {
+ * 1 - -3.5dB de-emphasis
+ * 2 - No de-emphasis
+ * 3 - Reserved
++ * @dis_metastability_quirk: set to disable metastability quirk.
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
+ */
+@@ -1025,6 +1026,8 @@ struct dwc3 {
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
+
++ unsigned dis_metastability_quirk:1;
++
+ u16 imod_interval;
+ };
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2034,7 +2034,8 @@ static void dwc3_gadget_set_speed(struct
+ * STAR#9000525659: Clock Domain Crossing on DCTL in
+ * USB 2.0 Mode
+ */
+- if (dwc->revision < DWC3_REVISION_220A) {
++ if (dwc->revision < DWC3_REVISION_220A &&
++ !dwc->dis_metastability_quirk) {
+ reg |= DWC3_DCFG_SUPERSPEED;
+ } else {
+ switch (speed) {
+@@ -3265,7 +3266,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ * is less than super speed because we don't have means, yet, to tell
+ * composite.c that we are USB 2.0 + LPM ECN.
+ */
+- if (dwc->revision < DWC3_REVISION_220A)
++ if (dwc->revision < DWC3_REVISION_220A &&
++ !dwc->dis_metastability_quirk)
+ dev_info(dwc->dev, "changing max_speed on rev %08x\n",
+ dwc->revision);
+
--- /dev/null
+From 2c904963b1dd2acd4bc785b6c72e10a6283c2081 Mon Sep 17 00:00:00 2001
+From: Shuah Khan <shuah@kernel.org>
+Date: Thu, 24 Jan 2019 14:46:42 -0700
+Subject: usbip: Fix vhci_urb_enqueue() URB null transfer buffer error path
+
+From: Shuah Khan <shuah@kernel.org>
+
+commit 2c904963b1dd2acd4bc785b6c72e10a6283c2081 upstream.
+
+Fix vhci_urb_enqueue() to print debug msg and return error instead of
+failing with BUG_ON.
+
+Signed-off-by: Shuah Khan <shuah@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 1e592ec94ba4..f46ee1fefe02 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -702,8 +702,10 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ }
+ vdev = &vhci_hcd->vdev[portnum-1];
+
+- /* patch to usb_sg_init() is in 2.5.60 */
+- BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
++ if (!urb->transfer_buffer && urb->transfer_buffer_length) {
++ dev_dbg(dev, "Null URB transfer buffer\n");
++ return -EINVAL;
++ }
+
+ spin_lock_irqsave(&vhci->lock, flags);
+
--- /dev/null
+From ea44d190764b4422af4d1c29eaeb9e69e353b406 Mon Sep 17 00:00:00 2001
+From: Suwan Kim <suwan.kim027@gmail.com>
+Date: Wed, 28 Aug 2019 12:27:41 +0900
+Subject: usbip: Implement SG support to vhci-hcd and stub driver
+
+From: Suwan Kim <suwan.kim027@gmail.com>
+
+commit ea44d190764b4422af4d1c29eaeb9e69e353b406 upstream.
+
+There are bugs on vhci with usb 3.0 storage device. In USB, each SG
+list entry buffer should be divisible by the bulk max packet size.
+But with native SG support, this problem doesn't matter because the
+SG buffer is treated as contiguous buffer. But without native SG
+support, USB storage driver breaks SG list into several URBs and the
+error occurs because of a buffer size of URB that cannot be divided
+by the bulk max packet size. The error situation is as follows.
+
+When USB Storage driver requests 31.5 KB data and has SG list which
+has 3584 bytes buffer followed by 7 4096 bytes buffer for some
+reason. USB Storage driver splits this SG list into several URBs
+because VHCI doesn't support SG and sends them separately. So the
+first URB buffer size is 3584 bytes. When receiving data from device,
+USB 3.0 device sends data packet of 1024 bytes size because the max
+packet size of BULK pipe is 1024 bytes. So device sends 4096 bytes.
+But the first URB buffer has only 3584 bytes buffer size. So host
+controller terminates the transfer even though there is more data to
+receive. So, vhci needs to support SG transfer to prevent this error.
+
+In this patch, vhci supports SG regardless of whether the server's
+host controller supports SG or not, because stub driver splits SG
+list into several URBs if the server's host controller doesn't
+support SG.
+
+To support SG, vhci sets URB_DMA_MAP_SG flag in urb->transfer_flags
+if URB has SG list and this flag will tell stub driver to use SG
+list. After receiving urb from stub driver, vhci clear URB_DMA_MAP_SG
+flag to avoid unnecessary DMA unmapping in HCD.
+
+vhci sends each SG list entry to stub driver. Then, stub driver sees
+the total length of the buffer and allocates SG table and pages
+according to the total buffer length calling sgl_alloc(). After stub
+driver receives completed URB, it again sends each SG list entry to
+vhci.
+
+If the server's host controller doesn't support SG, stub driver
+breaks a single SG request into several URBs and submits them to
+the server's host controller. When all the split URBs are completed,
+stub driver reassembles the URBs into a single return command and
+sends it to vhci.
+
+Moreover, in the situation where vhci supports SG, but stub driver
+does not, or vice versa, usbip works normally. Because there is no
+protocol modification, there is no problem in communication between
+server and client even if the one has a kernel without SG support.
+
+In the case of vhci supports SG and stub driver doesn't, because
+vhci sends only the total length of the buffer to stub driver as
+it did before the patch applied, stub driver only needs to allocate
+the required length of buffers using only kmalloc() regardless of
+whether vhci supports SG or not. But stub driver has to allocate
+buffer with kmalloc() as much as the total length of SG buffer which
+is quite huge when vhci sends SG request, so it has overhead in
+buffer allocation in this situation.
+
+If stub driver needs to send data buffer to vhci because of IN pipe,
+stub driver also sends only total length of buffer as metadata and
+then sends real data as vhci does. Then vhci receive data from stub
+driver and store it to the corresponding buffer of SG list entry.
+
+And for the case of stub driver supports SG and vhci doesn't, since
+the USB storage driver checks that vhci doesn't support SG and sends
+the request to stub driver by splitting the SG list into multiple
+URBs, stub driver allocates a buffer for each URB with kmalloc() as
+it did before this patch.
+
+* Test environment
+
+Test uses two difference machines and two different kernel version
+to make mismatch situation between the client and the server where
+vhci supports SG, but stub driver does not, or vice versa. All tests
+are conducted in both full SG support that both vhci and stub support
+SG and half SG support that is the mismatch situation. Test kernel
+version is 5.3-rc6 with commit "usb: add a HCD_DMA flag instead of
+guestimating DMA capabilities" to avoid unnecessary DMA mapping and
+unmapping.
+
+ - Test kernel version
+ - 5.3-rc6 with SG support
+ - 5.1.20-200.fc29.x86_64 without SG support
+
+* SG support test
+
+ - Test devices
+ - Super-speed storage device - SanDisk Ultra USB 3.0
+ - High-speed storage device - SMI corporation USB 2.0 flash drive
+
+ - Test description
+
+Test read and write operation of mass storage device that uses the
+BULK transfer. In test, the client reads and writes files whose size
+is over 1G and it works normally.
+
+* Regression test
+
+ - Test devices
+ - Super-speed device - Logitech Brio webcam
+ - High-speed device - Logitech C920 HD Pro webcam
+ - Full-speed device - Logitech bluetooth mouse
+ - Britz BR-Orion speaker
+ - Low-speed device - Logitech wired mouse
+
+ - Test description
+
+Moving and click test for mouse. To test the webcam, use gnome-cheese.
+To test the speaker, play music and video on the client. All works
+normally.
+
+* VUDC compatibility test
+
+VUDC also works well with this patch. Tests are done with two USB
+gadget created by CONFIGFS USB gadget. Both use the BULK pipe.
+
+ 1. Serial gadget
+ 2. Mass storage gadget
+
+ - Serial gadget test
+
+Serial gadget on the host sends and receives data using cat command
+on the /dev/ttyGS<N>. The client uses minicom to communicate with
+the serial gadget.
+
+ - Mass storage gadget test
+
+After connecting the gadget with vhci, use "dd" to test read and
+write operation on the client side.
+
+Read - dd if=/dev/sd<N> iflag=direct of=/dev/null bs=1G count=1
+Write - dd if=<my file path> iflag=direct of=/dev/sd<N> bs=1G count=1
+
+Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
+Acked-by: Shuah khan <skhan@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20190828032741.12234-1-suwan.kim027@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub.h | 7 +
+ drivers/usb/usbip/stub_main.c | 57 ++++++++---
+ drivers/usb/usbip/stub_rx.c | 202 +++++++++++++++++++++++++++------------
+ drivers/usb/usbip/stub_tx.c | 99 ++++++++++++++-----
+ drivers/usb/usbip/usbip_common.c | 59 +++++++----
+ drivers/usb/usbip/vhci_hcd.c | 12 ++
+ drivers/usb/usbip/vhci_rx.c | 3
+ drivers/usb/usbip/vhci_tx.c | 66 ++++++++++--
+ 8 files changed, 379 insertions(+), 126 deletions(-)
+
+--- a/drivers/usb/usbip/stub.h
++++ b/drivers/usb/usbip/stub.h
+@@ -66,7 +66,11 @@ struct stub_priv {
+ unsigned long seqnum;
+ struct list_head list;
+ struct stub_device *sdev;
+- struct urb *urb;
++ struct urb **urbs;
++ struct scatterlist *sgl;
++ int num_urbs;
++ int completed_urbs;
++ int urb_status;
+
+ int unlinking;
+ };
+@@ -100,6 +104,7 @@ extern struct usb_device_driver stub_dri
+ struct bus_id_priv *get_busid_priv(const char *busid);
+ void put_busid_priv(struct bus_id_priv *bid);
+ int del_match_busid(char *busid);
++void stub_free_priv_and_urb(struct stub_priv *priv);
+ void stub_device_cleanup_urbs(struct stub_device *sdev);
+
+ /* stub_rx.c */
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -20,6 +20,7 @@
+ #include <linux/string.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/scatterlist.h>
+
+ #include "usbip_common.h"
+ #include "stub.h"
+@@ -297,13 +298,49 @@ static struct stub_priv *stub_priv_pop_f
+ struct stub_priv *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, listhead, list) {
+- list_del(&priv->list);
++ list_del_init(&priv->list);
+ return priv;
+ }
+
+ return NULL;
+ }
+
++void stub_free_priv_and_urb(struct stub_priv *priv)
++{
++ struct urb *urb;
++ int i;
++
++ for (i = 0; i < priv->num_urbs; i++) {
++ urb = priv->urbs[i];
++
++ if (!urb)
++ return;
++
++ kfree(urb->setup_packet);
++ urb->setup_packet = NULL;
++
++
++ if (urb->transfer_buffer && !priv->sgl) {
++ kfree(urb->transfer_buffer);
++ urb->transfer_buffer = NULL;
++ }
++
++ if (urb->num_sgs) {
++ sgl_free(urb->sg);
++ urb->sg = NULL;
++ urb->num_sgs = 0;
++ }
++
++ usb_free_urb(urb);
++ }
++ if (!list_empty(&priv->list))
++ list_del(&priv->list);
++ if (priv->sgl)
++ sgl_free(priv->sgl);
++ kfree(priv->urbs);
++ kmem_cache_free(stub_priv_cache, priv);
++}
++
+ static struct stub_priv *stub_priv_pop(struct stub_device *sdev)
+ {
+ unsigned long flags;
+@@ -330,25 +367,15 @@ done:
+ void stub_device_cleanup_urbs(struct stub_device *sdev)
+ {
+ struct stub_priv *priv;
+- struct urb *urb;
++ int i;
+
+ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
+
+ while ((priv = stub_priv_pop(sdev))) {
+- urb = priv->urb;
+- dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+- priv->seqnum);
+- usb_kill_urb(urb);
+-
+- kmem_cache_free(stub_priv_cache, priv);
++ for (i = 0; i < priv->num_urbs; i++)
++ usb_kill_urb(priv->urbs[i]);
+
+- kfree(urb->transfer_buffer);
+- urb->transfer_buffer = NULL;
+-
+- kfree(urb->setup_packet);
+- urb->setup_packet = NULL;
+-
+- usb_free_urb(urb);
++ stub_free_priv_and_urb(priv);
+ }
+ }
+
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -21,6 +21,7 @@
+ #include <linux/kthread.h>
+ #include <linux/usb.h>
+ #include <linux/usb/hcd.h>
++#include <linux/scatterlist.h>
+
+ #include "usbip_common.h"
+ #include "stub.h"
+@@ -215,7 +216,7 @@ static void tweak_special_requests(struc
+ static int stub_recv_cmd_unlink(struct stub_device *sdev,
+ struct usbip_header *pdu)
+ {
+- int ret;
++ int ret, i;
+ unsigned long flags;
+ struct stub_priv *priv;
+
+@@ -260,12 +261,14 @@ static int stub_recv_cmd_unlink(struct s
+ * so a driver in a client host will know the failure
+ * of the unlink request ?
+ */
+- ret = usb_unlink_urb(priv->urb);
+- if (ret != -EINPROGRESS)
+- dev_err(&priv->urb->dev->dev,
+- "failed to unlink a urb # %lu, ret %d\n",
+- priv->seqnum, ret);
+-
++ for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
++ ret = usb_unlink_urb(priv->urbs[i]);
++ if (ret != -EINPROGRESS)
++ dev_err(&priv->urbs[i]->dev->dev,
++ "failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
++ i + 1, priv->num_urbs,
++ priv->seqnum, ret);
++ }
+ return 0;
+ }
+
+@@ -450,14 +453,36 @@ static void masking_bogus_flags(struct u
+ urb->transfer_flags &= allowed;
+ }
+
++static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
++{
++ int ret;
++ int i;
++
++ for (i = 0; i < priv->num_urbs; i++) {
++ ret = usbip_recv_xbuff(ud, priv->urbs[i]);
++ if (ret < 0)
++ break;
++ }
++
++ return ret;
++}
++
+ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ struct usbip_header *pdu)
+ {
+- int ret;
+ struct stub_priv *priv;
+ struct usbip_device *ud = &sdev->ud;
+ struct usb_device *udev = sdev->udev;
++ struct scatterlist *sgl = NULL, *sg;
++ void *buffer = NULL;
++ unsigned long long buf_len;
++ int nents;
++ int num_urbs = 1;
+ int pipe = get_pipe(sdev, pdu);
++ int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
++ int support_sg = 1;
++ int np = 0;
++ int ret, i;
+
+ if (pipe == -1)
+ return;
+@@ -466,76 +491,139 @@ static void stub_recv_cmd_submit(struct
+ if (!priv)
+ return;
+
+- /* setup a urb */
+- if (usb_pipeisoc(pipe))
+- priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
+- GFP_KERNEL);
+- else
+- priv->urb = usb_alloc_urb(0, GFP_KERNEL);
++ buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
+
+- if (!priv->urb) {
+- usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
+- return;
++ /* allocate urb transfer buffer, if needed */
++ if (buf_len) {
++ if (use_sg) {
++ sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
++ if (!sgl)
++ goto err_malloc;
++ } else {
++ buffer = kzalloc(buf_len, GFP_KERNEL);
++ if (!buffer)
++ goto err_malloc;
++ }
+ }
+
+- /* allocate urb transfer buffer, if needed */
+- if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+- priv->urb->transfer_buffer =
+- kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
+- GFP_KERNEL);
+- if (!priv->urb->transfer_buffer) {
++ /* Check if the server's HCD supports SG */
++ if (use_sg && !udev->bus->sg_tablesize) {
++ /*
++ * If the server's HCD doesn't support SG, break a single SG
++ * request into several URBs and map each SG list entry to
++ * corresponding URB buffer. The previously allocated SG
++ * list is stored in priv->sgl (If the server's HCD support SG,
++ * SG list is stored only in urb->sg) and it is used as an
++ * indicator that the server split single SG request into
++ * several URBs. Later, priv->sgl is used by stub_complete() and
++ * stub_send_ret_submit() to reassemble the divied URBs.
++ */
++ support_sg = 0;
++ num_urbs = nents;
++ priv->completed_urbs = 0;
++ pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
++ }
++
++ /* allocate urb array */
++ priv->num_urbs = num_urbs;
++ priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
++ if (!priv->urbs)
++ goto err_urbs;
++
++ /* setup a urb */
++ if (support_sg) {
++ if (usb_pipeisoc(pipe))
++ np = pdu->u.cmd_submit.number_of_packets;
++
++ priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
++ if (!priv->urbs[0])
++ goto err_urb;
++
++ if (buf_len) {
++ if (use_sg) {
++ priv->urbs[0]->sg = sgl;
++ priv->urbs[0]->num_sgs = nents;
++ priv->urbs[0]->transfer_buffer = NULL;
++ } else {
++ priv->urbs[0]->transfer_buffer = buffer;
++ }
++ }
++
++ /* copy urb setup packet */
++ priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
++ 8, GFP_KERNEL);
++ if (!priv->urbs[0]->setup_packet) {
+ usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
+ return;
+ }
+- }
+
+- /* copy urb setup packet */
+- priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
+- GFP_KERNEL);
+- if (!priv->urb->setup_packet) {
+- dev_err(&udev->dev, "allocate setup_packet\n");
+- usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
+- return;
++ usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
++ } else {
++ for_each_sg(sgl, sg, nents, i) {
++ priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
++ /* The URBs which is previously allocated will be freed
++ * in stub_device_cleanup_urbs() if error occurs.
++ */
++ if (!priv->urbs[i])
++ goto err_urb;
++
++ usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
++ priv->urbs[i]->transfer_buffer = sg_virt(sg);
++ priv->urbs[i]->transfer_buffer_length = sg->length;
++ }
++ priv->sgl = sgl;
+ }
+
+- /* set other members from the base header of pdu */
+- priv->urb->context = (void *) priv;
+- priv->urb->dev = udev;
+- priv->urb->pipe = pipe;
+- priv->urb->complete = stub_complete;
++ for (i = 0; i < num_urbs; i++) {
++ /* set other members from the base header of pdu */
++ priv->urbs[i]->context = (void *) priv;
++ priv->urbs[i]->dev = udev;
++ priv->urbs[i]->pipe = pipe;
++ priv->urbs[i]->complete = stub_complete;
+
+- usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);
++ /* no need to submit an intercepted request, but harmless? */
++ tweak_special_requests(priv->urbs[i]);
+
++ masking_bogus_flags(priv->urbs[i]);
++ }
+
+- if (usbip_recv_xbuff(ud, priv->urb) < 0)
++ if (stub_recv_xbuff(ud, priv) < 0)
+ return;
+
+- if (usbip_recv_iso(ud, priv->urb) < 0)
++ if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
+ return;
+
+- /* no need to submit an intercepted request, but harmless? */
+- tweak_special_requests(priv->urb);
+-
+- masking_bogus_flags(priv->urb);
+ /* urb is now ready to submit */
+- ret = usb_submit_urb(priv->urb, GFP_KERNEL);
+-
+- if (ret == 0)
+- usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+- pdu->base.seqnum);
+- else {
+- dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+- usbip_dump_header(pdu);
+- usbip_dump_urb(priv->urb);
++ for (i = 0; i < priv->num_urbs; i++) {
++ ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+
+- /*
+- * Pessimistic.
+- * This connection will be discarded.
+- */
+- usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++ if (ret == 0)
++ usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
++ pdu->base.seqnum);
++ else {
++ dev_err(&udev->dev, "submit_urb error, %d\n", ret);
++ usbip_dump_header(pdu);
++ usbip_dump_urb(priv->urbs[i]);
++
++ /*
++ * Pessimistic.
++ * This connection will be discarded.
++ */
++ usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++ break;
++ }
+ }
+
+ usbip_dbg_stub_rx("Leave\n");
++ return;
++
++err_urb:
++ kfree(priv->urbs);
++err_urbs:
++ kfree(buffer);
++ sgl_free(sgl);
++err_malloc:
++ usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
+ }
+
+ /* recv a pdu */
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -19,25 +19,11 @@
+
+ #include <linux/kthread.h>
+ #include <linux/socket.h>
++#include <linux/scatterlist.h>
+
+ #include "usbip_common.h"
+ #include "stub.h"
+
+-static void stub_free_priv_and_urb(struct stub_priv *priv)
+-{
+- struct urb *urb = priv->urb;
+-
+- kfree(urb->setup_packet);
+- urb->setup_packet = NULL;
+-
+- kfree(urb->transfer_buffer);
+- urb->transfer_buffer = NULL;
+-
+- list_del(&priv->list);
+- kmem_cache_free(stub_priv_cache, priv);
+- usb_free_urb(urb);
+-}
+-
+ /* be in spin_lock_irqsave(&sdev->priv_lock, flags) */
+ void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
+ __u32 status)
+@@ -99,6 +85,22 @@ void stub_complete(struct urb *urb)
+ break;
+ }
+
++ /*
++ * If the server breaks single SG request into the several URBs, the
++ * URBs must be reassembled before sending completed URB to the vhci.
++ * Don't wake up the tx thread until all the URBs are completed.
++ */
++ if (priv->sgl) {
++ priv->completed_urbs++;
++
++ /* Only save the first error status */
++ if (urb->status && !priv->urb_status)
++ priv->urb_status = urb->status;
++
++ if (priv->completed_urbs < priv->num_urbs)
++ return;
++ }
++
+ /* link a urb to the queue of tx. */
+ spin_lock_irqsave(&sdev->priv_lock, flags);
+ if (sdev->ud.tcp_socket == NULL) {
+@@ -170,18 +172,22 @@ static int stub_send_ret_submit(struct s
+ size_t total_size = 0;
+
+ while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
+- int ret;
+- struct urb *urb = priv->urb;
++ struct urb *urb = priv->urbs[0];
+ struct usbip_header pdu_header;
+ struct usbip_iso_packet_descriptor *iso_buffer = NULL;
+ struct kvec *iov = NULL;
++ struct scatterlist *sg;
++ u32 actual_length = 0;
+ int iovnum = 0;
++ int ret;
++ int i;
+
+ txsize = 0;
+ memset(&pdu_header, 0, sizeof(pdu_header));
+ memset(&msg, 0, sizeof(msg));
+
+- if (urb->actual_length > 0 && !urb->transfer_buffer) {
++ if (urb->actual_length > 0 && !urb->transfer_buffer &&
++ !urb->num_sgs) {
+ dev_err(&sdev->udev->dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+@@ -190,6 +196,11 @@ static int stub_send_ret_submit(struct s
+
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ iovnum = 2 + urb->number_of_packets;
++ else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
++ urb->num_sgs)
++ iovnum = 1 + urb->num_sgs;
++ else if (usb_pipein(urb->pipe) && priv->sgl)
++ iovnum = 1 + priv->num_urbs;
+ else
+ iovnum = 2;
+
+@@ -206,6 +217,15 @@ static int stub_send_ret_submit(struct s
+ setup_ret_submit_pdu(&pdu_header, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
++
++ if (priv->sgl) {
++ for (i = 0; i < priv->num_urbs; i++)
++ actual_length += priv->urbs[i]->actual_length;
++
++ pdu_header.u.ret_submit.status = priv->urb_status;
++ pdu_header.u.ret_submit.actual_length = actual_length;
++ }
++
+ usbip_header_correct_endian(&pdu_header, 1);
+
+ iov[iovnum].iov_base = &pdu_header;
+@@ -214,12 +234,47 @@ static int stub_send_ret_submit(struct s
+ txsize += sizeof(pdu_header);
+
+ /* 2. setup transfer buffer */
+- if (usb_pipein(urb->pipe) &&
++ if (usb_pipein(urb->pipe) && priv->sgl) {
++ /* If the server split a single SG request into several
++ * URBs because the server's HCD doesn't support SG,
++ * reassemble the split URB buffers into a single
++ * return command.
++ */
++ for (i = 0; i < priv->num_urbs; i++) {
++ iov[iovnum].iov_base =
++ priv->urbs[i]->transfer_buffer;
++ iov[iovnum].iov_len =
++ priv->urbs[i]->actual_length;
++ iovnum++;
++ }
++ txsize += actual_length;
++ } else if (usb_pipein(urb->pipe) &&
+ usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
+ urb->actual_length > 0) {
+- iov[iovnum].iov_base = urb->transfer_buffer;
+- iov[iovnum].iov_len = urb->actual_length;
+- iovnum++;
++ if (urb->num_sgs) {
++ unsigned int copy = urb->actual_length;
++ int size;
++
++ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++ if (copy == 0)
++ break;
++
++ if (copy < sg->length)
++ size = copy;
++ else
++ size = sg->length;
++
++ iov[iovnum].iov_base = sg_virt(sg);
++ iov[iovnum].iov_len = size;
++
++ iovnum++;
++ copy -= size;
++ }
++ } else {
++ iov[iovnum].iov_base = urb->transfer_buffer;
++ iov[iovnum].iov_len = urb->actual_length;
++ iovnum++;
++ }
+ txsize += urb->actual_length;
+ } else if (usb_pipein(urb->pipe) &&
+ usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -695,8 +695,12 @@ EXPORT_SYMBOL_GPL(usbip_pad_iso);
+ /* some members of urb must be substituted before. */
+ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+ {
+- int ret;
++ struct scatterlist *sg;
++ int ret = 0;
++ int recv;
+ int size;
++ int copy;
++ int i;
+
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
+ /* the direction of urb must be OUT. */
+@@ -716,29 +720,48 @@ int usbip_recv_xbuff(struct usbip_device
+ if (!(size > 0))
+ return 0;
+
+- if (size > urb->transfer_buffer_length) {
++ if (size > urb->transfer_buffer_length)
+ /* should not happen, probably malicious packet */
+- if (ud->side == USBIP_STUB) {
+- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+- return 0;
+- } else {
+- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+- return -EPIPE;
+- }
+- }
++ goto error;
++
++ if (urb->num_sgs) {
++ copy = size;
++ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++ int recv_size;
++
++ if (copy < sg->length)
++ recv_size = copy;
++ else
++ recv_size = sg->length;
++
++ recv = usbip_recv(ud->tcp_socket, sg_virt(sg),
++ recv_size);
+
+- ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+- if (ret != size) {
+- dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+- if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
+- usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+- } else {
+- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+- return -EPIPE;
++ if (recv != recv_size)
++ goto error;
++
++ copy -= recv;
++ ret += recv;
+ }
++
++ if (ret != size)
++ goto error;
++ } else {
++ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
++ if (ret != size)
++ goto error;
+ }
+
+ return ret;
++
++error:
++ dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
++ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
++ else
++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
++
++ return -EPIPE;
+ }
+ EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
+
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -716,7 +716,8 @@ static int vhci_urb_enqueue(struct usb_h
+ }
+ vdev = &vhci_hcd->vdev[portnum-1];
+
+- if (!urb->transfer_buffer && urb->transfer_buffer_length) {
++ if (!urb->transfer_buffer && !urb->num_sgs &&
++ urb->transfer_buffer_length) {
+ dev_dbg(dev, "Null URB transfer buffer\n");
+ return -EINVAL;
+ }
+@@ -1162,6 +1163,15 @@ static int vhci_setup(struct usb_hcd *hc
+ hcd->speed = HCD_USB3;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER;
+ }
++
++ /*
++ * Support SG.
++ * sg_tablesize is an arbitrary value to alleviate memory pressure
++ * on the host.
++ */
++ hcd->self.sg_tablesize = 32;
++ hcd->self.no_sg_constraint = 1;
++
+ return 0;
+ }
+
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -104,6 +104,9 @@ static void vhci_recv_ret_submit(struct
+ if (usbip_dbg_flag_vhci_rx)
+ usbip_dump_urb(urb);
+
++ if (urb->num_sgs)
++ urb->transfer_flags &= ~URB_DMA_MAP_SG;
++
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
+
+ spin_lock_irqsave(&vhci->lock, flags);
+--- a/drivers/usb/usbip/vhci_tx.c
++++ b/drivers/usb/usbip/vhci_tx.c
+@@ -19,6 +19,7 @@
+
+ #include <linux/kthread.h>
+ #include <linux/slab.h>
++#include <linux/scatterlist.h>
+
+ #include "usbip_common.h"
+ #include "vhci.h"
+@@ -64,19 +65,23 @@ static struct vhci_priv *dequeue_from_pr
+
+ static int vhci_send_cmd_submit(struct vhci_device *vdev)
+ {
++ struct usbip_iso_packet_descriptor *iso_buffer = NULL;
+ struct vhci_priv *priv = NULL;
++ struct scatterlist *sg;
+
+ struct msghdr msg;
+- struct kvec iov[3];
++ struct kvec *iov;
+ size_t txsize;
+
+ size_t total_size = 0;
++ int iovnum;
++ int err = -ENOMEM;
++ int i;
+
+ while ((priv = dequeue_from_priv_tx(vdev)) != NULL) {
+ int ret;
+ struct urb *urb = priv->urb;
+ struct usbip_header pdu_header;
+- struct usbip_iso_packet_descriptor *iso_buffer = NULL;
+
+ txsize = 0;
+ memset(&pdu_header, 0, sizeof(pdu_header));
+@@ -86,18 +91,45 @@ static int vhci_send_cmd_submit(struct v
+ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+ priv->seqnum);
+
++ if (urb->num_sgs && usb_pipeout(urb->pipe))
++ iovnum = 2 + urb->num_sgs;
++ else
++ iovnum = 3;
++
++ iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
++ if (!iov) {
++ usbip_event_add(&vdev->ud, SDEV_EVENT_ERROR_MALLOC);
++ return -ENOMEM;
++ }
++
++ if (urb->num_sgs)
++ urb->transfer_flags |= URB_DMA_MAP_SG;
++
+ /* 1. setup usbip_header */
+ setup_cmd_submit_pdu(&pdu_header, urb);
+ usbip_header_correct_endian(&pdu_header, 1);
++ iovnum = 0;
+
+- iov[0].iov_base = &pdu_header;
+- iov[0].iov_len = sizeof(pdu_header);
++ iov[iovnum].iov_base = &pdu_header;
++ iov[iovnum].iov_len = sizeof(pdu_header);
+ txsize += sizeof(pdu_header);
++ iovnum++;
+
+ /* 2. setup transfer buffer */
+ if (!usb_pipein(urb->pipe) && urb->transfer_buffer_length > 0) {
+- iov[1].iov_base = urb->transfer_buffer;
+- iov[1].iov_len = urb->transfer_buffer_length;
++ if (urb->num_sgs &&
++ !usb_endpoint_xfer_isoc(&urb->ep->desc)) {
++ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++ iov[iovnum].iov_base = sg_virt(sg);
++ iov[iovnum].iov_len = sg->length;
++ iovnum++;
++ }
++ } else {
++ iov[iovnum].iov_base = urb->transfer_buffer;
++ iov[iovnum].iov_len =
++ urb->transfer_buffer_length;
++ iovnum++;
++ }
+ txsize += urb->transfer_buffer_length;
+ }
+
+@@ -109,23 +141,26 @@ static int vhci_send_cmd_submit(struct v
+ if (!iso_buffer) {
+ usbip_event_add(&vdev->ud,
+ SDEV_EVENT_ERROR_MALLOC);
+- return -1;
++ goto err_iso_buffer;
+ }
+
+- iov[2].iov_base = iso_buffer;
+- iov[2].iov_len = len;
++ iov[iovnum].iov_base = iso_buffer;
++ iov[iovnum].iov_len = len;
++ iovnum++;
+ txsize += len;
+ }
+
+- ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, 3, txsize);
++ ret = kernel_sendmsg(vdev->ud.tcp_socket, &msg, iov, iovnum,
++ txsize);
+ if (ret != txsize) {
+ pr_err("sendmsg failed!, ret=%d for %zd\n", ret,
+ txsize);
+- kfree(iso_buffer);
+ usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_TCP);
+- return -1;
++ err = -EPIPE;
++ goto err_tx;
+ }
+
++ kfree(iov);
+ kfree(iso_buffer);
+ usbip_dbg_vhci_tx("send txdata\n");
+
+@@ -133,6 +168,13 @@ static int vhci_send_cmd_submit(struct v
+ }
+
+ return total_size;
++
++err_tx:
++ kfree(iso_buffer);
++err_iso_buffer:
++ kfree(iov);
++
++ return err;
+ }
+
+ static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
--- /dev/null
+From 10c90120930628e8b959bf58d4a0aaef3ae5d945 Mon Sep 17 00:00:00 2001
+From: Shuah Khan <shuah@kernel.org>
+Date: Fri, 15 Dec 2017 10:05:15 -0700
+Subject: usbip: stub_rx: fix static checker warning on unnecessary checks
+
+From: Shuah Khan <shuahkh@osg.samsung.com>
+
+commit 10c90120930628e8b959bf58d4a0aaef3ae5d945 upstream.
+
+Fix the following static checker warnings:
+
+The patch c6688ef9f297: "usbip: fix stub_rx: harden CMD_SUBMIT path
+to handle malicious input" from Dec 7, 2017, leads to the following
+static checker warning:
+
+ drivers/usb/usbip/stub_rx.c:346 get_pipe()
+ warn: impossible condition
+'(pdu->u.cmd_submit.transfer_buffer_length > ((~0 >> 1))) =>
+(s32min-s32max > s32max)'
+ drivers/usb/usbip/stub_rx.c:486 stub_recv_cmd_submit()
+ warn: always true condition
+'(pdu->u.cmd_submit.transfer_buffer_length <= ((~0 >> 1))) =>
+(s32min-s32max <= s32max)'
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/usbip/stub_rx.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -353,14 +353,6 @@ static int get_pipe(struct stub_device *
+
+ epd = &ep->desc;
+
+- /* validate transfer_buffer_length */
+- if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
+- dev_err(&sdev->udev->dev,
+- "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
+- pdu->u.cmd_submit.transfer_buffer_length);
+- return -1;
+- }
+-
+ if (usb_endpoint_xfer_control(epd)) {
+ if (dir == USBIP_DIR_OUT)
+ return usb_sndctrlpipe(udev, epnum);
+@@ -487,8 +479,7 @@ static void stub_recv_cmd_submit(struct
+ }
+
+ /* allocate urb transfer buffer, if needed */
+- if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+- pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
++ if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+ priv->urb->transfer_buffer =
+ kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
+ GFP_KERNEL);