--- /dev/null
+From 9401d5aa328e64617d87abd59af1c91cace4c3e4 Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Fri, 6 Mar 2020 23:29:27 +0100
+Subject: ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit 9401d5aa328e64617d87abd59af1c91cace4c3e4 upstream.
+
+The 4-bit divider value was written at offset 8, while the jz4740
+programming manual locates it at offset 0.
+
+Fixes: 26b0aad80a86 ("ASoC: jz4740: Add dynamic sampling rate support to jz4740-i2s")
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200306222931.39664-2-paul@crapouillou.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/jz4740/jz4740-i2s.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/jz4740/jz4740-i2s.c
++++ b/sound/soc/jz4740/jz4740-i2s.c
+@@ -83,7 +83,7 @@
+ #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
+
+ #define JZ_AIC_CLK_DIV_MASK 0xf
+-#define I2SDIV_DV_SHIFT 8
++#define I2SDIV_DV_SHIFT 0
+ #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+ #define I2SDIV_IDV_SHIFT 8
+ #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
--- /dev/null
+From eedf8a126629bf9db8ad3a2a5dc9dc1798fb2302 Mon Sep 17 00:00:00 2001
+From: Jonghwan Choi <charlie.jh@kakaocorp.com>
+Date: Thu, 19 Mar 2020 23:00:44 +0900
+Subject: ASoC: tas2562: Fixed incorrect amp_level setting.
+
+From: Jonghwan Choi <charlie.jh@kakaocorp.com>
+
+commit eedf8a126629bf9db8ad3a2a5dc9dc1798fb2302 upstream.
+
+According to the tas2562 datasheet, the bits[5:1] represent the amp_level value.
+So to set the amp_level value correctly, the shift value should be set to 1.
+
+Signed-off-by: Jonghwan Choi <charlie.jh@kakaocorp.com>
+Acked-by: Dan Murphy <dmurphy@ti.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200319140043.GA6688@jhbirdchoi-MS-7B79
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/tas2562.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/tas2562.c
++++ b/sound/soc/codecs/tas2562.c
+@@ -409,7 +409,7 @@ static const struct snd_kcontrol_new vse
+ 1, 1);
+
+ static const struct snd_kcontrol_new tas2562_snd_controls[] = {
+- SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 0, 0x1c, 0,
++ SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 1, 0x1c, 0,
+ tas2562_dac_tlv),
+ };
+
--- /dev/null
+From 47a1f8e8b3637ff5f7806587883d7d94068d9ee8 Mon Sep 17 00:00:00 2001
+From: Martin Kaiser <martin@kaiser.cx>
+Date: Thu, 5 Mar 2020 21:58:20 +0100
+Subject: hwrng: imx-rngc - fix an error path
+
+From: Martin Kaiser <martin@kaiser.cx>
+
+commit 47a1f8e8b3637ff5f7806587883d7d94068d9ee8 upstream.
+
+Make sure that the rngc interrupt is masked if the rngc self test fails.
+Self test failure means that probe fails as well. Interrupts should be
+masked in this case, regardless of the error.
+
+Cc: stable@vger.kernel.org
+Fixes: 1d5449445bd0 ("hwrng: mx-rngc - add a driver for Freescale RNGC")
+Reviewed-by: PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+Signed-off-by: Martin Kaiser <martin@kaiser.cx>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/hw_random/imx-rngc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/hw_random/imx-rngc.c
++++ b/drivers/char/hw_random/imx-rngc.c
+@@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx
+ return -ETIMEDOUT;
+ }
+
+- if (rngc->err_reg != 0)
++ if (rngc->err_reg != 0) {
++ imx_rngc_irq_mask_clear(rngc);
+ return -EIO;
++ }
+
+ return 0;
+ }
--- /dev/null
+From dfb5394f804ed4fcea1fc925be275a38d66712ab Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 26 Mar 2020 12:38:14 -0400
+Subject: IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit dfb5394f804ed4fcea1fc925be275a38d66712ab upstream.
+
+When kobject_init_and_add() returns an error in the function
+hfi1_create_port_files(), the function kobject_put() is not called for the
+corresponding kobject, which potentially leads to memory leak.
+
+This patch fixes the issue by calling kobject_put() even if
+kobject_init_and_add() fails.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200326163813.21129.44280.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/sysfs.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping sc2vl sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail;
++ /*
++ * Based on the documentation for kobject_init_and_add(), the
++ * caller should call kobject_put even if this call fails.
++ */
++ goto bail_sc2vl;
+ }
+ kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
+
+@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping sl2sc sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sc2vl;
++ goto bail_sl2sc;
+ }
+ kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
+
+@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping vl2mtu sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sl2sc;
++ goto bail_vl2mtu;
+ }
+ kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
+
+@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_dev
+ dd_dev_err(dd,
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_vl2mtu;
++ goto bail_cc;
+ }
+
+ kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+@@ -742,7 +746,6 @@ bail_sl2sc:
+ kobject_put(&ppd->sl2sc_kobj);
+ bail_sc2vl:
+ kobject_put(&ppd->sc2vl_kobj);
+-bail:
+ return ret;
+ }
+
--- /dev/null
+From 5c15abc4328ad696fa61e2f3604918ed0c207755 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 26 Mar 2020 12:38:07 -0400
+Subject: IB/hfi1: Fix memory leaks in sysfs registration and unregistration
+
+From: Kaike Wan <kaike.wan@intel.com>
+
+commit 5c15abc4328ad696fa61e2f3604918ed0c207755 upstream.
+
+When the hfi1 driver is unloaded, kmemleak will report the following
+issue:
+
+unreferenced object 0xffff8888461a4c08 (size 8):
+comm "kworker/0:0", pid 5, jiffies 4298601264 (age 2047.134s)
+hex dump (first 8 bytes):
+73 64 6d 61 30 00 ff ff sdma0...
+backtrace:
+[<00000000311a6ef5>] kvasprintf+0x62/0xd0
+[<00000000ade94d9f>] kobject_set_name_vargs+0x1c/0x90
+[<0000000060657dbb>] kobject_init_and_add+0x5d/0xb0
+[<00000000346fe72b>] 0xffffffffa0c5ecba
+[<000000006cfc5819>] 0xffffffffa0c866b9
+[<0000000031c65580>] 0xffffffffa0c38e87
+[<00000000e9739b3f>] local_pci_probe+0x41/0x80
+[<000000006c69911d>] work_for_cpu_fn+0x16/0x20
+[<00000000601267b5>] process_one_work+0x171/0x380
+[<0000000049a0eefa>] worker_thread+0x1d1/0x3f0
+[<00000000909cf2b9>] kthread+0xf8/0x130
+[<0000000058f5f874>] ret_from_fork+0x35/0x40
+
+This patch fixes the issue by:
+
+- Releasing dd->per_sdma[i].kobject in hfi1_unregister_sysfs().
+ - This will fix the memory leak.
+
+- Calling kobject_put() to unwind operations only for those entries in
+ dd->per_sdma[] whose operations have succeeded (including the current
+ one that has just failed) in hfi1_verbs_register_sysfs().
+
+Cc: <stable@vger.kernel.org>
+Fixes: 0cb2aa690c7e ("IB/hfi1: Add sysfs interface for affinity setup")
+Link: https://lore.kernel.org/r/20200326163807.21129.27371.stgit@awfm-01.aw.intel.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/sysfs.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -856,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi
+
+ return 0;
+ bail:
+- for (i = 0; i < dd->num_sdma; i++)
+- kobject_del(&dd->per_sdma[i].kobj);
++ /*
++ * The function kobject_put() will call kobject_del() if the kobject
++ * has been added successfully. The sysfs files created under the
++ * kobject directory will also be removed during the process.
++ */
++ for (; i >= 0; i--)
++ kobject_put(&dd->per_sdma[i].kobj);
+
+ return ret;
+ }
+@@ -870,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct
+ struct hfi1_pportdata *ppd;
+ int i;
+
++ /* Unwind operations in hfi1_verbs_register_sysfs() */
++ for (i = 0; i < dd->num_sdma; i++)
++ kobject_put(&dd->per_sdma[i].kobj);
++
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+
--- /dev/null
+From 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 Mon Sep 17 00:00:00 2001
+From: Alex Vesker <valex@mellanox.com>
+Date: Thu, 5 Mar 2020 14:38:41 +0200
+Subject: IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
+
+From: Alex Vesker <valex@mellanox.com>
+
+commit 41e684ef3f37ce6e5eac3fb5b9c7c1853f4b0447 upstream.
+
+Until now the flex parser capability was used in ib_query_device() to
+indicate tunnel_offloads_caps support for mpls_over_gre/mpls_over_udp.
+
+Newer devices and firmware will have configurations with the flexparser
+but without mpls support.
+
+Testing for the flex parser capability was a mistake, the tunnel_stateless
+capability was intended for detecting mpls and was introduced at the same
+time as the flex parser capability.
+
+Otherwise userspace will be incorrectly informed that a future device
+supports MPLS when it does not.
+
+Link: https://lore.kernel.org/r/20200305123841.196086-1-leon@kernel.org
+Cc: <stable@vger.kernel.org> # 4.17
+Fixes: e818e255a58d ("IB/mlx5: Expose MPLS related tunneling offloads")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Reviewed-by: Ariel Levkovich <lariel@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/main.c | 6 ++----
+ include/linux/mlx5/mlx5_ifc.h | 6 +++++-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1192,12 +1192,10 @@ static int mlx5_ib_query_device(struct i
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_GRE)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_UDP)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ }
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -875,7 +875,11 @@ struct mlx5_ifc_per_protocol_networking_
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 cqe_checksum_full[0x1];
+- u8 reserved_at_24[0x5];
++ u8 tunnel_stateless_geneve_tx[0x1];
++ u8 tunnel_stateless_mpls_over_udp[0x1];
++ u8 tunnel_stateless_mpls_over_gre[0x1];
++ u8 tunnel_stateless_vxlan_gpe[0x1];
++ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 reserved_at_2a[0x6];
+ u8 max_vxlan_udp_ports[0x8];
--- /dev/null
+From 767191db8220db29f78c031f4d27375173c336d5 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Fri, 3 Apr 2020 17:48:34 +0200
+Subject: platform/x86: intel_int0002_vgpio: Use acpi_register_wakeup_handler()
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 767191db8220db29f78c031f4d27375173c336d5 upstream.
+
+The Power Management Events (PMEs) the INT0002 driver listens for get
+signalled by the Power Management Controller (PMC) using the same IRQ
+as used for the ACPI SCI.
+
+Since commit fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from
+waking up the system") the SCI triggering, without there being a wakeup
+cause recognized by the ACPI sleep code, will no longer wakeup the system.
+
+This breaks PMEs / wakeups signalled to the INT0002 driver, the system
+never leaves the s2idle_loop() now.
+
+Use acpi_register_wakeup_handler() to register a function which checks
+the GPE0a_STS register for a PME and trigger a wakeup when a PME has
+been signalled.
+
+Fixes: fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system")
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/intel_int0002_vgpio.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq,
+ return IRQ_HANDLED;
+ }
+
++static bool int0002_check_wake(void *data)
++{
++ u32 gpe_sts_reg;
++
++ gpe_sts_reg = inl(GPE0A_STS_PORT);
++ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
++}
++
+ static struct irq_chip int0002_byt_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+@@ -220,6 +228,7 @@ static int int0002_probe(struct platform
+ return ret;
+ }
+
++ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ device_init_wakeup(dev, true);
+ return 0;
+ }
+@@ -227,6 +236,7 @@ static int int0002_probe(struct platform
+ static int int0002_remove(struct platform_device *pdev)
+ {
+ device_init_wakeup(&pdev->dev, false);
++ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
+ return 0;
+ }
+
--- /dev/null
+From 69efea712f5b0489e67d07565aad5c94e09a3e52 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Fri, 21 Feb 2020 21:10:37 +0100
+Subject: random: always use batched entropy for get_random_u{32,64}
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 69efea712f5b0489e67d07565aad5c94e09a3e52 upstream.
+
+It turns out that RDRAND is pretty slow. Comparing these two
+constructions:
+
+ for (i = 0; i < CHACHA_BLOCK_SIZE; i += sizeof(ret))
+ arch_get_random_long(&ret);
+
+and
+
+ long buf[CHACHA_BLOCK_SIZE / sizeof(long)];
+ extract_crng((u8 *)buf);
+
+it amortizes out to 352 cycles per long for the top one and 107 cycles
+per long for the bottom one, on Coffee Lake Refresh, Intel Core i9-9880H.
+
+And importantly, the top one has the drawback of not benefiting from the
+real rng, whereas the bottom one has all the nice benefits of using our
+own chacha rng. As get_random_u{32,64} gets used in more places (perhaps
+beyond what it was originally intended for when it was introduced as
+get_random_{int,long} back in the md5 monstrosity era), it seems like it
+might be a good thing to strengthen its posture a tiny bit. Doing this
+should only be stronger and not any weaker because that pool is already
+initialized with a bunch of rdrand data (when available). This way, we
+get the benefits of the hardware rng as well as our own rng.
+
+Another benefit of this is that we no longer hit pitfalls of the recent
+stream of AMD bugs in RDRAND. One often used code pattern for various
+things is:
+
+ do {
+ val = get_random_u32();
+ } while (hash_table_contains_key(val));
+
+That recent AMD bug rendered that pattern useless, whereas we're really
+very certain that chacha20 output will give pretty distributed numbers,
+no matter what.
+
+So, this simplification seems better both from a security perspective
+and from a performance perspective.
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20200221201037.30231-1-Jason@zx2c4.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/random.c | 20 ++++----------------
+ 1 file changed, 4 insertions(+), 16 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2149,11 +2149,11 @@ struct batched_entropy {
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+- * number is either as good as RDRAND or as good as /dev/urandom, with the
+- * goal of being quite fast and not depleting entropy. In order to ensure
++ * number is good as /dev/urandom, but there is no backtrack protection, with
++ * the goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+- * wait_for_random_bytes() should be called and return 0 at least once
+- * at any point prior.
++ * wait_for_random_bytes() should be called and return 0 at least once at any
++ * point prior.
+ */
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+@@ -2166,15 +2166,6 @@ u64 get_random_u64(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+-#if BITS_PER_LONG == 64
+- if (arch_get_random_long((unsigned long *)&ret))
+- return ret;
+-#else
+- if (arch_get_random_long((unsigned long *)&ret) &&
+- arch_get_random_long((unsigned long *)&ret + 1))
+- return ret;
+-#endif
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+@@ -2199,9 +2190,6 @@ u32 get_random_u32(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+- if (arch_get_random_int(&ret))
+- return ret;
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u32);
--- /dev/null
+From 0b38b5e1d0e2f361e418e05c179db05bb688bbd6 Mon Sep 17 00:00:00 2001
+From: Sven Schnelle <svens@linux.ibm.com>
+Date: Wed, 22 Jan 2020 13:38:22 +0100
+Subject: s390: prevent leaking kernel address in BEAR
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+commit 0b38b5e1d0e2f361e418e05c179db05bb688bbd6 upstream.
+
+When userspace executes a syscall or gets interrupted,
+BEAR contains a kernel address when returning to userspace.
+This makes it pretty easy to figure out where the kernel is
+mapped even with KASLR enabled. To fix this, add lpswe to
+lowcore and always execute it there, so userspace sees only
+the lowcore address of lpswe. For this we have to extend
+both critical_cleanup and the SWITCH_ASYNC macro to also check
+for lpswe addresses in lowcore.
+
+Fixes: b2d24b97b2a9 ("s390/kernel: add support for kernel address space layout randomization (KASLR)")
+Cc: <stable@vger.kernel.org> # v5.2+
+Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/lowcore.h | 4 +-
+ arch/s390/include/asm/processor.h | 1
+ arch/s390/include/asm/setup.h | 7 ++++
+ arch/s390/kernel/asm-offsets.c | 2 +
+ arch/s390/kernel/entry.S | 65 ++++++++++++++++++++++----------------
+ arch/s390/kernel/process.c | 1
+ arch/s390/kernel/setup.c | 3 +
+ arch/s390/kernel/smp.c | 2 +
+ arch/s390/mm/vmem.c | 4 ++
+ 9 files changed, 62 insertions(+), 27 deletions(-)
+
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -141,7 +141,9 @@ struct lowcore {
+
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0400 */
+- __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
++ __u32 return_lpswe; /* 0x0402 */
++ __u32 return_mcck_lpswe; /* 0x0406 */
++ __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -161,6 +161,7 @@ typedef struct thread_struct thread_stru
+ #define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+ .fpu.regs = (void *) init_task.thread.fpu.fprs, \
++ .last_break = 1, \
+ }
+
+ /*
+--- a/arch/s390/include/asm/setup.h
++++ b/arch/s390/include/asm/setup.h
+@@ -8,6 +8,7 @@
+
+ #include <linux/bits.h>
+ #include <uapi/asm/setup.h>
++#include <linux/build_bug.h>
+
+ #define EP_OFFSET 0x10008
+ #define EP_STRING "S390EP"
+@@ -162,6 +163,12 @@ static inline unsigned long kaslr_offset
+ return __kaslr_offset;
+ }
+
++static inline u32 gen_lpswe(unsigned long addr)
++{
++ BUILD_BUG_ON(addr > 0xfff);
++ return 0xb2b20000 | addr;
++}
++
+ #else /* __ASSEMBLY__ */
+
+ #define IPL_DEVICE (IPL_DEVICE_OFFSET)
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -124,6 +124,8 @@ int main(void)
+ OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
+ OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
+ OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
++ OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
++ OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
+ OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
+ OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
+ OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP
+
+ .macro SWITCH_ASYNC savearea,timer
+ tmhh %r8,0x0001 # interrupting from user ?
+- jnz 1f
++ jnz 2f
+ lgr %r14,%r9
++ cghi %r14,__LC_RETURN_LPSWE
++ je 0f
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
+- jhe 0f
++ jhe 1f
++0:
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
+- jnz 1f
+-0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
++ jnz 2f
++1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,STACK_SHIFT
+- jnz 2f
++ jnz 3f
+ CHECK_STACK \savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- j 3f
+-1: UPDATE_VTIME %r14,%r15,\timer
++ j 4f
++2: UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+-2: lg %r15,__LC_ASYNC_STACK # load async stack
+-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
++3: lg %r15,__LC_ASYNC_STACK # load async stack
++4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ .endm
+
+ .macro UPDATE_VTIME w1,w2,enter_timer
+@@ -401,7 +404,7 @@ ENTRY(system_call)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lsysc_done:
+
+ #
+@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
+ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+- lg %r12,__LC_CURRENT
++ srag %r11,%r10,12
++ jnz 0f
++ /* if __LC_LAST_BREAK is < 4096, it contains one of
++ * the lpswe addresses in lowcore. Set it to 1 (initial state)
++ * to prevent leaking that address to userspace.
++ */
++ lghi %r10,1
++0: lg %r12,__LC_CURRENT
+ lghi %r11,0
+ larl %r13,cleanup_critical
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ tmhh %r8,0x0001 # test problem state bit
+- jnz 2f # -> fault in user space
++ jnz 3f # -> fault in user space
+ #if IS_ENABLED(CONFIG_KVM)
+ # cleanup critical section for program checks in sie64a
+ lgr %r14,%r9
+ slg %r14,BASED(.Lsie_critical_start)
+ clg %r14,BASED(.Lsie_critical_length)
+- jhe 0f
++ jhe 1f
+ lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ lghi %r11,_PIF_GUEST_FAULT
+ #endif
+-0: tmhh %r8,0x4000 # PER bit set in old PSW ?
+- jnz 1f # -> enabled, can't be a double fault
++1: tmhh %r8,0x4000 # PER bit set in old PSW ?
++ jnz 2f # -> enabled, can't be a double fault
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jnz .Lpgm_svcper # -> single stepped svc
+-1: CHECK_STACK __LC_SAVE_AREA_SYNC
++2: CHECK_STACK __LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- # CHECK_VMAP_STACK branches to stack_overflow or 4f
+- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
+-2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++ # CHECK_VMAP_STACK branches to stack_overflow or 5f
++ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
++3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ lg %r15,__LC_KERNEL_STACK
+ lgr %r14,%r12
+ aghi %r14,__TASK_thread # pointer to thread_struct
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+- jz 3f
++ jz 4f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
+-3: stg %r10,__THREAD_last_break(%r14)
+-4: lgr %r13,%r11
++4: stg %r10,__THREAD_last_break(%r14)
++5: lgr %r13,%r11
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
+ stg %r13,__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+- jz 5f
++ jz 6f
+ tmhh %r8,0x0001 # kernel per event ?
+ jz .Lpgm_kprobe
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
+-5: REENABLE_IRQS
++6: REENABLE_IRQS
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ .Lio_exit_kernel:
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lio_done:
+
+ #
+@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0: lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_MCCK_PSW
++ b __LC_RETURN_MCCK_LPSWE
+
+ .Lmcck_panic:
+ lg %r15,__LC_NODAT_STACK
+@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
+ #endif
+
+ ENTRY(cleanup_critical)
++ cghi %r9,__LC_RETURN_LPSWE
++ je .Lcleanup_lpswe
+ #if IS_ENABLED(CONFIG_KVM)
+ clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
+ jl 0f
+@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
++.Lcleanup_lpswe:
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+ BR_EX %r14,%r11
+ .Lcleanup_sysc_restore_insn:
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -106,6 +106,7 @@ int copy_thread_tls(unsigned long clone_
+ p->thread.system_timer = 0;
+ p->thread.hardirq_timer = 0;
+ p->thread.softirq_timer = 0;
++ p->thread.last_break = 1;
+
+ frame->sf.back_chain = 0;
+ /* new return point is ret_from_fork */
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -73,6 +73,7 @@
+ #include <asm/nospec-branch.h>
+ #include <asm/mem_detect.h>
+ #include <asm/uv.h>
++#include <asm/asm-offsets.h>
+ #include "entry.h"
+
+ /*
+@@ -450,6 +451,8 @@ static void __init setup_lowcore_dat_off
+ lc->spinlock_index = 0;
+ arch_spin_lock_setup(0);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+
+ set_prefix((u32)(unsigned long) lc);
+ lowcore_ptr[0] = lc;
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcp
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->spinlock_index = 0;
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+ if (nmi_alloc_per_cpu(lc))
+ goto out_async;
+ if (vdso_alloc_per_cpu(lc))
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
+ SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
++
++ /* we need lowcore executable for our LPSWE instructions */
++ set_memory_x(0, 1);
++
+ pr_info("Write protected kernel read-only data: %luk\n",
+ (unsigned long)(__end_rodata - _stext) >> 10);
+ }
r8169-change-back-sg-and-tso-to-be-disabled-by-default.patch
cxgb4-free-mqprio-resources-in-shutdown-path.patch
net-phy-at803x-fix-clock-sink-configuration-on-ath8030-and-ath8035.patch
+s390-prevent-leaking-kernel-address-in-bear.patch
+random-always-use-batched-entropy-for-get_random_u-32-64.patch
+usb-dwc3-gadget-wrap-around-when-skip-trbs.patch
+slub-improve-bit-diffusion-for-freelist-ptr-obfuscation.patch
+tools-accounting-getdelays.c-fix-netlink-attribute-length.patch
+hwrng-imx-rngc-fix-an-error-path.patch
+platform-x86-intel_int0002_vgpio-use-acpi_register_wakeup_handler.patch
+asoc-tas2562-fixed-incorrect-amp_level-setting.patch
+asoc-jz4740-i2s-fix-divider-written-at-incorrect-offset-in-register.patch
+ib-hfi1-call-kobject_put-when-kobject_init_and_add-fails.patch
+ib-hfi1-fix-memory-leaks-in-sysfs-registration-and-unregistration.patch
+ib-mlx5-replace-tunnel-mpls-capability-bits-for-tunnel_offloads.patch
--- /dev/null
+From 1ad53d9fa3f6168ebcf48a50e08b170432da2257 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 1 Apr 2020 21:04:23 -0700
+Subject: slub: improve bit diffusion for freelist ptr obfuscation
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 1ad53d9fa3f6168ebcf48a50e08b170432da2257 upstream.
+
+Under CONFIG_SLAB_FREELIST_HARDENED=y, the obfuscation was relatively weak
+in that the ptr and ptr address were usually so close that the first XOR
+would result in an almost entirely 0-byte value[1], leaving most of the
+"secret" number ultimately being stored after the third XOR. A single
+blind memory content exposure of the freelist was generally sufficient to
+learn the secret.
+
+Add a swab() call to mix bits a little more. This is a cheap way (1
+cycle) to make attacks need more than a single exposure to learn the
+secret (or to know _where_ the exposure is in memory).
+
+kmalloc-32 freelist walk, before:
+
+ptr ptr_addr stored value secret
+ffff90c22e019020@ffff90c22e019000 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019040@ffff90c22e019020 is 86528eb656b3b5fd (86528eb656b3b59d)
+ffff90c22e019060@ffff90c22e019040 is 86528eb656b3b5bd (86528eb656b3b59d)
+ffff90c22e019080@ffff90c22e019060 is 86528eb656b3b57d (86528eb656b3b59d)
+ffff90c22e0190a0@ffff90c22e019080 is 86528eb656b3b5bd (86528eb656b3b59d)
+...
+
+after:
+
+ptr ptr_addr stored value secret
+ffff9eed6e019020@ffff9eed6e019000 is 793d1135d52cda42 (86528eb656b3b59d)
+ffff9eed6e019040@ffff9eed6e019020 is 593d1135d52cda22 (86528eb656b3b59d)
+ffff9eed6e019060@ffff9eed6e019040 is 393d1135d52cda02 (86528eb656b3b59d)
+ffff9eed6e019080@ffff9eed6e019060 is 193d1135d52cdae2 (86528eb656b3b59d)
+ffff9eed6e0190a0@ffff9eed6e019080 is f93d1135d52cdac2 (86528eb656b3b59d)
+
+[1] https://blog.infosectcbr.com.au/2020/03/weaknesses-in-linux-kernel-heap.html
+
+Fixes: 2482ddec670f ("mm: add SLUB free list pointer obfuscation")
+Reported-by: Silvio Cesare <silvio.cesare@gmail.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/202003051623.AF4F8CB@keescook
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/slub.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -259,7 +259,7 @@ static inline void *freelist_ptr(const s
+ * freepointer to be restored incorrectly.
+ */
+ return (void *)((unsigned long)ptr ^ s->random ^
+- (unsigned long)kasan_reset_tag((void *)ptr_addr));
++ swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
+ #else
+ return ptr;
+ #endif
--- /dev/null
+From 4054ab64e29bb05b3dfe758fff3c38a74ba753bb Mon Sep 17 00:00:00 2001
+From: David Ahern <dsahern@kernel.org>
+Date: Wed, 1 Apr 2020 21:02:25 -0700
+Subject: tools/accounting/getdelays.c: fix netlink attribute length
+
+From: David Ahern <dsahern@kernel.org>
+
+commit 4054ab64e29bb05b3dfe758fff3c38a74ba753bb upstream.
+
+A recent change to the netlink code: 6e237d099fac ("netlink: Relax attr
+validation for fixed length types") logs a warning when programs send
+messages with invalid attributes (e.g., wrong length for a u32). Yafang
+reported this error message for tools/accounting/getdelays.c.
+
+send_cmd() is wrongly adding 1 to the attribute length. As noted in
+include/uapi/linux/netlink.h nla_len should be NLA_HDRLEN + payload
+length, so drop the +1.
+
+Fixes: 9e06d3f9f6b1 ("per task delay accounting taskstats interface: documentation fix")
+Reported-by: Yafang Shao <laoar.shao@gmail.com>
+Signed-off-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Tested-by: Yafang Shao <laoar.shao@gmail.com>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Shailabh Nagar <nagar@watson.ibm.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200327173111.63922-1-dsahern@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/accounting/getdelays.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/accounting/getdelays.c
++++ b/tools/accounting/getdelays.c
+@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+- na->nla_len = nla_len + 1 + NLA_HDRLEN;
++ na->nla_len = nla_len + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
--- /dev/null
+From 2dedea035ae82c5af0595637a6eda4655532b21e Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 5 Mar 2020 13:24:01 -0800
+Subject: usb: dwc3: gadget: Wrap around when skip TRBs
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 2dedea035ae82c5af0595637a6eda4655532b21e upstream.
+
+When skipping TRBs, we need to account for wrapping around the ring
+buffer and not modifying some invalid TRBs. Without this fix, dwc3 won't
+be able to check for available TRBs.
+
+Cc: stable <stable@vger.kernel.org>
+Fixes: 7746a8dfb3f9 ("usb: dwc3: gadget: extract dwc3_gadget_ep_skip_trbs()")
+Signed-off-by: Thinh Nguyen <thinhn@synopsys.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/dwc3/gadget.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1521,7 +1521,7 @@ static void dwc3_gadget_ep_skip_trbs(str
+ for (i = 0; i < req->num_trbs; i++) {
+ struct dwc3_trb *trb;
+
+- trb = req->trb + i;
++ trb = &dep->trb_pool[dep->trb_dequeue];
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }