--- /dev/null
+From f21d498e523a26c32aac5e5861469b51430afa89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 17:00:24 +0100
+Subject: afs: Fix storage of cell names
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 719fdd32921fb7e3208db8832d32ae1c2d68900f ]
+
+The cell name stored in the afs_cell struct is a 64-char + NUL buffer,
+but it needs to be able to handle up to AFS_MAXCELLNAME (256 chars) + NUL.
+
+Fix this by changing the array to a pointer and allocating the string.
+
+Found using Coverity.
+
+Fixes: 989782dcdc91 ("afs: Overhaul cell database management")
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/afs/cell.c | 9 +++++++++
+ fs/afs/internal.h | 2 +-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index 78ba5f9322879..296b489861a9a 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
+ return ERR_PTR(-ENOMEM);
+ }
+
++ cell->name = kmalloc(namelen + 1, GFP_KERNEL);
++ if (!cell->name) {
++ kfree(cell);
++ return ERR_PTR(-ENOMEM);
++ }
++
+ cell->net = net;
+ cell->name_len = namelen;
+ for (i = 0; i < namelen; i++)
+ cell->name[i] = tolower(name[i]);
++ cell->name[i] = 0;
+
+ atomic_set(&cell->usage, 2);
+ INIT_WORK(&cell->manager, afs_manage_cell);
+@@ -203,6 +210,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
+ if (ret == -EINVAL)
+ printk(KERN_ERR "kAFS: bad VL server IP address\n");
+ error:
++ kfree(cell->name);
+ kfree(cell);
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
+@@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
+
+ afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
+ key_put(cell->anonymous_key);
++ kfree(cell->name);
+ kfree(cell);
+
+ _leave(" [destroyed]");
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 555ad7c9afcb6..7fe88d918b238 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -397,7 +397,7 @@ struct afs_cell {
+ struct afs_vlserver_list __rcu *vl_servers;
+
+ u8 name_len; /* Length of name */
+- char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
++ char *name; /* Cell name, case-flattened and NUL-padded */
+ };
+
+ /*
+--
+2.25.1
+
--- /dev/null
+From 10d8abe120697a6d5509686e7cec2b51f705804d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jun 2020 23:45:21 +0200
+Subject: ARM: dts: am335x-pocketbeagle: Fix mmc0 Write Protect
+
+From: Drew Fustini <drew@beagleboard.org>
+
+[ Upstream commit d7af722344e6dc52d87649100516515263e15c75 ]
+
+AM3358 pin mcasp0_aclkr (ZCZ ball B13) [0] is routed to the P1.31 header [1].
+Mode 4 of this pin is mmc0_sdwp (SD Write Protect). A signal connected
+to P1.31 may accidentally trigger mmc0 write protection. To avoid this
+situation, do not put mcasp0_aclkr in mode 4 (mmc0_sdwp) by default.
+
+[0] http://www.ti.com/lit/ds/symlink/am3358.pdf
+[1] https://github.com/beagleboard/pocketbeagle/wiki/System-Reference-Manual#531_Expansion_Headers
+
+Fixes: 047905376a16 (ARM: dts: Add am335x-pocketbeagle)
+Signed-off-by: Robert Nelson <robertcnelson@gmail.com>
+Signed-off-by: Drew Fustini <drew@beagleboard.org>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/am335x-pocketbeagle.dts | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
+index ff4f919d22f62..abf2badce53d9 100644
+--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
++++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
+@@ -88,7 +88,6 @@ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+- AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
+ >;
+ };
+
+--
+2.25.1
+
--- /dev/null
+From 7ed84602827cd0050e07cd294b1cd7d575413d15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 10:19:50 -0700
+Subject: ARM: dts: Fix duovero smsc interrupt for suspend
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 9cf28e41f9f768791f54ee18333239fda6927ed8 ]
+
+While testing the recent suspend and resume regressions I noticed that
+duovero can still end up losing edge gpio interrupts on runtime
+suspend. This easily causes NFSroot to stop working after resume on
+duovero.
+
+Let's fix the issue by using gpio level interrupts for smsc as then
+the gpio interrupt state is seen by the gpio controller on resume.
+
+Fixes: 731b409878a3 ("ARM: dts: Configure duovero for to allow core retention during idle")
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/omap4-duovero-parlor.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
+index 8047e8cdb3af0..4548d87534e37 100644
+--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
++++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
+@@ -139,7 +139,7 @@ &gpmc {
+ ethernet@gpmc {
+ reg = <5 0 0xff>;
+ interrupt-parent = <&gpio2>;
+- interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
++ interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
+
+ phy-mode = "mii";
+
+--
+2.25.1
+
--- /dev/null
+From 4b25ac50f26be45b76a09ab2098e2a6286473b05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Jun 2020 15:19:00 -0700
+Subject: ARM: dts: NSP: Correct FA2 mailbox node
+
+From: Matthew Hagan <mnhagan88@gmail.com>
+
+[ Upstream commit ac4e106d8934a5894811fc263f4b03fc8ed0fb7a ]
+
+The FA2 mailbox is specified at 0x18025000 but should actually be
+0x18025c00, length 0x400 according to socregs_nsp.h and board_bu.c. Also
+the interrupt was off by one and should be GIC SPI 151 instead of 150.
+
+Fixes: 17d517172300 ("ARM: dts: NSP: Add mailbox (PDC) to NSP")
+Signed-off-by: Matthew Hagan <mnhagan88@gmail.com>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/bcm-nsp.dtsi | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index da6d70f09ef19..418e6b97cb2ec 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -257,10 +257,10 @@ amac2: ethernet@24000 {
+ status = "disabled";
+ };
+
+- mailbox: mailbox@25000 {
++ mailbox: mailbox@25c00 {
+ compatible = "brcm,iproc-fa2-mbox";
+- reg = <0x25000 0x445>;
+- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
++ reg = <0x25c00 0x400>;
++ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ brcm,rx-status-len = <32>;
+ brcm,use-bcm-hdr;
+--
+2.25.1
+
--- /dev/null
+From e33d99c50244c00bf1817e6da1821f842406713b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 20:42:06 +0800
+Subject: ARM: imx5: add missing put_device() call in imx_suspend_alloc_ocram()
+
+From: yu kuai <yukuai3@huawei.com>
+
+[ Upstream commit 586745f1598ccf71b0a5a6df2222dee0a865954e ]
+
+If of_find_device_by_node() succeeds, imx_suspend_alloc_ocram() doesn't
+have a corresponding put_device(). Thus add a jump target to fix the
+exception handling for this function implementation.
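+
+For reference, the reference-counting pattern being fixed looks roughly
+like this (a simplified sketch, not the actual pm-imx5.c code):
+
+```
+	pdev = of_find_device_by_node(node);  /* takes a reference on pdev->dev */
+	if (!pdev)
+		goto put_node;
+
+	/* ... any failure after this point must drop that reference ... */
+
+put_device:
+	put_device(&pdev->dev);
+put_node:
+	of_node_put(node);
+	return ret;
+```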
+
+Fixes: 1579c7b9fe01 ("ARM: imx53: Set DDR pins to high impedance when in suspend to RAM.")
+Signed-off-by: yu kuai <yukuai3@huawei.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-imx/pm-imx5.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
+index f057df813f83a..e9962b48e30cb 100644
+--- a/arch/arm/mach-imx/pm-imx5.c
++++ b/arch/arm/mach-imx/pm-imx5.c
+@@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
+ if (!ocram_pool) {
+ pr_warn("%s: ocram pool unavailable!\n", __func__);
+ ret = -ENODEV;
+- goto put_node;
++ goto put_device;
+ }
+
+ ocram_base = gen_pool_alloc(ocram_pool, size);
+ if (!ocram_base) {
+ pr_warn("%s: unable to alloc ocram!\n", __func__);
+ ret = -ENOMEM;
+- goto put_node;
++ goto put_device;
+ }
+
+ phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
+@@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
+ if (virt_out)
+ *virt_out = virt;
+
++put_device:
++ put_device(&pdev->dev);
+ put_node:
+ of_node_put(node);
+
+--
+2.25.1
+
--- /dev/null
+From fdb2ca2d66ee49fc0b111528d6847e61b64e0acf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 May 2020 16:32:06 -0700
+Subject: ARM: OMAP2+: Fix legacy mode dss_reset
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 77cad9dbc957f23a73169e8a8971186744296614 ]
+
+We must check for "dss_core" instead of "dss" to avoid also matching
+"dss_dispc". This only matters for the mixed case of data configured in
+device tree but with the legacy booting ti,hwmods property still enabled.
+
+Fixes: 8b30919a4e3c ("ARM: OMAP2+: Handle reset quirks for dynamically allocated modules")
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-omap2/omap_hwmod.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 203664c40d3d2..eb74aa1826614 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -3535,7 +3535,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
+ };
+
+ static const struct omap_hwmod_reset omap_reset_quirks[] = {
+- { .match = "dss", .len = 3, .reset = omap_dss_reset, },
++ { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
+ { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
+ { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
+ { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
+--
+2.25.1
+
--- /dev/null
+From cd761775af90be0d172b2ecd6608efb9aeac9b91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jun 2020 18:03:10 +0100
+Subject: arm64/sve: Eliminate data races on sve_default_vl
+
+From: Dave Martin <Dave.Martin@arm.com>
+
+[ Upstream commit 1e570f512cbdc5e9e401ba640d9827985c1bea1e ]
+
+sve_default_vl can be modified via the /proc/sys/abi/sve_default_vl
+sysctl concurrently with use, and modified concurrently by multiple
+threads.
+
+Adding a lock for this seems overkill, and I don't want to think any
+more than necessary, so just define wrappers using READ_ONCE()/
+WRITE_ONCE().
+
+This will avoid the possibility of torn accesses and repeated loads
+and stores.
+
+There's no evidence yet that this is going wrong in practice: this
+is just hygiene. For generic sysctl users, it would be better to
+build this kind of thing into the sysctl common code somehow.
+
+Reported-by: Will Deacon <will@kernel.org>
+Signed-off-by: Dave Martin <Dave.Martin@arm.com>
+Link: https://lore.kernel.org/r/1591808590-20210-3-git-send-email-Dave.Martin@arm.com
+[will: move set_sve_default_vl() inside #ifdef to squash allnoconfig warning]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/fpsimd.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 1765e5284994f..d8895251a2aac 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -12,6 +12,7 @@
+ #include <linux/bug.h>
+ #include <linux/cache.h>
+ #include <linux/compat.h>
++#include <linux/compiler.h>
+ #include <linux/cpu.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/kernel.h>
+@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
+ static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
+
+ /* Default VL for tasks that don't set it explicitly: */
+-static int sve_default_vl = -1;
++static int __sve_default_vl = -1;
++
++static int get_sve_default_vl(void)
++{
++ return READ_ONCE(__sve_default_vl);
++}
+
+ #ifdef CONFIG_ARM64_SVE
+
++static void set_sve_default_vl(int val)
++{
++ WRITE_ONCE(__sve_default_vl, val);
++}
++
+ /* Maximum supported vector length across all CPUs (initially poisoned) */
+ int __ro_after_init sve_max_vl = SVE_VL_MIN;
+ int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+@@ -345,7 +356,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ loff_t *ppos)
+ {
+ int ret;
+- int vl = sve_default_vl;
++ int vl = get_sve_default_vl();
+ struct ctl_table tmp_table = {
+ .data = &vl,
+ .maxlen = sizeof(vl),
+@@ -362,7 +373,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+- sve_default_vl = find_supported_vector_length(vl);
++ set_sve_default_vl(find_supported_vector_length(vl));
+ return 0;
+ }
+
+@@ -869,7 +880,7 @@ void __init sve_setup(void)
+ * For the default VL, pick the maximum supported value <= 64.
+ * VL == 64 is guaranteed not to grow the signal frame.
+ */
+- sve_default_vl = find_supported_vector_length(64);
++ set_sve_default_vl(find_supported_vector_length(64));
+
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+@@ -890,7 +901,7 @@ void __init sve_setup(void)
+ pr_info("SVE: maximum available vector length %u bytes per vector\n",
+ sve_max_vl);
+ pr_info("SVE: default vector length %u bytes per vector\n",
+- sve_default_vl);
++ get_sve_default_vl());
+
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+@@ -1030,13 +1041,13 @@ void fpsimd_flush_thread(void)
+ * vector length configured: no kernel task can become a user
+ * task without an exec and hence a call to this function.
+ * By the time the first call to this function is made, all
+- * early hardware probing is complete, so sve_default_vl
++ * early hardware probing is complete, so __sve_default_vl
+ * should be valid.
+ * If a bug causes this to go wrong, we make some noise and
+ * try to fudge thread.sve_vl to a safe value here.
+ */
+ vl = current->thread.sve_vl_onexec ?
+- current->thread.sve_vl_onexec : sve_default_vl;
++ current->thread.sve_vl_onexec : get_sve_default_vl();
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+--
+2.25.1
+
--- /dev/null
+From cf848a673681f59cd115aa2511bb62b1e264a1af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 18:29:11 +0100
+Subject: arm64: sve: Fix build failure when ARM64_SVE=y and SYSCTL=n
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit e575fb9e76c8e33440fb859572a8b7d430f053d6 ]
+
+When I squashed the 'allnoconfig' compiler warning about the
+set_sve_default_vl() function being defined but not used in commit
+1e570f512cbd ("arm64/sve: Eliminate data races on sve_default_vl"), I
+accidentally broke the build for configs where ARM64_SVE is enabled, but
+SYSCTL is not.
+
+Fix this by only compiling the SVE sysctl support if both
+CONFIG_ARM64_SVE=y and CONFIG_SYSCTL=y.
+
+Cc: Dave Martin <Dave.Martin@arm.com>
+Reported-by: Qian Cai <cai@lca.pw>
+Link: https://lore.kernel.org/r/20200616131808.GA1040@lca.pw
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/fpsimd.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index d8895251a2aac..338e0966d3ca2 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -349,7 +349,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
+ return sve_vl_from_vq(__bit_to_vq(bit));
+ }
+
+-#ifdef CONFIG_SYSCTL
++#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
+
+ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+@@ -395,9 +395,9 @@ static int __init sve_sysctl_init(void)
+ return 0;
+ }
+
+-#else /* ! CONFIG_SYSCTL */
++#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+ static int __init sve_sysctl_init(void) { return 0; }
+-#endif /* ! CONFIG_SYSCTL */
++#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+
+ #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
+ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+--
+2.25.1
+
--- /dev/null
+From 70fc02d7092b2a573b3134c66b9cfbd849555969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 10:53:48 +0800
+Subject: ASoC: fsl_ssi: Fix bclk calculation for mono channel
+
+From: Shengjiu Wang <shengjiu.wang@nxp.com>
+
+[ Upstream commit ed1220df6e666500ebf58c4f2fccc681941646fb ]
+
+For mono channel, SSI will switch to Normal mode.
+
+In Normal mode and Network mode, the Word Length Control bits
+control the word length divider in the clock generator, which is
+different from I2S Master mode (where the word length is fixed at
+32 bits); it should be the value of params_width(hw_params).
+
+The condition "slots == 2" is not a reliable check for I2S Master
+mode, because in Network mode and Normal mode the slot count can
+also be 2. We need to use (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK)
+to check whether it is I2S Master mode.
+
+So we refine the formula for mono channel, otherwise there
+will be sound issues for S24_LE.
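+
+To illustrate the formula with assumed stream parameters (mono S24_LE
+at 48 kHz, Normal mode, no set_tdm_slot() override), the new code
+computes:
+
+```
+	slots      = 2                     /* default when not overridden */
+	slot_width = params_width() = 24   /* S24_LE */
+	bclk       = slots * slot_width * rate
+	           = 2 * 24 * 48000 = 2304000 Hz
+```
+
+whereas the old code, with slots taken from the channel count and the
+slot width left at 32 bits, would have computed 1 * 32 * 48000 =
+1536000 Hz for the same stream.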
+
+Fixes: b0a7043d5c2c ("ASoC: fsl_ssi: Caculate bit clock rate using slot number and width")
+Signed-off-by: Shengjiu Wang <shengjiu.wang@nxp.com>
+Reviewed-by: Nicolin Chen <nicoleotsuka@gmail.com>
+Link: https://lore.kernel.org/r/034eff1435ff6ce300b6c781130cefd9db22ab9a.1592276147.git.shengjiu.wang@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_ssi.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 537dc69256f0e..a4ebd6ddaba10 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
+ struct regmap *regs = ssi->regs;
+ u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
+ unsigned long clkrate, baudrate, tmprate;
+- unsigned int slots = params_channels(hw_params);
+- unsigned int slot_width = 32;
++ unsigned int channels = params_channels(hw_params);
++ unsigned int slot_width = params_width(hw_params);
++ unsigned int slots = 2;
+ u64 sub, savesub = 100000;
+ unsigned int freq;
+ bool baudclk_is_used;
+@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
+ /* Override slots and slot_width if being specifically set... */
+ if (ssi->slots)
+ slots = ssi->slots;
+- /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
+- if (ssi->slot_width && slots != 2)
++ if (ssi->slot_width)
+ slot_width = ssi->slot_width;
+
++ /* ...but force 32 bits for stereo audio using I2S Master Mode */
++ if (channels == 2 &&
++ (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
++ slot_width = 32;
++
+ /* Generate bit clock based on the slot number and slot width */
+ freq = slots * slot_width * params_rate(hw_params);
+
+--
+2.25.1
+
--- /dev/null
+From 7c886d29890c071f64cb78ec9bce05d23537d887 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 13:37:10 +0100
+Subject: ASoc: q6afe: add support to get port direction
+
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+[ Upstream commit 4a95737440d426e93441d49d11abf4c6526d4666 ]
+
+This patch adds q6afe_is_rx_port() to get the direction of a DSP BE
+dai port; this is useful for setting dailink directions correctly.
+
+Fixes: c25e295cd77b (ASoC: qcom: Add support to parse common audio device nodes)
+Reported-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Reviewed-by: Vinod Koul <vkoul@kernel.org>
+Link: https://lore.kernel.org/r/20200612123711.29130-1-srinivas.kandagatla@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/qdsp6/q6afe.c | 8 ++++++++
+ sound/soc/qcom/qdsp6/q6afe.h | 1 +
+ 2 files changed, 9 insertions(+)
+
+diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
+index e0945f7a58c81..0ce4eb60f9848 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.c
++++ b/sound/soc/qcom/qdsp6/q6afe.c
+@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
+ }
+ EXPORT_SYMBOL_GPL(q6afe_get_port_id);
+
++int q6afe_is_rx_port(int index)
++{
++ if (index < 0 || index >= AFE_PORT_MAX)
++ return -EINVAL;
++
++ return port_maps[index].is_rx;
++}
++EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
+ static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
+ struct q6afe_port *port)
+ {
+diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
+index c7ed5422baffd..1a0f80a14afea 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.h
++++ b/sound/soc/qcom/qdsp6/q6afe.h
+@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
+ int q6afe_port_stop(struct q6afe_port *port);
+ void q6afe_port_put(struct q6afe_port *port);
+ int q6afe_get_port_id(int index);
++int q6afe_is_rx_port(int index);
+ void q6afe_hdmi_port_prepare(struct q6afe_port *port,
+ struct q6afe_hdmi_cfg *cfg);
+ void q6afe_slim_port_prepare(struct q6afe_port *port,
+--
+2.25.1
+
--- /dev/null
+From e940c0b856b83cecfd32c37d0b75374a1c8b7cdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jun 2020 13:41:53 +0100
+Subject: ASoC: q6asm: handle EOS correctly
+
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+[ Upstream commit 6476b60f32866be49d05e2e0163f337374c55b06 ]
+
+A successful send of the EOS command does not indicate that EOS has
+actually finished; the correct event to wait on for EOS completion is
+the EOS_RENDERED event.
+EOS_RENDERED means that the DSP has finished processing all the buffers
+for that particular session and stream.
+
+This patch fixes EOS handling!
+
+Fixes: 68fd8480bb7b ("ASoC: qdsp6: q6asm: Add support to audio stream apis")
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20200611124159.20742-3-srinivas.kandagatla@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/qdsp6/q6asm.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
+index e8141a33a55e5..835ac98a789cd 100644
+--- a/sound/soc/qcom/qdsp6/q6asm.c
++++ b/sound/soc/qcom/qdsp6/q6asm.c
+@@ -25,6 +25,7 @@
+ #define ASM_STREAM_CMD_FLUSH 0x00010BCE
+ #define ASM_SESSION_CMD_PAUSE 0x00010BD3
+ #define ASM_DATA_CMD_EOS 0x00010BDB
++#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
+ #define ASM_NULL_POPP_TOPOLOGY 0x00010C68
+ #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
+ #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+@@ -546,9 +547,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
+ case ASM_SESSION_CMD_SUSPEND:
+ client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
+ break;
+- case ASM_DATA_CMD_EOS:
+- client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+- break;
+ case ASM_STREAM_CMD_FLUSH:
+ client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
+ break;
+@@ -651,6 +649,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
+ spin_unlock_irqrestore(&ac->lock, flags);
+ }
+
++ break;
++ case ASM_DATA_EVENT_RENDERED_EOS:
++ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+ break;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 8586612c9c04f70e905867a4848ad280b855d0e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 13:37:11 +0100
+Subject: ASoC: qcom: common: set correct directions for dailinks
+
+From: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+[ Upstream commit a2120089251f1fe221305e88df99af16f940e236 ]
+
+Currently both FE and BE dai-links are configured as bi-directional.
+However, the DSP BE dais are only single-directional, so set the
+directions as supported by the BE dais.
+
+Fixes: c25e295cd77b (ASoC: qcom: Add support to parse common audio device nodes)
+Reported-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Tested-by: John Stultz <john.stultz@linaro.org>
+Reviewed-by: Vinod Koul <vkoul@kernel.org>
+Link: https://lore.kernel.org/r/20200612123711.29130-2-srinivas.kandagatla@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/common.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index 6c20bdd850f33..8ada4ecba8472 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -4,6 +4,7 @@
+
+ #include <linux/module.h>
+ #include "common.h"
++#include "qdsp6/q6afe.h"
+
+ int qcom_snd_parse_of(struct snd_soc_card *card)
+ {
+@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
+ }
+ link->no_pcm = 1;
+ link->ignore_pmdown_time = 1;
++
++ if (q6afe_is_rx_port(link->id)) {
++ link->dpcm_playback = 1;
++ link->dpcm_capture = 0;
++ } else {
++ link->dpcm_playback = 0;
++ link->dpcm_capture = 1;
++ }
++
+ } else {
+ dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
+ if (!dlc)
+@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
+ link->codecs->dai_name = "snd-soc-dummy-dai";
+ link->codecs->name = "snd-soc-dummy";
+ link->dynamic = 1;
++ link->dpcm_playback = 1;
++ link->dpcm_capture = 1;
+ }
+
+ link->ignore_suspend = 1;
+ link->nonatomic = 1;
+- link->dpcm_playback = 1;
+- link->dpcm_capture = 1;
+ link->stream_name = link->name;
+ link++;
+
+--
+2.25.1
+
--- /dev/null
+From fa79a12baeac9934e5314f693c66591572c068f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jun 2020 15:51:58 -0500
+Subject: ASoC: rockchip: Fix a reference count leak.
+
+From: Qiushi Wu <wu000273@umn.edu>
+
+[ Upstream commit f141a422159a199f4c8dedb7e0df55b3b2cf16cd ]
+
+Calling pm_runtime_get_sync increments the counter even in case of
+failure, causing incorrect ref count if pm_runtime_put is not called in
+error handling paths. Call pm_runtime_put if pm_runtime_get_sync fails.
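+
+A minimal sketch of the resulting pattern (not the driver code itself;
+some fixes use pm_runtime_put_noidle() here instead, which only drops
+the usage count without queueing an idle request):
+
+```
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		/* the usage count was incremented even though get failed */
+		pm_runtime_put(dev);
+		return ret;
+	}
+```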
+
+Fixes: fc05a5b22253 ("ASoC: rockchip: add support for pdm controller")
+Signed-off-by: Qiushi Wu <wu000273@umn.edu>
+Reviewed-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20200613205158.27296-1-wu000273@umn.edu
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/rockchip/rockchip_pdm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
+index 7cd42fcfcf38a..1707414cfa921 100644
+--- a/sound/soc/rockchip/rockchip_pdm.c
++++ b/sound/soc/rockchip/rockchip_pdm.c
+@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(dev);
+ return ret;
++ }
+
+ ret = regcache_sync(pdm->regmap);
+
+--
+2.25.1
+
--- /dev/null
+From 14d6d6b4fa1dc6ae3fb806f534aa6cc2feb542f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jun 2020 09:41:49 +0800
+Subject: ata/libata: Fix usage of page address by page_address in
+ ata_scsi_mode_select_xlat function
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit f650ef61e040bcb175dd8762164b00a5d627f20e ]
+
+BUG: KASAN: use-after-free in ata_scsi_mode_select_xlat+0x10bd/0x10f0
+drivers/ata/libata-scsi.c:4045
+Read of size 1 at addr ffff88803b8cd003 by task syz-executor.6/12621
+
+CPU: 1 PID: 12621 Comm: syz-executor.6 Not tainted 4.19.95 #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+1.10.2-1ubuntu1 04/01/2014
+Call Trace:
+__dump_stack lib/dump_stack.c:77 [inline]
+dump_stack+0xac/0xee lib/dump_stack.c:118
+print_address_description+0x60/0x223 mm/kasan/report.c:253
+kasan_report_error mm/kasan/report.c:351 [inline]
+kasan_report mm/kasan/report.c:409 [inline]
+kasan_report.cold+0xae/0x2d8 mm/kasan/report.c:393
+ata_scsi_mode_select_xlat+0x10bd/0x10f0 drivers/ata/libata-scsi.c:4045
+ata_scsi_translate+0x2da/0x680 drivers/ata/libata-scsi.c:2035
+__ata_scsi_queuecmd drivers/ata/libata-scsi.c:4360 [inline]
+ata_scsi_queuecmd+0x2e4/0x790 drivers/ata/libata-scsi.c:4409
+scsi_dispatch_cmd+0x2ee/0x6c0 drivers/scsi/scsi_lib.c:1867
+scsi_queue_rq+0xfd7/0x1990 drivers/scsi/scsi_lib.c:2170
+blk_mq_dispatch_rq_list+0x1e1/0x19a0 block/blk-mq.c:1186
+blk_mq_do_dispatch_sched+0x147/0x3d0 block/blk-mq-sched.c:108
+blk_mq_sched_dispatch_requests+0x427/0x680 block/blk-mq-sched.c:204
+__blk_mq_run_hw_queue+0xbc/0x200 block/blk-mq.c:1308
+__blk_mq_delay_run_hw_queue+0x3c0/0x460 block/blk-mq.c:1376
+blk_mq_run_hw_queue+0x152/0x310 block/blk-mq.c:1413
+blk_mq_sched_insert_request+0x337/0x6c0 block/blk-mq-sched.c:397
+blk_execute_rq_nowait+0x124/0x320 block/blk-exec.c:64
+blk_execute_rq+0xc5/0x112 block/blk-exec.c:101
+sg_scsi_ioctl+0x3b0/0x6a0 block/scsi_ioctl.c:507
+sg_ioctl+0xd37/0x23f0 drivers/scsi/sg.c:1106
+vfs_ioctl fs/ioctl.c:46 [inline]
+file_ioctl fs/ioctl.c:501 [inline]
+do_vfs_ioctl+0xae6/0x1030 fs/ioctl.c:688
+ksys_ioctl+0x76/0xa0 fs/ioctl.c:705
+__do_sys_ioctl fs/ioctl.c:712 [inline]
+__se_sys_ioctl fs/ioctl.c:710 [inline]
+__x64_sys_ioctl+0x6f/0xb0 fs/ioctl.c:710
+do_syscall_64+0xa0/0x2e0 arch/x86/entry/common.c:293
+entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x45c479
+Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89
+f7 48
+89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
+ff 0f
+83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007fb0e9602c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007fb0e96036d4 RCX: 000000000045c479
+RDX: 0000000020000040 RSI: 0000000000000001 RDI: 0000000000000003
+RBP: 000000000076bfc0 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
+R13: 000000000000046d R14: 00000000004c6e1a R15: 000000000076bfcc
+
+Allocated by task 12577:
+set_track mm/kasan/kasan.c:460 [inline]
+kasan_kmalloc mm/kasan/kasan.c:553 [inline]
+kasan_kmalloc+0xbf/0xe0 mm/kasan/kasan.c:531
+__kmalloc+0xf3/0x1e0 mm/slub.c:3749
+kmalloc include/linux/slab.h:520 [inline]
+load_elf_phdrs+0x118/0x1b0 fs/binfmt_elf.c:441
+load_elf_binary+0x2de/0x4610 fs/binfmt_elf.c:737
+search_binary_handler fs/exec.c:1654 [inline]
+search_binary_handler+0x15c/0x4e0 fs/exec.c:1632
+exec_binprm fs/exec.c:1696 [inline]
+__do_execve_file.isra.0+0xf52/0x1a90 fs/exec.c:1820
+do_execveat_common fs/exec.c:1866 [inline]
+do_execve fs/exec.c:1883 [inline]
+__do_sys_execve fs/exec.c:1964 [inline]
+__se_sys_execve fs/exec.c:1959 [inline]
+__x64_sys_execve+0x8a/0xb0 fs/exec.c:1959
+do_syscall_64+0xa0/0x2e0 arch/x86/entry/common.c:293
+entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Freed by task 12577:
+set_track mm/kasan/kasan.c:460 [inline]
+__kasan_slab_free+0x129/0x170 mm/kasan/kasan.c:521
+slab_free_hook mm/slub.c:1370 [inline]
+slab_free_freelist_hook mm/slub.c:1397 [inline]
+slab_free mm/slub.c:2952 [inline]
+kfree+0x8b/0x1a0 mm/slub.c:3904
+load_elf_binary+0x1be7/0x4610 fs/binfmt_elf.c:1118
+search_binary_handler fs/exec.c:1654 [inline]
+search_binary_handler+0x15c/0x4e0 fs/exec.c:1632
+exec_binprm fs/exec.c:1696 [inline]
+__do_execve_file.isra.0+0xf52/0x1a90 fs/exec.c:1820
+do_execveat_common fs/exec.c:1866 [inline]
+do_execve fs/exec.c:1883 [inline]
+__do_sys_execve fs/exec.c:1964 [inline]
+__se_sys_execve fs/exec.c:1959 [inline]
+__x64_sys_execve+0x8a/0xb0 fs/exec.c:1959
+do_syscall_64+0xa0/0x2e0 arch/x86/entry/common.c:293
+entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+The buggy address belongs to the object at ffff88803b8ccf00
+which belongs to the cache kmalloc-512 of size 512
+The buggy address is located 259 bytes inside of
+512-byte region [ffff88803b8ccf00, ffff88803b8cd100)
+The buggy address belongs to the page:
+page:ffffea0000ee3300 count:1 mapcount:0 mapping:ffff88806cc03080
+index:0xffff88803b8cc780 compound_mapcount: 0
+flags: 0x100000000008100(slab|head)
+raw: 0100000000008100 ffffea0001104080 0000000200000002 ffff88806cc03080
+raw: ffff88803b8cc780 00000000800c000b 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ffff88803b8ccf00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ffff88803b8ccf80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+>ffff88803b8cd000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+^
+ffff88803b8cd080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ffff88803b8cd100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+
+You can refer to "https://www.lkml.org/lkml/2019/1/17/474" to reproduce
+this error.
+
+The code raising the exception is "bd_len = p[3];". The "p" value is
+ffff88803b8cd000, which belongs to the kmalloc-512 cache of size 512.
+The "page_address(sg_page(scsi_sglist(scmd)))" pointer may come from the
+sg_scsi_ioctl() "buffer", which is allocated with kzalloc, so "buffer"
+may not be page aligned.
+This also looks completely buggy on highmem systems and really needs to
+use a kmap_atomic. --Christoph Hellwig
+To address the above bugs, Paolo Bonzini advised that it is simpler to
+just make a char array of size CACHE_MPAGE_LEN+8+8+4-2 (or just 64 to
+make it easy), use sg_copy_to_buffer to copy from the sglist into the
+buffer, and work there.
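+
+For reference, the helper used by the fix is declared in
+<linux/scatterlist.h> roughly as follows; it linearises the scatterlist
+into a caller-supplied buffer and maps each element internally, so it
+does not depend on page alignment or lowmem mappings:
+
+```
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen);
+```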
+
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 5af34a3201ed2..5596c9b6ebf23 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3978,12 +3978,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ const u8 *cdb = scmd->cmnd;
+- const u8 *p;
+ u8 pg, spg;
+ unsigned six_byte, pg_len, hdr_len, bd_len;
+ int len;
+ u16 fp = (u16)-1;
+ u8 bp = 0xff;
++ u8 buffer[64];
++ const u8 *p = buffer;
+
+ VPRINTK("ENTER\n");
+
+@@ -4017,12 +4018,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
+ if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
+ goto invalid_param_len;
+
+- p = page_address(sg_page(scsi_sglist(scmd)));
+-
+ /* Move past header and block descriptors. */
+ if (len < hdr_len)
+ goto invalid_param_len;
+
++ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
++ buffer, sizeof(buffer)))
++ goto invalid_param_len;
++
+ if (six_byte)
+ bd_len = p[3];
+ else
+--
+2.25.1
+
--- /dev/null
+From 51d97dc9afb21c16cff5a6606f44717fedbc2218 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jun 2020 16:58:36 +0200
+Subject: blktrace: break out of blktrace setup on concurrent calls
+
+From: Luis Chamberlain <mcgrof@kernel.org>
+
+[ Upstream commit 1b0b283648163dae2a214ca28ed5a99f62a77319 ]
+
+We use one blktrace per request_queue, that means one per the entire
+disk. So we cannot run one blktrace on say /dev/vda and then /dev/vda1,
+or just two calls on /dev/vda.
+
+We check for concurrent setup only at the very end of the blktrace setup though.
+
+If we try to run two concurrent blktraces on the same block device, the
+second one will fail, and the first one seems to go on. However, when
+one tries to kill the first one, one will see things like this:
+
+The kernel will show these:
+
+```
+debugfs: File 'dropped' in directory 'nvme1n1' already present!
+debugfs: File 'msg' in directory 'nvme1n1' already present!
+debugfs: File 'trace0' in directory 'nvme1n1' already present!
+```
+
+And userspace just sees this error message for the second call:
+
+```
+blktrace /dev/nvme1n1
+BLKTRACESETUP(2) /dev/nvme1n1 failed: 5/Input/output error
+```
+
+The first userspace process #1 will also claim that the files
+were taken out from under it. The files are taken away from the
+first process given that when the second blktrace fails, it will
+follow up with a BLKTRACESTOP and BLKTRACETEARDOWN. This means
+that even if the happy-go-lucky process #1 is waiting for blktrace
+data, we *have* been asked to tear down the blktrace.
+
+This can easily be reproduced with the break-blktrace [0] run_0005.sh test.
+
+Just break out early if we know we're already going to fail; this will
+prevent trying to create the files all over again, which we know still
+exist.
+
+[0] https://github.com/mcgrof/break-blktrace
+
+Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/blktrace.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a677aa84ccb6e..eaee960153e1e 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -3,6 +3,9 @@
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/blkdev.h>
+ #include <linux/blktrace_api.h>
+@@ -495,6 +498,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ */
+ strreplace(buts->name, '/', '_');
+
++ /*
++ * bdev can be NULL, as with scsi-generic, this is a helpful as
++ * we can be.
++ */
++ if (q->blk_trace) {
++ pr_warn("Concurrent blktraces are not allowed on %s\n",
++ buts->name);
++ return -EBUSY;
++ }
++
+ bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+ if (!bt)
+ return -ENOMEM;
+--
+2.25.1
+
--- /dev/null
+From 883a94a27b75444698238b97f5157010863cebfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jun 2020 14:18:37 +0800
+Subject: block: update hctx map when use multiple maps
+
+From: Weiping Zhang <zhangweiping@didiglobal.com>
+
+[ Upstream commit fe35ec58f0d339221643287bbb7cee15c93a5389 ]
+
+There is an issue when tuning the number of read and write queues
+while the total queue count is not changed: hctx->type cannot be
+updated, since __blk_mq_update_nr_hw_queues will return directly
+if the total queue count has not been changed.
+
+Reproduce:
+
+dmesg | grep "default/read/poll"
+[ 2.607459] nvme nvme0: 48/0/0 default/read/poll queues
+cat /sys/kernel/debug/block/nvme0n1/hctx*/type | sort | uniq -c
+ 48 default
+
+tune the write queues to 24:
+echo 24 > /sys/module/nvme/parameters/write_queues
+echo 1 > /sys/block/nvme0n1/device/reset_controller
+
+dmesg | grep "default/read/poll"
+[ 433.547235] nvme nvme0: 24/24/0 default/read/poll queues
+
+cat /sys/kernel/debug/block/nvme0n1/hctx*/type | sort | uniq -c
+ 48 default
+
+The driver's hardware queue mapping is not the same as the block layer's.
+
+Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 0550366e25d8b..f1b930a300a38 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3279,7 +3279,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+
+ if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
+ nr_hw_queues = nr_cpu_ids;
+- if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
++ if (nr_hw_queues < 1)
++ return;
++ if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
+ return;
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+--
+2.25.1
+
--- /dev/null
+From cfe8b6431f972a29ba3294768f94b0023ea79347 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 18:04:14 -0700
+Subject: bpf: Don't return EINVAL from {get,set}sockopt when optlen >
+ PAGE_SIZE
+
+From: Stanislav Fomichev <sdf@google.com>
+
+[ Upstream commit d8fe449a9c51a37d844ab607e14e2f5c657d3cf2 ]
+
+Attaching to these hooks can break iptables because its optval is
+usually quite big, or at least bigger than the current PAGE_SIZE limit.
+David also mentioned some SCTP options can be big (around 256k).
+
+For such optvals we expose only the first PAGE_SIZE bytes to
+the BPF program. The BPF program has two options:
+1. Set ctx->optlen to 0 to indicate that the BPF's optval
+ should be ignored and the kernel should use original userspace
+ value.
+2. Set ctx->optlen to something that's smaller than the PAGE_SIZE.
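+
+A minimal sketch of option 1, assuming libbpf's SEC() helper (the
+program and file layout here are illustrative, not part of this patch):
+
+```
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("cgroup/setsockopt")
+int passthrough_setsockopt(struct bpf_sockopt *ctx)
+{
+	/* Tell the kernel to ignore the (possibly truncated) optval copy
+	 * shown to BPF and use the original userspace buffer instead.
+	 */
+	ctx->optlen = 0;
+	return 1;	/* allow the setsockopt() call to proceed */
+}
+
+char _license[] SEC("license") = "GPL";
+```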
+
+v5:
+* use ctx->optlen == 0 with trimmed buffer (Alexei Starovoitov)
+* update the docs accordingly
+
+v4:
+* use temporary buffer to avoid optval == optval_end == NULL;
+ this removes the corner case in the verifier that might assume
+ non-zero PTR_TO_PACKET/PTR_TO_PACKET_END.
+
+v3:
+* don't increase the limit, bypass the argument
+
+v2:
+* proper comments formatting (Jakub Kicinski)
+
+Fixes: 0d01da6afc54 ("bpf: implement getsockopt and setsockopt hooks")
+Signed-off-by: Stanislav Fomichev <sdf@google.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Cc: David Laight <David.Laight@ACULAB.COM>
+Link: https://lore.kernel.org/bpf/20200617010416.93086-1-sdf@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/cgroup.c | 53 ++++++++++++++++++++++++++++-----------------
+ 1 file changed, 33 insertions(+), 20 deletions(-)
+
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index 869e2e1860e84..b701af27a7799 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -966,16 +966,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
+
+ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
+ {
+- if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
++ if (unlikely(max_optlen < 0))
+ return -EINVAL;
+
++ if (unlikely(max_optlen > PAGE_SIZE)) {
++ /* We don't expose optvals that are greater than PAGE_SIZE
++ * to the BPF program.
++ */
++ max_optlen = PAGE_SIZE;
++ }
++
+ ctx->optval = kzalloc(max_optlen, GFP_USER);
+ if (!ctx->optval)
+ return -ENOMEM;
+
+ ctx->optval_end = ctx->optval + max_optlen;
+
+- return 0;
++ return max_optlen;
+ }
+
+ static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
+@@ -1009,13 +1016,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+ */
+ max_optlen = max_t(int, 16, *optlen);
+
+- ret = sockopt_alloc_buf(&ctx, max_optlen);
+- if (ret)
+- return ret;
++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
++ if (max_optlen < 0)
++ return max_optlen;
+
+ ctx.optlen = *optlen;
+
+- if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
++ if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1043,8 +1050,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
+ /* export any potential modifications */
+ *level = ctx.level;
+ *optname = ctx.optname;
+- *optlen = ctx.optlen;
+- *kernel_optval = ctx.optval;
++
++ /* optlen == 0 from BPF indicates that we should
++ * use original userspace data.
++ */
++ if (ctx.optlen != 0) {
++ *optlen = ctx.optlen;
++ *kernel_optval = ctx.optval;
++ }
+ }
+
+ out:
+@@ -1076,12 +1089,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
+ return retval;
+
+- ret = sockopt_alloc_buf(&ctx, max_optlen);
+- if (ret)
+- return ret;
+-
+ ctx.optlen = max_optlen;
+
++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
++ if (max_optlen < 0)
++ return max_optlen;
++
+ if (!retval) {
+ /* If kernel getsockopt finished successfully,
+ * copy whatever was returned to the user back
+@@ -1095,10 +1108,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ goto out;
+ }
+
+- if (ctx.optlen > max_optlen)
+- ctx.optlen = max_optlen;
+-
+- if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
++ if (copy_from_user(ctx.optval, optval,
++ min(ctx.optlen, max_optlen)) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1127,10 +1138,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ goto out;
+ }
+
+- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+- put_user(ctx.optlen, optlen)) {
+- ret = -EFAULT;
+- goto out;
++ if (ctx.optlen != 0) {
++ if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
++ put_user(ctx.optlen, optlen)) {
++ ret = -EFAULT;
++ goto out;
++ }
+ }
+
+ ret = ctx.retval;
+--
+2.25.1
+
--- /dev/null
+From fb3b8be4e4d7d5d5b798281c9712074d30a3b765 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 14:53:27 -0400
+Subject: bpf, xdp, samples: Fix null pointer dereference in *_user code
+
+From: Gaurav Singh <gaurav1086@gmail.com>
+
+[ Upstream commit 6903cdae9f9f08d61e49c16cbef11c293e33a615 ]
+
+Memset on the pointer right after malloc can cause a NULL pointer
+dereference if it failed to allocate memory. A simple fix is to
+replace the malloc()/memset() pair with a single call to calloc().
+
+Fixes: 0fca931a6f21 ("samples/bpf: program demonstrating access to xdp_rxq_info")
+Signed-off-by: Gaurav Singh <gaurav1086@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ samples/bpf/xdp_monitor_user.c | 8 ++------
+ samples/bpf/xdp_redirect_cpu_user.c | 7 ++-----
+ samples/bpf/xdp_rxq_info_user.c | 13 +++----------
+ 3 files changed, 7 insertions(+), 21 deletions(-)
+
+diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
+index dd558cbb23094..ef53b93db5732 100644
+--- a/samples/bpf/xdp_monitor_user.c
++++ b/samples/bpf/xdp_monitor_user.c
+@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
+ {
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ void *array;
+- size_t size;
+
+- size = record_size * nr_cpus;
+- array = malloc(size);
+- memset(array, 0, size);
++ array = calloc(nr_cpus, record_size);
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ exit(EXIT_FAIL_MEM);
+@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
+ int i;
+
+ /* Alloc main stats_record structure */
+- rec = malloc(sizeof(*rec));
+- memset(rec, 0, sizeof(*rec));
++ rec = calloc(1, sizeof(*rec));
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
+index 767869e3b308f..0a76725568225 100644
+--- a/samples/bpf/xdp_redirect_cpu_user.c
++++ b/samples/bpf/xdp_redirect_cpu_user.c
+@@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void)
+ {
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec *array;
+- size_t size;
+
+- size = sizeof(struct datarec) * nr_cpus;
+- array = malloc(size);
+- memset(array, 0, size);
++ array = calloc(nr_cpus, sizeof(struct datarec));
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ exit(EXIT_FAIL_MEM);
+@@ -229,11 +226,11 @@ static struct stats_record *alloc_stats_record(void)
+
+ size = sizeof(*rec) + n_cpus * sizeof(struct record);
+ rec = malloc(size);
+- memset(rec, 0, size);
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+ }
++ memset(rec, 0, size);
+ rec->rx_cnt.cpu = alloc_record_per_cpu();
+ rec->redir_err.cpu = alloc_record_per_cpu();
+ rec->kthread.cpu = alloc_record_per_cpu();
+diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
+index b88df17853b84..21d6e5067a839 100644
+--- a/samples/bpf/xdp_rxq_info_user.c
++++ b/samples/bpf/xdp_rxq_info_user.c
+@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
+ {
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec *array;
+- size_t size;
+
+- size = sizeof(struct datarec) * nr_cpus;
+- array = malloc(size);
+- memset(array, 0, size);
++ array = calloc(nr_cpus, sizeof(struct datarec));
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ exit(EXIT_FAIL_MEM);
+@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
+ {
+ unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+ struct record *array;
+- size_t size;
+
+- size = sizeof(struct record) * nr_rxqs;
+- array = malloc(size);
+- memset(array, 0, size);
++ array = calloc(nr_rxqs, sizeof(struct record));
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+ exit(EXIT_FAIL_MEM);
+@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
+ struct stats_record *rec;
+ int i;
+
+- rec = malloc(sizeof(*rec));
+- memset(rec, 0, sizeof(*rec));
++ rec = calloc(1, sizeof(struct stats_record));
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+--
+2.25.1
+
--- /dev/null
+From 86dbe2d864d0d36ac391b36b848aa2de85d0f434 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 May 2020 06:49:29 -0700
+Subject: bus: ti-sysc: Flush posted write on enable and disable
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 5ce8aee81be6c8bc19051d7c7b0d3cbb7ac5fc3f ]
+
+Looks like we're missing a flush of the posted write after module enable
+and disable. I've seen occasional errors accessing various modules, and
+it is suspected that the missing flush of posted writes can also cause
+random reboots.
+
+The errors we can see are similar to the one below from spi for example:
+
+44000000.ocp:L3 Custom Error: MASTER MPU TARGET L4CFG (Read): Data Access
+in User mode during Functional access
+...
+mcspi_wait_for_reg_bit
+omap2_mcspi_transfer_one
+spi_transfer_one_message
+...
+
+We also want to flush the posted write for disable. The clkctrl clock
+disable happens after module disable, and we don't want to have the
+module potentially stay active while we're trying to disable the clock.
+
+Fixes: d59b60564cbf ("bus: ti-sysc: Add generic enable/disable functions")
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/ti-sysc.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index f0bc0841cbc40..c088c6f4adcff 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -938,6 +938,9 @@ static int sysc_enable_module(struct device *dev)
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+ }
+
++ /* Flush posted write */
++ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
++
+ if (ddata->module_enable_quirk)
+ ddata->module_enable_quirk(ddata);
+
+@@ -1018,6 +1021,9 @@ static int sysc_disable_module(struct device *dev)
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
++ /* Flush posted write */
++ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
++
+ return 0;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From fe58fe364d85feedab86c9f7ae750b3c95dac724 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 31 May 2020 12:37:54 -0700
+Subject: bus: ti-sysc: Ignore clockactivity unless specified as a quirk
+
+From: Tony Lindgren <tony@atomide.com>
+
+[ Upstream commit 08b91dd6e547467fad61a7c201ff71080d7ad65a ]
+
+We must ignore the clockactivity bit for most modules and not set it
+unless specified for the module with SYSC_QUIRK_USE_CLOCKACT. Otherwise
+the interface clock can be automatically gated constantly, causing
+unexpected performance issues.
+
+Fixes: ae9ae12e9daa ("bus: ti-sysc: Handle clockactivity for enable and disable")
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/ti-sysc.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index c088c6f4adcff..553c0e2796217 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -880,10 +880,13 @@ static int sysc_enable_module(struct device *dev)
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+- /* Set CLOCKACTIVITY, we only use it for ick */
++ /*
++ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
++ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
++ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
++ */
+ if (regbits->clkact_shift >= 0 &&
+- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
+- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
++ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
+ reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
+
+ /* Set SIDLE mode */
+--
+2.25.1
+
--- /dev/null
+From 9ba70940c58d58e3c913f295f5ca055db1b3ec12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 09:24:17 +0800
+Subject: clk: sifive: allocate sufficient memory for struct __prci_data
+
+From: Vincent Chen <vincent.chen@sifive.com>
+
+[ Upstream commit d0a5fdf4cc83dabcdea668f971b8a2e916437711 ]
+
+The (struct __prci_data).hw_clks.hws member is an array with a dynamic
+number of elements. Use struct_size(pd, hw_clks.hws,
+ARRAY_SIZE(__prci_init_clocks)) instead of sizeof(*pd) to get the
+correct memory size of struct __prci_data for sifive/fu540-prci. After
+applying this modification, the kernel runs smoothly with
+CONFIG_SLAB_FREELIST_RANDOM enabled on the HiFive Unleashed board.
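+
+As a generic illustration of the struct_size() helper (the struct below
+is hypothetical, not the driver's actual type):
+
+```
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct example {
+	unsigned int n;
+	u64 vals[];			/* flexible array member */
+};
+
+/* struct_size(e, vals, 4) evaluates to
+ * sizeof(struct example) + 4 * sizeof(u64), with overflow checking;
+ * a plain sizeof(*e) would not account for the trailing array.
+ */
+struct example *e = kzalloc(struct_size(e, vals, 4), GFP_KERNEL);
+```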
+
+Fixes: 30b8e27e3b58 ("clk: sifive: add a driver for the SiFive FU540 PRCI IP block")
+Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/sifive/fu540-prci.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
+index 6282ee2f361cd..a8901f90a61ac 100644
+--- a/drivers/clk/sifive/fu540-prci.c
++++ b/drivers/clk/sifive/fu540-prci.c
+@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
+ struct __prci_data *pd;
+ int r;
+
+- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
++ pd = devm_kzalloc(dev,
++ struct_size(pd, hw_clks.hws,
++ ARRAY_SIZE(__prci_init_clocks)),
++ GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+--
+2.25.1
+
--- /dev/null
+From 5e14a682fa10c8436e7011d5d77f1b9cb0b32204 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:51:31 +0530
+Subject: cxgb4: move handling L2T ARP failures to caller
+
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+
+[ Upstream commit 11d8cd5c9f3b46f397f889cefdb66795518aaebd ]
+
+Move code handling L2T ARP failures to the only caller.
+
+Fixes the following sparse warning:
+skbuff.h:2091:29: warning: context imbalance in
+'handle_failed_resolution' - unexpected unlock
+
+Fixes: 749cb5fe48bb ("cxgb4: Replace arpq_head/arpq_tail with SKB double link-list code")
+Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/chelsio/cxgb4/l2t.c | 52 +++++++++++-------------
+ 1 file changed, 24 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+index e6fe2870137b0..a440c1cf0b61e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+@@ -506,41 +506,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
+ }
+ EXPORT_SYMBOL(cxgb4_select_ntuple);
+
+-/*
+- * Called when address resolution fails for an L2T entry to handle packets
+- * on the arpq head. If a packet specifies a failure handler it is invoked,
+- * otherwise the packet is sent to the device.
+- */
+-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
+-{
+- struct sk_buff *skb;
+-
+- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+-
+- spin_unlock(&e->lock);
+- if (cb->arp_err_handler)
+- cb->arp_err_handler(cb->handle, skb);
+- else
+- t4_ofld_send(adap, skb);
+- spin_lock(&e->lock);
+- }
+-}
+-
+ /*
+ * Called when the host's neighbor layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+ {
+- struct l2t_entry *e;
+- struct sk_buff_head *arpq = NULL;
+- struct l2t_data *d = adap->l2t;
+ unsigned int addr_len = neigh->tbl->key_len;
+ u32 *addr = (u32 *) neigh->primary_key;
+- int ifidx = neigh->dev->ifindex;
+- int hash = addr_hash(d, addr, addr_len, ifidx);
++ int hash, ifidx = neigh->dev->ifindex;
++ struct sk_buff_head *arpq = NULL;
++ struct l2t_data *d = adap->l2t;
++ struct l2t_entry *e;
+
++ hash = addr_hash(d, addr, addr_len, ifidx);
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (!addreq(e, addr) && e->ifindex == ifidx) {
+@@ -573,8 +552,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+ write_l2e(adap, e, 0);
+ }
+
+- if (arpq)
+- handle_failed_resolution(adap, e);
++ if (arpq) {
++ struct sk_buff *skb;
++
++ /* Called when address resolution fails for an L2T
++ * entry to handle packets on the arpq head. If a
++ * packet specifies a failure handler it is invoked,
++ * otherwise the packet is sent to the device.
++ */
++ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
++ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
++
++ spin_unlock(&e->lock);
++ if (cb->arp_err_handler)
++ cb->arp_err_handler(cb->handle, skb);
++ else
++ t4_ofld_send(adap, skb);
++ spin_lock(&e->lock);
++ }
++ }
+ spin_unlock_bh(&e->lock);
+ }
+
+--
+2.25.1
+
--- /dev/null
+From abffbb10a41e1b6608341a913e9f5f0ea6ae5974 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 16:28:29 +0200
+Subject: devmap: Use bpf_map_area_alloc() for allocating hash buckets
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+[ Upstream commit 99c51064fb06146b3d494b745c947e438a10aaa7 ]
+
+Syzkaller discovered that creating a hash of type devmap_hash with a large
+number of entries can hit the memory allocator limit for allocating
+contiguous memory regions. There's really no reason to use kmalloc_array()
+directly in the devmap code, so just switch it to the existing
+bpf_map_area_alloc() function that is used elsewhere.
+
+Fixes: 6f9d451ab1a3 ("xdp: Add devmap_hash map type for looking up devices by hashed index")
+Reported-by: Xiumei Mu <xmu@redhat.com>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/20200616142829.114173-1-toke@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/devmap.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index b4b6b77f309c6..6684696fa4571 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -88,12 +88,13 @@ struct bpf_dtab {
+ static DEFINE_SPINLOCK(dev_map_lock);
+ static LIST_HEAD(dev_map_list);
+
+-static struct hlist_head *dev_map_create_hash(unsigned int entries)
++static struct hlist_head *dev_map_create_hash(unsigned int entries,
++ int numa_node)
+ {
+ int i;
+ struct hlist_head *hash;
+
+- hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
++ hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+ if (hash != NULL)
+ for (i = 0; i < entries; i++)
+ INIT_HLIST_HEAD(&hash[i]);
+@@ -151,7 +152,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
+ INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
+
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+- dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
++ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
++ dtab->map.numa_node);
+ if (!dtab->dev_index_head)
+ goto free_percpu;
+
+@@ -249,7 +251,7 @@ static void dev_map_free(struct bpf_map *map)
+ }
+ }
+
+- kfree(dtab->dev_index_head);
++ bpf_map_area_free(dtab->dev_index_head);
+ } else {
+ for (i = 0; i < dtab->map.max_entries; i++) {
+ struct bpf_dtab_netdev *dev;
+--
+2.25.1
+
--- /dev/null
+From 3f3646673168d7b8e232c4786b399ce82b4077ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jun 2020 20:37:44 +0300
+Subject: drm/amd/display: Use kfree() to free rgb_user in
+ calculate_user_regamma_ramp()
+
+From: Denis Efremov <efremov@linux.com>
+
+[ Upstream commit 43a562774fceba867e8eebba977d7d42f8a2eac7 ]
+
+Use kfree() instead of kvfree() to free rgb_user in
+calculate_user_regamma_ramp() because the memory is allocated with
+kcalloc().
+
+Signed-off-by: Denis Efremov <efremov@linux.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 207435fa4f2c6..51d07a4561ce9 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1862,7 +1862,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+
+ kfree(rgb_regamma);
+ rgb_regamma_alloc_fail:
+- kvfree(rgb_user);
++ kfree(rgb_user);
+ rgb_user_alloc_fail:
+ return ret;
+ }
+--
+2.25.1
+
--- /dev/null
+From 6f61b58dba15e53603c1fd1db0d449458a1f8a19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 May 2020 13:38:04 -0500
+Subject: efi/esrt: Fix reference count leak in esre_create_sysfs_entry.
+
+From: Qiushi Wu <wu000273@umn.edu>
+
+[ Upstream commit 4ddf4739be6e375116c375f0a68bf3893ffcee21 ]
+
+kobject_init_and_add() takes a reference even when it fails. If this
+function returns an error, kobject_put() must be called to properly
+clean up the memory associated with the object. A previous commit,
+b8eb718348b8, fixed a similar problem.
+
+Fixes: 0bb549052d33 ("efi: Add esrt support")
+Signed-off-by: Qiushi Wu <wu000273@umn.edu>
+Link: https://lore.kernel.org/r/20200528183804.4497-1-wu000273@umn.edu
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/esrt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
+index d6dd5f503fa23..e8f71a50ba896 100644
+--- a/drivers/firmware/efi/esrt.c
++++ b/drivers/firmware/efi/esrt.c
+@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
+ rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
+ "entry%d", entry_num);
+ if (rc) {
+- kfree(entry);
++ kobject_put(&entry->kobj);
+ return rc;
+ }
+ }
+--
+2.25.1
+
--- /dev/null
+From 39018c9e73268efe2964e91f6c3d062181e90f83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 09:16:36 +0200
+Subject: efi/tpm: Verify event log header before parsing
+
+From: Fabian Vogt <fvogt@suse.de>
+
+[ Upstream commit 7dfc06a0f25b593a9f51992f540c0f80a57f3629 ]
+
+It is possible that the first event in the event log is not actually a
+log header at all, but rather a normal event. This leads to the cast in
+__calc_tpm2_event_size being an invalid conversion, which means that
+the values read are effectively garbage. Depending on the first event's
+contents, this leads either to apparently normal behaviour, a crash or
+a freeze.
+
+While this behaviour of the firmware is not in accordance with the
+TCG Client EFI Specification, this happens on a Dell Precision 5510
+with the TPM enabled but hidden from the OS ("TPM On" disabled, state
+otherwise untouched). The EFI firmware claims that the TPM is present
+and active and that it supports the TCG 2.0 event log format.
+
+Fortunately, this can be worked around by simply checking the header
+of the first event and the event log header signature itself.
+
+Commit b4f1874c6216 ("tpm: check event log version before reading final
+events") addressed a similar issue also found on Dell models.
+
+Fixes: 6b0326190205 ("efi: Attempt to get the TCG2 event log in the boot stub")
+Signed-off-by: Fabian Vogt <fvogt@suse.de>
+Link: https://lore.kernel.org/r/1927248.evlx2EsYKh@linux-e202.suse.de
+Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=1165773
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/tpm_eventlog.h | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
+index 131ea1bad458b..eccfd3a4e4c85 100644
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
+ u16 digest_size;
+ } __packed;
+
++#define TCG_SPECID_SIG "Spec ID Event03"
++
+ struct tcg_efi_specid_event_head {
+ u8 signature[16];
+ u32 platform_class;
+@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ int i;
+ int j;
+ u32 count, event_type;
++ const u8 zero_digest[sizeof(event_header->digest)] = {0};
+
+ marker = event;
+ marker_start = marker;
+@@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+ count = READ_ONCE(event->count);
+ event_type = READ_ONCE(event->event_type);
+
++ /* Verify that it's the log header */
++ if (event_header->pcr_idx != 0 ||
++ event_header->event_type != NO_ACTION ||
++ memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
++ size = 0;
++ goto out;
++ }
++
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ /* Check if event is malformed. */
+- if (count > efispecid->num_algs) {
++ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
++ sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
+ size = 0;
+ goto out;
+ }
+--
+2.25.1
+
--- /dev/null
+From 51ed647c2e3e26cf9b2b5eb3056709c06e48840f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 May 2020 15:21:04 +0800
+Subject: hwrng: ks-sa - Fix runtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 95459261c99f1621d90bc628c2a48e60b7cf9a88 ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even when
+the call returns an error code. Thus a pairing decrement is needed on
+the error handling path to keep the counter balanced.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Reviewed-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/hw_random/ks-sa-rng.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
+index a67430010aa68..5c7d3dfcfdd04 100644
+--- a/drivers/char/hw_random/ks-sa-rng.c
++++ b/drivers/char/hw_random/ks-sa-rng.c
+@@ -208,6 +208,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable SA power-domain\n");
++ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+--
+2.25.1
+
--- /dev/null
+From c195d07910525c6de06be1ca44d94a9fb9897fc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jun 2020 11:41:09 +0100
+Subject: i2c: core: check returned size of emulated smbus block read
+
+From: Mans Rullgard <mans@mansr.com>
+
+[ Upstream commit 40e05200593af06633f64ab0effff052eee6f076 ]
+
+If the i2c bus driver ignores the I2C_M_RECV_LEN flag (as some of
+them do), it is possible for an I2C_SMBUS_BLOCK_DATA read issued
+on some random device to return an arbitrary value in the first
+byte (and nothing else). When this happens, i2c_smbus_xfer_emulated()
+will happily write past the end of the supplied data buffer, thus
+causing Bad Things to happen. To prevent this, check the size
+before copying the data block and return an error if it is too large.
+
+Fixes: 209d27c3b167 ("i2c: Emulate SMBus block read over I2C")
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+[wsa: use better errno]
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/i2c-core-smbus.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
+index 3ac426a8ab5ab..c2ae8c8cd4295 100644
+--- a/drivers/i2c/i2c-core-smbus.c
++++ b/drivers/i2c/i2c-core-smbus.c
+@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
++ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
++ dev_err(&adapter->dev,
++ "Invalid block size returned: %d\n",
++ msg[1].buf[0]);
++ status = -EPROTO;
++ goto cleanup;
++ }
+ for (i = 0; i < msg[1].buf[0] + 1; i++)
+ data->block[i] = msg[1].buf[i];
+ break;
+--
+2.25.1
+
--- /dev/null
+From 0fd9409e8419c76054524253177b98cc66933976 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jun 2020 15:15:54 -0500
+Subject: i2c: fsi: Fix the port number field in status register
+
+From: Eddie James <eajames@linux.ibm.com>
+
+[ Upstream commit 502035e284cc7e9efef22b01771d822d49698ab9 ]
+
+The port number field in the status register was not correct, so fix it.
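+
+For reference, GENMASK(19, 16) spans only 4 bits (values 0-15), while
+GENMASK(22, 16) spans 7 bits (values 0-127); the wider mask is what the
+change below adopts.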
+
+Fixes: d6ffb6300116 ("i2c: Add FSI-attached I2C master algorithm")
+Signed-off-by: Eddie James <eajames@linux.ibm.com>
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-fsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
+index e0c256922d4f1..977d6f524649c 100644
+--- a/drivers/i2c/busses/i2c-fsi.c
++++ b/drivers/i2c/busses/i2c-fsi.c
+@@ -98,7 +98,7 @@
+ #define I2C_STAT_DAT_REQ BIT(25)
+ #define I2C_STAT_CMD_COMP BIT(24)
+ #define I2C_STAT_STOP_ERR BIT(23)
+-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
++#define I2C_STAT_MAX_PORT GENMASK(22, 16)
+ #define I2C_STAT_ANY_INT BIT(15)
+ #define I2C_STAT_SCL_IN BIT(11)
+ #define I2C_STAT_SDA_IN BIT(10)
+--
+2.25.1
+
--- /dev/null
+From 3321825176d4d9f7b82c423588af4436be56a131 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Jun 2020 13:47:35 +0300
+Subject: IB/mad: Fix use after free when destroying MAD agent
+
+From: Shay Drory <shayd@mellanox.com>
+
+[ Upstream commit 116a1b9f1cb769b83e5adff323f977a62b1dcb2e ]
+
+Currently, when RMPP MADs are processed while the MAD agent is being
+destroyed, it could result in a use-after-free of rmpp_recv, as described below:
+
+ cpu-0 cpu-1
+ ----- -----
+ib_mad_recv_done()
+ ib_mad_complete_recv()
+ ib_process_rmpp_recv_wc()
+ unregister_mad_agent()
+ ib_cancel_rmpp_recvs()
+ cancel_delayed_work()
+ process_rmpp_data()
+ start_rmpp()
+ queue_delayed_work(rmpp_recv->cleanup_work)
+ destroy_rmpp_recv()
+ free_rmpp_recv()
+ cleanup_work()[1]
+ spin_lock_irqsave(&rmpp_recv->agent->lock) <-- use after free
+
+[1] cleanup_work() == recv_cleanup_handler
+
+Fix it by waiting for the MAD agent reference count to reach zero before
+calling ib_cancel_rmpp_recvs().
+
+Fixes: 9a41e38a467c ("IB/mad: Use IDR for agent IDs")
+Link: https://lore.kernel.org/r/20200621104738.54850-2-leon@kernel.org
+Signed-off-by: Shay Drory <shayd@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/mad.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 455ecff54a8df..2284930b5f915 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+ xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
+
+ flush_workqueue(port_priv->wq);
+- ib_cancel_rmpp_recvs(mad_agent_priv);
+
+ deref_mad_agent(mad_agent_priv);
+ wait_for_completion(&mad_agent_priv->comp);
++ ib_cancel_rmpp_recvs(mad_agent_priv);
+
+ ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
+
+--
+2.25.1
+
--- /dev/null
+From 6723ff8a2f090c35e22e910c0c84aa3c26fefac2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 10:29:23 -0500
+Subject: ibmvnic: Harden device login requests
+
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+
+[ Upstream commit dff515a3e71dc8ab3b9dcc2e23a9b5fca88b3c18 ]
+
+The VNIC driver's "login" command sequence is the final step
+in the driver's initialization process with device firmware,
+confirming the available device queue resources to be utilized
+by the driver. Under high system load, firmware may not respond
+to the request in a timely manner or may abort the request. In
+such cases, the driver should reattempt the login command
+sequence. In case of a device error, the number of retries
+is bounded.
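+
+With the values used in the hunk below (a 30 second completion timeout,
+up to 10 retries and a 500 ms pause after an aborted login), the worst
+case is bounded at roughly ten to eleven 30-second waits, i.e. on the
+order of five minutes, before the driver gives up.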
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 5a42ddeecfe50..4f503b9a674c4 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -779,12 +779,13 @@ static int ibmvnic_login(struct net_device *netdev)
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int retry_count = 0;
++ int retries = 10;
+ bool retry;
+ int rc;
+
+ do {
+ retry = false;
+- if (retry_count > IBMVNIC_MAX_QUEUES) {
++ if (retry_count > retries) {
+ netdev_warn(netdev, "Login attempts exceeded\n");
+ return -1;
+ }
+@@ -799,11 +800,23 @@ static int ibmvnic_login(struct net_device *netdev)
+
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+- netdev_warn(netdev, "Login timed out\n");
+- return -1;
++ netdev_warn(netdev, "Login timed out, retrying...\n");
++ retry = true;
++ adapter->init_done_rc = 0;
++ retry_count++;
++ continue;
+ }
+
+- if (adapter->init_done_rc == PARTIALSUCCESS) {
++ if (adapter->init_done_rc == ABORTED) {
++ netdev_warn(netdev, "Login aborted, retrying...\n");
++ retry = true;
++ adapter->init_done_rc = 0;
++ retry_count++;
++ /* FW or device may be busy, so
++ * wait a bit before retrying login
++ */
++ msleep(500);
++ } else if (adapter->init_done_rc == PARTIALSUCCESS) {
+ retry_count++;
+ release_sub_crqs(adapter, 1);
+
+--
+2.25.1
+
--- /dev/null
+From 6561e2ccd23ce6394a13047abe2526f279fb652e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 07:13:43 +0800
+Subject: iommu/vt-d: Enable PCI ACS for platform opt in hint
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit 50310600ebda74b9988467e2e6128711c7ba56fc ]
+
+PCI ACS is disabled if the Intel IOMMU is off by default or intel_iommu=off
+is used on the command line. Unfortunately, the Intel IOMMU will be forced
+on if there are devices sitting on an external-facing PCI port that is
+marked as untrusted (for example, Thunderbolt peripherals). That means PCI
+ACS is disabled while the Intel IOMMU is forced on to isolate those devices.
+As a result, the devices of an MFD end up in a single IOMMU group even
+when ACS is supported on the device.
+
+[ 0.691263] pci 0000:00:07.1: Adding to iommu group 3
+[ 0.691277] pci 0000:00:07.2: Adding to iommu group 3
+[ 0.691292] pci 0000:00:07.3: Adding to iommu group 3
+
+Fix it by requesting PCI ACS when Intel IOMMU is detected with platform
+opt in hint.
+
+Fixes: 89a6079df791a ("iommu/vt-d: Force IOMMU on for platform opt in hint")
+Co-developed-by: Lalithambika Krishnakumar <lalithambika.krishnakumar@intel.com>
+Signed-off-by: Lalithambika Krishnakumar <lalithambika.krishnakumar@intel.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
+Cc: Ashok Raj <ashok.raj@intel.com>
+Link: https://lore.kernel.org/r/20200622231345.29722-5-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/dmar.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 9e393b9c50911..30ac0ba55864e 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
+ if (!ret)
+ ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+- if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
++ if (!ret && !no_iommu && !iommu_detected &&
++ (!dmar_disabled || dmar_platform_optin())) {
+ iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+--
+2.25.1
+
--- /dev/null
+From 2a8c0d3862e692380e4386fc8a3ac0b323c7dbf5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 07:13:44 +0800
+Subject: iommu/vt-d: Update scalable mode paging structure coherency
+
+From: Lu Baolu <baolu.lu@linux.intel.com>
+
+[ Upstream commit 04c00956ee3cd138fd38560a91452a804a8c5550 ]
+
+The Scalable-mode Page-walk Coherency (SMPWC) field in the VT-d extended
+capability register indicates the hardware coherency behavior on paging
+structures accessed through the PASID table entry. The current code ignores
+this field and uses ECAP.C instead, which is only valid in legacy mode.
+Fix this so that paging structure updates can be flushed from the cache
+manually when the hardware page walk is not snooped.
+
+Fixes: 765b6a98c1de3 ("iommu/vt-d: Enumerate the scalable mode capability")
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Link: https://lore.kernel.org/r/20200622231345.29722-6-baolu.lu@linux.intel.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel-iommu.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 773ac2b0d6068..6366b5fbb3a46 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -611,6 +611,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+ return g_iommus[iommu_id];
+ }
+
++static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
++{
++ return sm_supported(iommu) ?
++ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
++}
++
+ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+ {
+ struct dmar_drhd_unit *drhd;
+@@ -622,7 +628,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+
+ for_each_domain_iommu(i, domain) {
+ found = true;
+- if (!ecap_coherent(g_iommus[i]->ecap)) {
++ if (!iommu_paging_structure_coherency(g_iommus[i])) {
+ domain->iommu_coherency = 0;
+ break;
+ }
+@@ -633,7 +639,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+ /* No hardware attached; use lowest common denominator */
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+- if (!ecap_coherent(iommu->ecap)) {
++ if (!iommu_paging_structure_coherency(iommu)) {
+ domain->iommu_coherency = 0;
+ break;
+ }
+@@ -2090,7 +2096,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+
+ context_set_fault_enable(context);
+ context_set_present(context);
+- domain_flush_cache(domain, context, sizeof(*context));
++ if (!ecap_coherent(iommu->ecap))
++ clflush_cache_range(context, sizeof(*context));
+
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+--
+2.25.1
+
--- /dev/null
+From 52c8c940a42fe1f4030e7c563fa19f2c85b1767c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Jun 2020 23:43:40 +0900
+Subject: kbuild: improve cc-option to clean up all temporary files
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit f2f02ebd8f3833626642688b2d2c6a7b3c141fa9 ]
+
+When cc-option and friends evaluate compiler flags, the temporary file
+$$TMP is created as an output object, and automatically cleaned up.
+The actual file path of $$TMP is .<pid>.tmp, where <pid> is the process
+ID of the $(shell ...) invoked from cc-option. (Please note $$$$ is the
+escape sequence for $$.)
+
+Such garbage files are cleaned up in most cases, but some compiler flags
+create additional output files.
+
+For example, -gsplit-dwarf creates a .dwo file.
+
+When CONFIG_DEBUG_INFO_SPLIT=y, you will see a bunch of .<pid>.dwo files
+left in the top of build directories. You may not notice them unless you
+do 'ls -a', but the garbage files will increase every time you run 'make'.
+
+This commit changes the temporary object path to .tmp_<pid>/tmp, and
+removes .tmp_<pid> directory when exiting. Separate build artifacts such
+as *.dwo will be cleaned up all together because their file paths are
+usually determined based on the base name of the object.
+
+Another example is -ftest-coverage, which outputs the coverage data into
+<base-name-of-object>.gcno
+
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/Kbuild.include | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index d1dd4a6b6adb6..7da10afc92c61 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -82,20 +82,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \
+ $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c))))
+
+ # output directory for tests below
+-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
++TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
+
+ # try-run
+ # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
+ # Exit code chooses option. "$$TMP" serves as a temporary file and is
+ # automatically cleaned up.
+ try-run = $(shell set -e; \
+- TMP="$(TMPOUT).$$$$.tmp"; \
+- TMPO="$(TMPOUT).$$$$.o"; \
++ TMP=$(TMPOUT)/tmp; \
++ TMPO=$(TMPOUT)/tmp.o; \
++ mkdir -p $(TMPOUT); \
++ trap "rm -rf $(TMPOUT)" EXIT; \
+ if ($(1)) >/dev/null 2>&1; \
+ then echo "$(2)"; \
+ else echo "$(3)"; \
+- fi; \
+- rm -f "$$TMP" "$$TMPO")
++ fi)
+
+ # as-option
+ # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
+--
+2.25.1
+
--- /dev/null
+From 5770bfc7e7bf700470a76aade35b3ac1057b2caf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 17:02:33 +0900
+Subject: kprobes: Suppress the suspicious RCU warning on kprobes
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit 6743ad432ec92e680cd0d9db86cb17b949cf5a43 ]
+
+Anders reported that lockdep warns about suspicious RCU list
+usage in register_kprobe() (detected by CONFIG_PROVE_RCU_LIST).
+This is because get_kprobe() accesses kprobe_table[] via
+hlist_for_each_entry_rcu() without holding rcu_read_lock().
+
+If we call get_kprobe() from the breakpoint handler context,
+it runs with preemption disabled, so this is not a problem.
+But in other cases, instead of rcu_read_lock(), we hold
+kprobe_mutex so that kprobe_table[] is not updated.
+So the current code is safe, but still not good from the
+viewpoint of RCU.
+
+Joel suggested that we can silence that warning by passing
+lockdep_is_held() as the last argument of
+hlist_for_each_entry_rcu().
+
+Add lockdep_is_held(&kprobe_mutex) as the last argument of
+hlist_for_each_entry_rcu() to suppress the warning.
+
+Link: http://lkml.kernel.org/r/158927055350.27680.10261450713467997503.stgit@devnote2
+
+Reported-by: Anders Roxell <anders.roxell@linaro.org>
+Suggested-by: Joel Fernandes <joel@joelfernandes.org>
+Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/kprobes.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 195ecb955fcc5..950a5cfd262ce 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr)
+ struct kprobe *p;
+
+ head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
+- hlist_for_each_entry_rcu(p, head, hlist) {
++ hlist_for_each_entry_rcu(p, head, hlist,
++ lockdep_is_held(&kprobe_mutex)) {
+ if (p->addr == addr)
+ return p;
+ }
+--
+2.25.1
+
--- /dev/null
+From f57b36ca9f9da9b1b99946089c18ea9e74c80443 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 11:50:29 -0400
+Subject: net: alx: fix race condition in alx_remove
+
+From: Zekun Shen <bruceshenzk@gmail.com>
+
+[ Upstream commit e89df5c4322c1bf495f62d74745895b5fd2a4393 ]
+
+A race condition exists during termination. The path is alx_stop and
+then alx_remove. alx_schedule_link_check() could be called before
+alx_stop by the interrupt handler and invoke alx_link_check() later.
+alx_stop frees the napis, and alx_remove cancels any pending work.
+If any of that work is scheduled before termination and invoked before
+alx_remove, a null-ptr-deref occurs because both expect alx->napis[i].
+
+This patch fixes the race condition by moving the cancel_work_sync()
+calls before alx_free_napis() inside alx_stop. Because the interrupt
+handler can call alx_schedule_link_check() again, alx_free_irq() is
+moved before the cancel_work_sync() calls too.
+
+Signed-off-by: Zekun Shen <bruceshenzk@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/atheros/alx/main.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index d4bbcdfd691af..aa693c8e285ab 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1249,8 +1249,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
+
+ static void __alx_stop(struct alx_priv *alx)
+ {
+- alx_halt(alx);
+ alx_free_irq(alx);
++
++ cancel_work_sync(&alx->link_check_wk);
++ cancel_work_sync(&alx->reset_wk);
++
++ alx_halt(alx);
+ alx_free_rings(alx);
+ alx_free_napis(alx);
+ }
+@@ -1858,9 +1862,6 @@ static void alx_remove(struct pci_dev *pdev)
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+
+- cancel_work_sync(&alx->link_check_wk);
+- cancel_work_sync(&alx->reset_wk);
+-
+ /* restore permanent mac address */
+ alx_set_macaddr(hw, hw->perm_addr);
+
+--
+2.25.1
+
--- /dev/null
+From 0ac22e2588fa954d77753a09dc8b64d38262e70d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 18:14:55 -0700
+Subject: net: bcmgenet: use hardware padding of runt frames
+
+From: Doug Berger <opendmb@gmail.com>
+
+[ Upstream commit 20d1f2d1b024f6be199a3bedf1578a1d21592bc5 ]
+
+When commit 474ea9cafc45 ("net: bcmgenet: correctly pad short
+packets") added the call to skb_padto() it should have been
+located before the nr_frags parameter was read since that value
+could be changed when padding packets with lengths between 55
+and 59 bytes (inclusive).
+
+The use of a stale nr_frags value can cause corruption of the
+pad data when tx-scatter-gather is enabled. This corruption of
+the pad can cause invalid checksum computation when hardware
+offload of tx-checksum is also enabled.
+
+Since the original reason for the padding was corrected by
+commit 7dd399130efb ("net: bcmgenet: fix skb_len in
+bcmgenet_xmit_single()") we can remove the software padding all
+together and make use of hardware padding of short frames as
+long as the hardware also always appends the FCS value to the
+frame.
+
+Fixes: 474ea9cafc45 ("net: bcmgenet: correctly pad short packets")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 3d3b1005d0761..03f82786c0b98 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1591,11 +1591,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ goto out;
+ }
+
+- if (skb_padto(skb, ETH_ZLEN)) {
+- ret = NETDEV_TX_OK;
+- goto out;
+- }
+-
+ /* Retain how many bytes will be sent on the wire, without TSB inserted
+ * by transmit checksum offload
+ */
+@@ -1644,6 +1639,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
++ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
++ * will need to restore software padding of "runt" packets
++ */
+ if (!i) {
+ len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+--
+2.25.1
+
--- /dev/null
+From f6a74f4cd1c0d5ac13a031bb962646aacdfb7020 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:30 +0300
+Subject: net: qed: fix async event callbacks unregistering
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit 31333c1a2521ff4b4ceb0c29de492549cd4a8de3 ]
+
+qed_spq_unregister_async_cb() should be called before
+qed_rdma_info_free() to avoid crash-spawning uses-after-free.
+Instead of calling it from each subsystem exit code, do it in one place
+on PF down.
+
+Fixes: 291d57f67d24 ("qed: Fix rdma_info structure allocation")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_dev.c | 9 +++++++--
+ drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 2 --
+ drivers/net/ethernet/qlogic/qed/qed_roce.c | 1 -
+ 3 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index 0bf91df80d47f..ecd14474a6031 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
+
+ void qed_resc_free(struct qed_dev *cdev)
+ {
++ struct qed_rdma_info *rdma_info;
++ struct qed_hwfn *p_hwfn;
+ int i;
+
+ if (IS_VF(cdev)) {
+@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
+ qed_llh_free(cdev);
+
+ for_each_hwfn(cdev, i) {
+- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
++ p_hwfn = cdev->hwfns + i;
++ rdma_info = p_hwfn->p_rdma_info;
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
+ qed_ooo_free(p_hwfn);
+ }
+
+- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
++ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
++ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
+ qed_rdma_info_free(p_hwfn);
++ }
+
+ qed_iov_free(p_hwfn);
+ qed_l2_free(p_hwfn);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 65ec16a316584..2b3102a2fe5c9 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -2832,8 +2832,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
+ if (rc)
+ return rc;
+
+- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
+-
+ return qed_iwarp_ll2_stop(p_hwfn);
+ }
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index e49fada854108..83817bb50e9fa 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
+ break;
+ }
+ }
+- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
+ }
+
+ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+--
+2.25.1
+
--- /dev/null
+From 15712b1960675338eaaa7b53af986e3bd9d3570e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:33 +0300
+Subject: net: qed: fix excessive QM ILT lines consumption
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit d434d02f7e7c24c721365fd594ed781acb18e0da ]
+
+This is likely a copy'n'paste mistake. The number of ILT lines to
+reserve for a single VF was being multiplied by the total VF count.
+This led to a huge redundancy in the reservation and potential ILT
+line exhaustion.
+
+Fixes: 1408cc1fa48c ("qed: Introduce VFs")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 8e1bdf58b9e77..1d6dfba0c034d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+- iids->vf_cids += vf_cids * p_mngr->vf_count;
++ iids->vf_cids = vf_cids;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+--
+2.25.1
+
--- /dev/null
+From 3b2e7e457133193902b7cfdaf33a00d857956f62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:29 +0300
+Subject: net: qed: fix left elements count calculation
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit 97dd1abd026ae4e6a82fa68645928404ad483409 ]
+
+qed_chain_get_elem_left{,_u32}() returned 0 when the difference
+between the producer and consumer page counts was equal to the total
+page count.
+Fix this by expanding the producer value conditionally instead of
+unconditionally. This allows the normalization against the total page
+count, which was the cause of this bug, to be eliminated.
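+
+As an illustration with made-up 16-bit indices: if the producer has
+wrapped around to 0x0005 while the consumer is still at 0xfffb, then
+prod < cons triggers the expansion prod += 0x10000, giving
+used = 0x10005 - 0xfffb = 10 in-flight elements, which is then
+subtracted from the chain capacity.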
+
+Misc: replace open-coded constants with common defines.
+
+Fixes: a91eb52abb50 ("qed: Revisit chain implementation")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/qed/qed_chain.h | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
+index 733fad7dfbed9..6d15040c642cb 100644
+--- a/include/linux/qed/qed_chain.h
++++ b/include/linux/qed/qed_chain.h
+@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+
+ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+ {
++ u16 elem_per_page = p_chain->elem_per_page;
++ u32 prod = p_chain->u.chain16.prod_idx;
++ u32 cons = p_chain->u.chain16.cons_idx;
+ u16 used;
+
+- used = (u16) (((u32)0x10000 +
+- (u32)p_chain->u.chain16.prod_idx) -
+- (u32)p_chain->u.chain16.cons_idx);
++ if (prod < cons)
++ prod += (u32)U16_MAX + 1;
++
++ used = (u16)(prod - cons);
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
++ used -= prod / elem_per_page - cons / elem_per_page;
+
+ return (u16)(p_chain->capacity - used);
+ }
+
+ static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+ {
++ u16 elem_per_page = p_chain->elem_per_page;
++ u64 prod = p_chain->u.chain32.prod_idx;
++ u64 cons = p_chain->u.chain32.cons_idx;
+ u32 used;
+
+- used = (u32) (((u64)0x100000000ULL +
+- (u64)p_chain->u.chain32.prod_idx) -
+- (u64)p_chain->u.chain32.cons_idx);
++ if (prod < cons)
++ prod += (u64)U32_MAX + 1;
++
++ used = (u32)(prod - cons);
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
++ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
+
+ return p_chain->capacity - used;
+ }
+--
+2.25.1
+
--- /dev/null
+From fdc133113790657e92a2c45f3ba94f3cd2548b4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:32 +0300
+Subject: net: qed: fix NVMe login fails over VFs
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit ccd7c7ce167a21dbf2b698ffcf00f11d96d44f9b ]
+
+25 ms sleep cycles while waiting for the PF response are excessive and
+may lead to various timeout failures.
+
+Start waiting with short udelays; in most cases polling will finish
+there. If that time is not sufficient, switch to msleeps.
+usleep_range() may go far beyond 100 us depending on platform and tick
+configuration, hence atomic udelays for consistency.
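+
+With the constants introduced below, that amounts to at most roughly
+90 x 100 us = 9 ms of busy-waiting followed by up to 10 x 25 ms = 250 ms
+of sleeping, versus the previous worst case of 100 x 25 ms = 2.5 s of
+sleeping.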
+
+Also add explicit DMA barriers since 'done' always comes from a shared
+request-response DMA pool, and note that in the comment nearby.
+
+Fixes: 1408cc1fa48c ("qed: Introduce VFs")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_vf.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+index 856051f50eb75..adc2c8f3d48ef 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
+@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+ }
+
++#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
++#define QED_VF_CHANNEL_USLEEP_DELAY 100
++#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
++#define QED_VF_CHANNEL_MSLEEP_DELAY 25
++
+ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+ {
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+- int rc = 0, time = 100;
++ int iter, rc = 0;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+ /* When PF would be done with the response, it would write back to the
+- * `done' address. Poll until then.
++ * `done' address from a coherent DMA zone. Poll until then.
+ */
+- while ((!*done) && time) {
+- msleep(25);
+- time--;
++
++ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
++ while (!*done && iter--) {
++ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
++ dma_rmb();
++ }
++
++ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
++ while (!*done && iter--) {
++ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
++ dma_rmb();
+ }
+
+ if (!*done) {
+--
+2.25.1
+
--- /dev/null
+From 6382673083da095143e9e9942b261f763360ab6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:34 +0300
+Subject: net: qede: fix PTP initialization on recovery
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit 1c85f394c2206ea3835f43534d5675f0574e1b70 ]
+
+Currently the PTP cyclecounter and timecounter are initialized only on
+the first probe and are cleaned up during removal. This means that
+PTP becomes non-functional after device recovery.
+Fix this by unconditionally initializing PTP on probe and clearing the
+Tx pending bit on exit.
+
+Fixes: ccc67ef50b90 ("qede: Error recovery process")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 2 +-
+ drivers/net/ethernet/qlogic/qede/qede_ptp.c | 31 ++++++++------------
+ drivers/net/ethernet/qlogic/qede/qede_ptp.h | 2 +-
+ 3 files changed, 15 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 1da6b5bda80aa..361a1d759b0b5 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1158,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+
+ /* PTP not supported on VFs */
+ if (!is_vf)
+- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
++ qede_ptp_enable(edev);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+index f815435cf1061..2d3b2fa92df51 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+@@ -411,6 +411,7 @@ void qede_ptp_disable(struct qede_dev *edev)
+ if (ptp->tx_skb) {
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
++ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ }
+
+ /* Disable PTP in HW */
+@@ -422,7 +423,7 @@ void qede_ptp_disable(struct qede_dev *edev)
+ edev->ptp = NULL;
+ }
+
+-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
++static int qede_ptp_init(struct qede_dev *edev)
+ {
+ struct qede_ptp *ptp;
+ int rc;
+@@ -443,25 +444,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+- /* Init cyclecounter and timecounter. This is done only in the first
+- * load. If done in every load, PTP application will fail when doing
+- * unload / load (e.g. MTU change) while it is running.
+- */
+- if (init_tc) {
+- memset(&ptp->cc, 0, sizeof(ptp->cc));
+- ptp->cc.read = qede_ptp_read_cc;
+- ptp->cc.mask = CYCLECOUNTER_MASK(64);
+- ptp->cc.shift = 0;
+- ptp->cc.mult = 1;
+-
+- timecounter_init(&ptp->tc, &ptp->cc,
+- ktime_to_ns(ktime_get_real()));
+- }
++ /* Init cyclecounter and timecounter */
++ memset(&ptp->cc, 0, sizeof(ptp->cc));
++ ptp->cc.read = qede_ptp_read_cc;
++ ptp->cc.mask = CYCLECOUNTER_MASK(64);
++ ptp->cc.shift = 0;
++ ptp->cc.mult = 1;
+
+- return rc;
++ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
++
++ return 0;
+ }
+
+-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
++int qede_ptp_enable(struct qede_dev *edev)
+ {
+ struct qede_ptp *ptp;
+ int rc;
+@@ -482,7 +477,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+
+ edev->ptp = ptp;
+
+- rc = qede_ptp_init(edev, init_tc);
++ rc = qede_ptp_init(edev);
+ if (rc)
+ goto err1;
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+index 691a14c4b2c5a..89c7f3cf3ee28 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
++++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
+ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
+ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+ void qede_ptp_disable(struct qede_dev *edev);
+-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
++int qede_ptp_enable(struct qede_dev *edev);
+ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+
+ static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
+--
+2.25.1
+
--- /dev/null
+From 3555102f541d6cbcdc9c45b5b878e5758de43f23 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:35 +0300
+Subject: net: qede: fix use-after-free on recovery and AER handling
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit ec6c80590bde6b5dfa4970fffa3572f1acd313ca ]
+
+Set the edev->cdev pointer to NULL after calling the remove() callback
+to avoid using an already freed object.
+
+Fixes: ccc67ef50b90 ("qede: Error recovery process")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 361a1d759b0b5..2c3d654c84543 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1247,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+ if (system_state == SYSTEM_POWER_OFF)
+ return;
+ qed_ops->common->remove(cdev);
++ edev->cdev = NULL;
+
+ /* Since this can happen out-of-sync with other flows,
+ * don't release the netdevice until after slowpath stop
+--
+2.25.1
+
--- /dev/null
+From 6d1190f5d1116e0abc7c0ff4ec86e3631023ec4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 16:51:31 +0300
+Subject: net: qede: stop adding events on an already destroyed workqueue
+
+From: Alexander Lobakin <alobakin@marvell.com>
+
+[ Upstream commit 4079c7f7a2a00ab403c177ce723b560de59139c3 ]
+
+Set rdma_wq pointer to NULL after destroying the workqueue and check
+for it when adding new events to fix crashes on driver unload.
+
+Fixes: cee9fbd8e2e9 ("qede: Add qedr framework")
+Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qede/qede_rdma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+index 2d873ae8a234d..668ccc9d49f83 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
+
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
++ edev->rdma_info.rdma_wq = NULL;
+ }
+
+ int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
+@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
+ if (edev->rdma_info.exp_recovery)
+ return;
+
+- if (!edev->rdma_info.qedr_dev)
++ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
+ return;
+
+ /* We don't want the cleanup flow to start while we're allocating and
+--
+2.25.1
+
--- /dev/null
+From 209fc784db313123165c026c490832bfa68e0b35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jun 2020 21:51:11 +0100
+Subject: netfilter: ipset: fix unaligned atomic access
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 715028460082d07a7ec6fcd87b14b46784346a72 ]
+
+When using ip_set with counters and comment, traffic causes the kernel
+to panic on 32-bit ARM:
+
+Alignment trap: not handling instruction e1b82f9f at [<bf01b0dc>]
+Unhandled fault: alignment exception (0x221) at 0xea08133c
+PC is at ip_set_match_extensions+0xe0/0x224 [ip_set]
+
+The problem occurs when we try to update the 64-bit counters - the
+faulting address above is not 64-bit aligned. The problem occurs
+due to the way elements are allocated, for example:
+
+ set->dsize = ip_set_elem_len(set, tb, 0, 0);
+ map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
+
+If the element has a requirement for a member to be 64-bit aligned,
+and set->dsize is not a multiple of 8, but is a multiple of four,
+then every odd-numbered element will be misaligned - and hitting
+an atomic64_add() on that element will cause the kernel to panic.
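+
+For illustration (hypothetical sizes): with dsize = 36 and a counters
+extension at offset 8 within the element, element 0's counter lands at
+offset 8 (8-byte aligned), but element 1's lands at 36 + 8 = 44, which
+is only 4-byte aligned, so the 64-bit atomic update traps on 32-bit ARM.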
+
+ip_set_elem_len() must return a size that is rounded to the maximum
+alignment of any extension field stored in the element. This change
+ensures that is the case.
+
+Fixes: 95ad1f4a9358 ("netfilter: ipset: Fix extension alignment")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 75da200aa5d87..133a3f1b6f56c 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -382,6 +382,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
+ for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
+ if (!add_extension(id, cadt_flags, tb))
+ continue;
++ if (align < ip_set_extensions[id].align)
++ align = ip_set_extensions[id].align;
+ len = ALIGN(len, ip_set_extensions[id].align);
+ set->offset[id] = len;
+ set->extensions |= ip_set_extensions[id].type;
+--
+2.25.1
+
--- /dev/null
+From 1e03f058861e7b2c270e5b7233087d26d9a41bc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:53:10 -0700
+Subject: nvme: don't protect ns mutation with ns->head->lock
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit e164471dcf19308d154adb69e7760d8ba426a77f ]
+
+Right now ns->head->lock is protecting namespace mutation,
+which is wrong and unneeded. Move it to only protect
+against head mutations. While we're at it, remove the unnecessary
+ns->head reference as we already have the head pointer.
+
+The problem with this is that head->lock spans
+mpath disk node I/O that may block under some conditions (if,
+for example, the controller is disconnecting or the path
+became inaccessible). The locking scheme does not allow any
+other path to enable itself, preventing blocked I/O from
+completing and making forward progress from there.
+
+This is a preparation patch for the fix in a subsequent patch
+where the disk I/O will also be done outside the head->lock.
+
+Fixes: 0d0b660f214d ("nvme: add ANA support")
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 0f08c15553a64..18f0a05c74b56 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -414,11 +414,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ {
+ struct nvme_ns_head *head = ns->head;
+
+- lockdep_assert_held(&ns->head->lock);
+-
+ if (!head->disk)
+ return;
+
++ mutex_lock(&head->lock);
+ if (!(head->disk->flags & GENHD_FL_UP))
+ device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+@@ -431,9 +430,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ __nvme_find_path(head, node);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ }
++ mutex_unlock(&head->lock);
+
+- synchronize_srcu(&ns->head->srcu);
+- kblockd_schedule_work(&ns->head->requeue_work);
++ synchronize_srcu(&head->srcu);
++ kblockd_schedule_work(&head->requeue_work);
+ }
+
+ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
+@@ -484,14 +484,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
+ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ struct nvme_ns *ns)
+ {
+- mutex_lock(&ns->head->lock);
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+
+ if (nvme_state_is_live(ns->ana_state))
+ nvme_mpath_set_live(ns);
+- mutex_unlock(&ns->head->lock);
+ }
+
+ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+@@ -670,10 +668,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ nvme_update_ns_ana_state(&desc, ns);
+ }
+ } else {
+- mutex_lock(&ns->head->lock);
+ ns->ana_state = NVME_ANA_OPTIMIZED;
+ nvme_mpath_set_live(ns);
+- mutex_unlock(&ns->head->lock);
+ }
+
+ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+--
+2.25.1
+
--- /dev/null
+From ae2e1a951903da948ef80bad65edd4af30c1eac8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:53:08 -0700
+Subject: nvme: fix possible deadlock when I/O is blocked
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit 3b4b19721ec652ad2c4fe51dfbe5124212b5f581 ]
+
+Revert fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk
+in nvme_validate_ns")
+
+When adding a new namespace to the head disk (via nvme_mpath_set_live)
+we will see a partition scan, which triggers I/O on the mpath device node.
+This process will usually be triggered from the scan_work, which holds
+the scan_lock. If that I/O blocks (e.g. we got an ANA change and all the
+currently available paths are inaccessible), this can deadlock on the head
+disk bd_mutex, as both the partition scan I/O takes it and head disk
+revalidation takes it to check for a resize (also triggered from scan_work
+on a different path). See trace [1].
+
+The mpath disk revalidation was originally added to detect online disk
+size change, but this is no longer needed since commit cb224c3af4df
+("nvme: Convert to use set_capacity_revalidate_and_notify") which already
+updates resize info without unnecessarily revalidating the disk (the
+mpath disk doesn't even implement .revalidate_disk fop).
+
+[1]:
+--
+kernel: INFO: task kworker/u65:9:494 blocked for more than 241 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u65:9 D 0 494 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: schedule_preempt_disabled+0xe/0x10
+kernel: __mutex_lock.isra.0+0x182/0x4f0
+kernel: __mutex_lock_slowpath+0x13/0x20
+kernel: mutex_lock+0x2e/0x40
+kernel: revalidate_disk+0x63/0xa0
+kernel: __nvme_revalidate_disk+0xfe/0x110 [nvme_core]
+kernel: nvme_revalidate_disk+0xa4/0x160 [nvme_core]
+kernel: ? evict+0x14c/0x1b0
+kernel: revalidate_disk+0x2b/0xa0
+kernel: nvme_validate_ns+0x49/0x940 [nvme_core]
+kernel: ? blk_mq_free_request+0xd2/0x100
+kernel: ? __nvme_submit_sync_cmd+0xbe/0x1e0 [nvme_core]
+kernel: nvme_scan_work+0x24f/0x380 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x249/0x400
+kernel: kthread+0x104/0x140
+kernel: ? process_one_work+0x380/0x380
+kernel: ? kthread_park+0x80/0x80
+kernel: ret_from_fork+0x1f/0x40
+...
+kernel: INFO: task kworker/u65:1:2630 blocked for more than 241 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u65:1 D 0 2630 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: io_schedule+0x16/0x40
+kernel: do_read_cache_page+0x438/0x830
+kernel: ? __switch_to_asm+0x34/0x70
+kernel: ? file_fdatawait_range+0x30/0x30
+kernel: read_cache_page+0x12/0x20
+kernel: read_dev_sector+0x27/0xc0
+kernel: read_lba+0xc1/0x220
+kernel: ? kmem_cache_alloc_trace+0x19c/0x230
+kernel: efi_partition+0x1e6/0x708
+kernel: ? vsnprintf+0x39e/0x4e0
+kernel: ? snprintf+0x49/0x60
+kernel: check_partition+0x154/0x244
+kernel: rescan_partitions+0xae/0x280
+kernel: __blkdev_get+0x40f/0x560
+kernel: blkdev_get+0x3d/0x140
+kernel: __device_add_disk+0x388/0x480
+kernel: device_add_disk+0x13/0x20
+kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core]
+kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core]
+kernel: nvme_set_ns_ana_state+0x1e/0x30 [nvme_core]
+kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core]
+kernel: ? nvme_update_ns_ana_state+0x60/0x60 [nvme_core]
+kernel: nvme_mpath_add_disk+0x47/0x90 [nvme_core]
+kernel: nvme_validate_ns+0x396/0x940 [nvme_core]
+kernel: ? blk_mq_free_request+0xd2/0x100
+kernel: nvme_scan_work+0x24f/0x380 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x249/0x400
+kernel: kthread+0x104/0x140
+kernel: ? process_one_work+0x380/0x380
+kernel: ? kthread_park+0x80/0x80
+kernel: ret_from_fork+0x1f/0x40
+--
+
+Fixes: fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns")
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d4b388793f40d..c44c00b9e1d85 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1870,7 +1870,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ if (ns->head->disk) {
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+- revalidate_disk(ns->head->disk);
+ }
+ #endif
+ }
+--
+2.25.1
+
--- /dev/null
+From f954b14a2e48ad296fe233dcc6f04c824aef6fda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:53:09 -0700
+Subject: nvme-multipath: fix deadlock between ana_work and scan_work
+
+From: Anton Eidelman <anton@lightbitslabs.com>
+
+[ Upstream commit 489dd102a2c7c94d783a35f9412eb085b8da1aa4 ]
+
+When scan_work calls nvme_mpath_add_disk(), it holds ana_lock
+and invokes nvme_parse_ana_log(), which may issue IO
+in device_add_disk() and hang waiting for an accessible path.
+While nvme_mpath_set_live() is only called when nvme_state_is_live(),
+a transition may cause NVME_SC_ANA_TRANSITION and requeue the IO.
+
+In order to recover and complete the IO, ana_work on the same ctrl
+should be able to update the path state and clear NVME_NS_ANA_PENDING.
+
+The deadlock occurs because scan_work keeps holding ana_lock,
+so ana_work hangs [1].
+
+Fix:
+Now nvme_mpath_add_disk() uses nvme_parse_ana_log() to obtain a copy
+of the ANA group desc, and then calls nvme_update_ns_ana_state() without
+holding ana_lock.
+
+[1]:
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: io_schedule+0x16/0x40
+kernel: do_read_cache_page+0x438/0x830
+kernel: read_cache_page+0x12/0x20
+kernel: read_dev_sector+0x27/0xc0
+kernel: read_lba+0xc1/0x220
+kernel: efi_partition+0x1e6/0x708
+kernel: check_partition+0x154/0x244
+kernel: rescan_partitions+0xae/0x280
+kernel: __blkdev_get+0x40f/0x560
+kernel: blkdev_get+0x3d/0x140
+kernel: __device_add_disk+0x388/0x480
+kernel: device_add_disk+0x13/0x20
+kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core]
+kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core]
+kernel: nvme_set_ns_ana_state+0x1e/0x30 [nvme_core]
+kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core]
+kernel: nvme_mpath_add_disk+0x47/0x90 [nvme_core]
+kernel: nvme_validate_ns+0x396/0x940 [nvme_core]
+kernel: nvme_scan_work+0x24f/0x380 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x249/0x400
+kernel: kthread+0x104/0x140
+
+kernel: Workqueue: nvme-wq nvme_ana_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: schedule_preempt_disabled+0xe/0x10
+kernel: __mutex_lock.isra.0+0x182/0x4f0
+kernel: ? __switch_to_asm+0x34/0x70
+kernel: ? select_task_rq_fair+0x1aa/0x5c0
+kernel: ? kvm_sched_clock_read+0x11/0x20
+kernel: ? sched_clock+0x9/0x10
+kernel: __mutex_lock_slowpath+0x13/0x20
+kernel: mutex_lock+0x2e/0x40
+kernel: nvme_read_ana_log+0x3a/0x100 [nvme_core]
+kernel: nvme_ana_work+0x15/0x20 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x4d/0x400
+kernel: kthread+0x104/0x140
+kernel: ? process_one_work+0x380/0x380
+kernel: ? kthread_park+0x80/0x80
+kernel: ret_from_fork+0x35/0x40
+
+Fixes: 0d0b660f214d ("nvme: add ANA support")
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index c17cf8f00f536..0f08c15553a64 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -641,26 +641,34 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
+ }
+ DEVICE_ATTR_RO(ana_state);
+
+-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
++static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
+ struct nvme_ana_group_desc *desc, void *data)
+ {
+- struct nvme_ns *ns = data;
++ struct nvme_ana_group_desc *dst = data;
+
+- if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
+- nvme_update_ns_ana_state(desc, ns);
+- return -ENXIO; /* just break out of the loop */
+- }
++ if (desc->grpid != dst->grpid)
++ return 0;
+
+- return 0;
++ *dst = *desc;
++ return -ENXIO; /* just break out of the loop */
+ }
+
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+ if (nvme_ctrl_use_ana(ns->ctrl)) {
++ struct nvme_ana_group_desc desc = {
++ .grpid = id->anagrpid,
++ .state = 0,
++ };
++
+ mutex_lock(&ns->ctrl->ana_lock);
+ ns->ana_grpid = le32_to_cpu(id->anagrpid);
+- nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
++ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
+ mutex_unlock(&ns->ctrl->ana_lock);
++ if (desc.state) {
++ /* found the group desc: update */
++ nvme_update_ns_ana_state(&desc, ns);
++ }
+ } else {
+ mutex_lock(&ns->head->lock);
+ ns->ana_state = NVME_ANA_OPTIMIZED;
+--
+2.25.1
+
--- /dev/null
+From 56d18bff5ef7a8498c7028ad568c34078611296e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:53:11 -0700
+Subject: nvme-multipath: fix deadlock due to head->lock
+
+From: Anton Eidelman <anton@lightbitslabs.com>
+
+[ Upstream commit d8a22f85609fadb46ba699e0136cc3ebdeebff79 ]
+
+In the following scenario scan_work and ana_work will deadlock:
+
+When scan_work calls nvme_mpath_add_disk(), it holds ana_lock
+and invokes nvme_parse_ana_log(), which may issue IO
+in device_add_disk() and hang waiting for an accessible path.
+
+While nvme_mpath_set_live() is only called when nvme_state_is_live(),
+a transition may cause NVME_SC_ANA_TRANSITION and requeue the IO.
+
+Since nvme_mpath_set_live() holds ns->head->lock, an ana_work on
+ANY ctrl will not be able to complete nvme_mpath_set_live()
+on the same ns->head, which is required in order to update
+the newly accessible path and clear NVME_NS_ANA_PENDING.
+Therefore the IO never completes: deadlock [1].
+
+Fix:
+Move device_add_disk out of the head->lock and protect it with an
+atomic test_and_set_bit() for a new NVME_NSHEAD_DISK_LIVE flag.
+
+[1]:
+kernel: INFO: task kworker/u8:2:160 blocked for more than 120 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u8:2 D 0 160 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_ana_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: schedule_preempt_disabled+0xe/0x10
+kernel: __mutex_lock.isra.0+0x182/0x4f0
+kernel: __mutex_lock_slowpath+0x13/0x20
+kernel: mutex_lock+0x2e/0x40
+kernel: nvme_update_ns_ana_state+0x22/0x60 [nvme_core]
+kernel: nvme_update_ana_state+0xca/0xe0 [nvme_core]
+kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core]
+kernel: nvme_read_ana_log+0x76/0x100 [nvme_core]
+kernel: nvme_ana_work+0x15/0x20 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x4d/0x400
+kernel: kthread+0x104/0x140
+kernel: ret_from_fork+0x35/0x40
+kernel: INFO: task kworker/u8:4:439 blocked for more than 120 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u8:4 D 0 439 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: io_schedule+0x16/0x40
+kernel: do_read_cache_page+0x438/0x830
+kernel: read_cache_page+0x12/0x20
+kernel: read_dev_sector+0x27/0xc0
+kernel: read_lba+0xc1/0x220
+kernel: efi_partition+0x1e6/0x708
+kernel: check_partition+0x154/0x244
+kernel: rescan_partitions+0xae/0x280
+kernel: __blkdev_get+0x40f/0x560
+kernel: blkdev_get+0x3d/0x140
+kernel: __device_add_disk+0x388/0x480
+kernel: device_add_disk+0x13/0x20
+kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core]
+kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core]
+kernel: nvme_mpath_add_disk+0xbe/0x100 [nvme_core]
+kernel: nvme_validate_ns+0x396/0x940 [nvme_core]
+kernel: nvme_scan_work+0x256/0x390 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x4d/0x400
+kernel: kthread+0x104/0x140
+kernel: ret_from_fork+0x35/0x40
+
+Fixes: 0d0b660f214d ("nvme: add ANA support")
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 4 ++--
+ drivers/nvme/host/nvme.h | 2 ++
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 18f0a05c74b56..574b52e911f08 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -417,11 +417,11 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ if (!head->disk)
+ return;
+
+- mutex_lock(&head->lock);
+- if (!(head->disk->flags & GENHD_FL_UP))
++ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
+ device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+
++ mutex_lock(&head->lock);
+ if (nvme_path_is_optimized(ns)) {
+ int node, srcu_idx;
+
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 22e8401352c22..ed02260862cb5 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -345,6 +345,8 @@ struct nvme_ns_head {
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+ struct mutex lock;
++ unsigned long flags;
++#define NVME_NSHEAD_DISK_LIVE 0
+ struct nvme_ns __rcu *current_path[];
+ #endif
+ };
+--
+2.25.1
+
--- /dev/null
+From c260fa891f7a213e9260b4cda7a21bda02e0f42a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2020 09:09:04 -0700
+Subject: nvme-multipath: set bdi capabilities once
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit b2ce4d90690bd29ce5b554e203cd03682dd59697 ]
+
+The queues' backing device info capabilities don't change with each
+namespace revalidation. Set them only once, when each path's request_queue
+is initially added to a multipath queue.
+
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 56caddeabb5e5..c17cf8f00f536 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2017-2018 Christoph Hellwig.
+ */
+
++#include <linux/backing-dev.h>
+ #include <linux/moduleparam.h>
+ #include <trace/events/block.h>
+ #include "nvme.h"
+@@ -666,6 +667,13 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
+ }
++
++ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
++ struct backing_dev_info *info =
++ ns->head->disk->queue->backing_dev_info;
++
++ info->capabilities |= BDI_CAP_STABLE_WRITES;
++ }
+ }
+
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+--
+2.25.1
+
--- /dev/null
+From b08827d9fac6f9832f0596590716a05e0b305b0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 03:28:17 +0300
+Subject: pinctrl: qcom: spmi-gpio: fix warning about irq chip reusage
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 5e50311556c9f409a85740e3cb4c4511e7e27da0 ]
+
+Fix the following warnings caused by reuse of the same irq_chip
+instance for all spmi-gpio gpio_irq_chip instances. Instead, embed
+the irq_chip into the pmic_gpio_state struct.
+
+gpio gpiochip2: (c440000.qcom,spmi:pmic@2:gpio@c000): detected irqchip that is shared with multiple gpiochips: please fix the driver.
+gpio gpiochip3: (c440000.qcom,spmi:pmic@4:gpio@c000): detected irqchip that is shared with multiple gpiochips: please fix the driver.
+gpio gpiochip4: (c440000.qcom,spmi:pmic@a:gpio@c000): detected irqchip that is shared with multiple gpiochips: please fix the driver.
+
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Acked-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20200604002817.667160-1-dmitry.baryshkov@linaro.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+index f1fece5b9c06a..3769ad08eadfe 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+@@ -170,6 +170,7 @@ struct pmic_gpio_state {
+ struct regmap *map;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
++ struct irq_chip irq;
+ };
+
+ static const struct pinconf_generic_params pmic_gpio_bindings[] = {
+@@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
+ return 0;
+ }
+
+-static struct irq_chip pmic_gpio_irq_chip = {
+- .name = "spmi-gpio",
+- .irq_ack = irq_chip_ack_parent,
+- .irq_mask = irq_chip_mask_parent,
+- .irq_unmask = irq_chip_unmask_parent,
+- .irq_set_type = irq_chip_set_type_parent,
+- .irq_set_wake = irq_chip_set_wake_parent,
+- .flags = IRQCHIP_MASK_ON_SUSPEND,
+-};
+-
+ static int pmic_gpio_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+@@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
+ if (!parent_domain)
+ return -ENXIO;
+
++ state->irq.name = "spmi-gpio",
++ state->irq.irq_ack = irq_chip_ack_parent,
++ state->irq.irq_mask = irq_chip_mask_parent,
++ state->irq.irq_unmask = irq_chip_unmask_parent,
++ state->irq.irq_set_type = irq_chip_set_type_parent,
++ state->irq.irq_set_wake = irq_chip_set_wake_parent,
++ state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
++
+ girq = &state->chip.irq;
+- girq->chip = &pmic_gpio_irq_chip;
++ girq->chip = &state->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(state->dev->of_node);
+--
+2.25.1
+
--- /dev/null
+From 24ce7b0edca076b7c5a3fdeac40c7b148e933fb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 23:19:35 +0530
+Subject: pinctrl: tegra: Use noirq suspend/resume callbacks
+
+From: Vidya Sagar <vidyas@nvidia.com>
+
+[ Upstream commit 782b6b69847f34dda330530493ea62b7de3fd06a ]
+
+Use noirq suspend/resume callbacks, as other drivers that implement
+noirq suspend/resume callbacks (e.g. PCIe) depend on the pinctrl driver
+to configure the signals used by their respective devices in the noirq
+phase.
+
+Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
+Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
+Link: https://lore.kernel.org/r/20200604174935.26560-1-vidyas@nvidia.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/tegra/pinctrl-tegra.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index e9a7cbb9aa336..01bcef2c01bcf 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -685,8 +685,8 @@ static int tegra_pinctrl_resume(struct device *dev)
+ }
+
+ const struct dev_pm_ops tegra_pinctrl_pm = {
+- .suspend = &tegra_pinctrl_suspend,
+- .resume = &tegra_pinctrl_resume
++ .suspend_noirq = &tegra_pinctrl_suspend,
++ .resume_noirq = &tegra_pinctrl_resume
+ };
+
+ static bool gpio_node_has_range(const char *compatible)
+--
+2.25.1
+
--- /dev/null
+From 857e1f1f3c54a35576bd7b0799ed7886bba23744 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 13:43:04 +0300
+Subject: RDMA/cma: Protect bind_list and listen_list while finding matching cm
+ id
+
+From: Mark Zhang <markz@mellanox.com>
+
+[ Upstream commit 730c8912484186d4623d0c76509066d285c3a755 ]
+
+The bind_list and listen_list must be accessed under a lock; add the
+missing locking around the access in cm_ib_id_from_event().
+
+In addition, add lockdep asserts to make the locking semantics here
+clearer.
+
+ general protection fault: 0000 [#1] SMP NOPTI
+ CPU: 226 PID: 126135 Comm: kworker/226:1 Tainted: G OE 4.12.14-150.47-default #1 SLE15
+ Hardware name: Cray Inc. Windom/Windom, BIOS 0.8.7 01-10-2020
+ Workqueue: ib_cm cm_work_handler [ib_cm]
+ task: ffff9c5a60a1d2c0 task.stack: ffffc1d91f554000
+ RIP: 0010:cma_ib_req_handler+0x3f1/0x11b0 [rdma_cm]
+ RSP: 0018:ffffc1d91f557b40 EFLAGS: 00010286
+ RAX: deacffffffffff30 RBX: 0000000000000001 RCX: ffff9c2af5bb6000
+ RDX: 00000000000000a9 RSI: ffff9c5aa4ed2f10 RDI: ffffc1d91f557b08
+ RBP: ffffc1d91f557d90 R08: ffff9c340cc80000 R09: ffff9c2c0f901900
+ R10: 0000000000000000 R11: 0000000000000001 R12: deacffffffffff30
+ R13: ffff9c5a48aeec00 R14: ffffc1d91f557c30 R15: ffff9c5c2eea3688
+ FS: 0000000000000000(0000) GS:ffff9c5c2fa80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00002b5cc03fa320 CR3: 0000003f8500a000 CR4: 00000000003406e0
+ Call Trace:
+ ? rdma_addr_cancel+0xa0/0xa0 [ib_core]
+ ? cm_process_work+0x28/0x140 [ib_cm]
+ cm_process_work+0x28/0x140 [ib_cm]
+ ? cm_get_bth_pkey.isra.44+0x34/0xa0 [ib_cm]
+ cm_work_handler+0xa06/0x1a6f [ib_cm]
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to_asm+0x34/0x70
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to+0x7c/0x4b0
+ ? __switch_to_asm+0x40/0x70
+ ? __switch_to_asm+0x34/0x70
+ process_one_work+0x1da/0x400
+ worker_thread+0x2b/0x3f0
+ ? process_one_work+0x400/0x400
+ kthread+0x118/0x140
+ ? kthread_create_on_node+0x40/0x40
+ ret_from_fork+0x22/0x40
+ Code: 00 66 83 f8 02 0f 84 ca 05 00 00 49 8b 84 24 d0 01 00 00 48 85 c0 0f 84 68 07 00 00 48 2d d0 01
+ 00 00 49 89 c4 0f 84 59 07 00 00 <41> 0f b7 44 24 20 49 8b 77 50 66 83 f8 0a 75 9e 49 8b 7c 24 28
+
+Fixes: 4c21b5bcef73 ("IB/cma: Add net_dev and private data checks to RDMA CM")
+Link: https://lore.kernel.org/r/20200616104304.2426081-1-leon@kernel.org
+Signed-off-by: Mark Zhang <markz@mellanox.com>
+Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 8f776b7de45ee..e3cd9d2b0dd2b 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1631,6 +1631,8 @@ static struct rdma_id_private *cma_find_listener(
+ {
+ struct rdma_id_private *id_priv, *id_priv_dev;
+
++ lockdep_assert_held(&lock);
++
+ if (!bind_list)
+ return ERR_PTR(-EINVAL);
+
+@@ -1677,6 +1679,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ }
+ }
+
++ mutex_lock(&lock);
+ /*
+ * Net namespace might be getting deleted while route lookup,
+ * cm_id lookup is in progress. Therefore, perform netdevice
+@@ -1718,6 +1721,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
+ err:
+ rcu_read_unlock();
++ mutex_unlock(&lock);
+ if (IS_ERR(id_priv) && *net_dev) {
+ dev_put(*net_dev);
+ *net_dev = NULL;
+@@ -2473,6 +2477,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
+ int ret;
+
++ lockdep_assert_held(&lock);
++
+ if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
+ return;
+
+@@ -3245,6 +3251,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
+ u64 sid, mask;
+ __be16 port;
+
++ lockdep_assert_held(&lock);
++
+ addr = cma_src_addr(id_priv);
+ port = htons(bind_list->port);
+
+@@ -3273,6 +3281,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
+ struct rdma_bind_list *bind_list;
+ int ret;
+
++ lockdep_assert_held(&lock);
++
+ bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
+ if (!bind_list)
+ return -ENOMEM;
+@@ -3299,6 +3309,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
+ struct sockaddr *saddr = cma_src_addr(id_priv);
+ __be16 dport = cma_port(daddr);
+
++ lockdep_assert_held(&lock);
++
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+ struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
+ struct sockaddr *cur_saddr = cma_src_addr(cur_id);
+@@ -3338,6 +3350,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
+ unsigned int rover;
+ struct net *net = id_priv->id.route.addr.dev_addr.net;
+
++ lockdep_assert_held(&lock);
++
+ inet_get_local_port_range(net, &low, &high);
+ remaining = (high - low) + 1;
+ rover = prandom_u32() % remaining + low;
+@@ -3385,6 +3399,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
+ struct rdma_id_private *cur_id;
+ struct sockaddr *addr, *cur_addr;
+
++ lockdep_assert_held(&lock);
++
+ addr = cma_src_addr(id_priv);
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
+ if (id_priv == cur_id)
+@@ -3415,6 +3431,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
+ unsigned short snum;
+ int ret;
+
++ lockdep_assert_held(&lock);
++
+ snum = ntohs(cma_port(cma_src_addr(id_priv)));
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+ return -EACCES;
+--
+2.25.1
+
--- /dev/null
+From 096cd2a3323dde808f6ebfc06e846abc99900922 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 14:38:24 +0800
+Subject: RDMA/mad: Fix possible memory leak in ib_mad_post_receive_mads()
+
+From: Fan Guo <guofan5@huawei.com>
+
+[ Upstream commit a17f4bed811c60712d8131883cdba11a105d0161 ]
+
+If ib_dma_mapping_error() returns a non-zero value,
+ib_mad_post_receive_mads() will jump out of the loop and return -ENOMEM
+without freeing mad_priv. Fix this memory leak by freeing mad_priv
+in this case.
+
+Fixes: 2c34e68f4261 ("IB/mad: Check and handle potential DMA mapping errors")
+Link: https://lore.kernel.org/r/20200612063824.180611-1-guofan5@huawei.com
+Signed-off-by: Fan Guo <guofan5@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/mad.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index 9947d16edef21..455ecff54a8df 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -2960,6 +2960,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+ sg_list.addr))) {
++ kfree(mad_priv);
+ ret = -ENOMEM;
+ break;
+ }
+--
+2.25.1
+
--- /dev/null
+From a14956b7bfe9c15e7886bc0c07ea812a1bc7b9f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 12:34:08 +0300
+Subject: RDMA/qedr: Fix KASAN: use-after-free in ucma_event_handler+0x532
+
+From: Michal Kalderon <michal.kalderon@marvell.com>
+
+[ Upstream commit 0dfbd5ecf28cbcb81674c49d34ee97366db1be44 ]
+
+Private data passed to iwarp_cm_handler is copied for connection request /
+response, but ignored otherwise. If junk is passed, it is stored in the
+event and used later in the event processing.
+
+The driver passes an old junk pointer during connection close, which leads
+to a use-after-free on event processing. Set private data to NULL for
+events that don't have private data.
+
+ BUG: KASAN: use-after-free in ucma_event_handler+0x532/0x560 [rdma_ucm]
+ kernel: Read of size 4 at addr ffff8886caa71200 by task kworker/u128:1/5250
+ kernel:
+ kernel: Workqueue: iw_cm_wq cm_work_handler [iw_cm]
+ kernel: Call Trace:
+ kernel: dump_stack+0x8c/0xc0
+ kernel: print_address_description.constprop.0+0x1b/0x210
+ kernel: ? ucma_event_handler+0x532/0x560 [rdma_ucm]
+ kernel: ? ucma_event_handler+0x532/0x560 [rdma_ucm]
+ kernel: __kasan_report.cold+0x1a/0x33
+ kernel: ? ucma_event_handler+0x532/0x560 [rdma_ucm]
+ kernel: kasan_report+0xe/0x20
+ kernel: check_memory_region+0x130/0x1a0
+ kernel: memcpy+0x20/0x50
+ kernel: ucma_event_handler+0x532/0x560 [rdma_ucm]
+ kernel: ? __rpc_execute+0x608/0x620 [sunrpc]
+ kernel: cma_iw_handler+0x212/0x330 [rdma_cm]
+ kernel: ? iw_conn_req_handler+0x6e0/0x6e0 [rdma_cm]
+ kernel: ? enqueue_timer+0x86/0x140
+ kernel: ? _raw_write_lock_irq+0xd0/0xd0
+ kernel: cm_work_handler+0xd3d/0x1070 [iw_cm]
+
+Fixes: e411e0587e0d ("RDMA/qedr: Add iWARP connection management functions")
+Link: https://lore.kernel.org/r/20200616093408.17827-1-michal.kalderon@marvell.com
+Signed-off-by: Ariel Elior <ariel.elior@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/qedr/qedr_iw_cm.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 5e9732990be5c..a7a926b7b5628 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
+ if (params->cm_info) {
+ event.ird = params->cm_info->ird;
+ event.ord = params->cm_info->ord;
+- event.private_data_len = params->cm_info->private_data_len;
+- event.private_data = (void *)params->cm_info->private_data;
++ /* Only connect_request and reply have valid private data
++ * the rest of the events this may be left overs from
++ * connection establishment. CONNECT_REQUEST is issued via
++ * qedr_iw_mpa_request
++ */
++ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
++ event.private_data_len =
++ params->cm_info->private_data_len;
++ event.private_data =
++ (void *)params->cm_info->private_data;
++ }
+ }
+
+ if (ep->cm_id)
+--
+2.25.1
+
--- /dev/null
+From 82cb9ab505e369b1a2ecbbb4431f572928de9035 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jun 2020 23:11:48 -0500
+Subject: RDMA/rvt: Fix potential memory leak caused by rvt_alloc_rq
+
+From: Aditya Pakki <pakki001@umn.edu>
+
+[ Upstream commit 90a239ee25fa3a483facec3de7c144361a3d3a51 ]
+
+In case of failure of alloc_ud_wq_attr(), the memory allocated by
+rvt_alloc_rq() is not freed. Fix it by calling rvt_free_rq() using the
+existing clean-up code.
+
+Fixes: d310c4bf8aea ("IB/{rdmavt, hfi1, qib}: Remove AH refcount for UD QPs")
+Link: https://lore.kernel.org/r/20200614041148.131983-1-pakki001@umn.edu
+Signed-off-by: Aditya Pakki <pakki001@umn.edu>
+Acked-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index d354653893577..19556c62c7ea8 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -1196,7 +1196,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ err = alloc_ud_wq_attr(qp, rdi->dparms.node);
+ if (err) {
+ ret = (ERR_PTR(err));
+- goto bail_driver_priv;
++ goto bail_rq_rvt;
+ }
+
+ err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
+@@ -1300,9 +1300,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+ bail_rq_wq:
+- rvt_free_rq(&qp->r_rq);
+ free_ud_wq_attr(qp);
+
++bail_rq_rvt:
++ rvt_free_rq(&qp->r_rq);
++
+ bail_driver_priv:
+ rdi->driver_f.qp_priv_free(rdi, qp);
+
+--
+2.25.1
+
--- /dev/null
+From ce4eb3a36baf4d8294e7e8b0972feb828ab8bf5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jun 2020 12:47:17 -0500
+Subject: RDMA/siw: Fix pointer-to-int-cast warning in siw_rx_pbl()
+
+From: Tom Seewald <tseewald@gmail.com>
+
+[ Upstream commit 6769b275a313c76ddcd7d94c632032326db5f759 ]
+
+The variable buf_addr is of type dma_addr_t, which may not be the same size
+as a pointer. To ensure it is the correct size, cast it to a uintptr_t first.
+
+Fixes: c536277e0db1 ("RDMA/siw: Fix 64/32bit pointer inconsistency")
+Link: https://lore.kernel.org/r/20200610174717.15932-1-tseewald@gmail.com
+Signed-off-by: Tom Seewald <tseewald@gmail.com>
+Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/siw/siw_qp_rx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
+index c0a8872403258..0520e70084f97 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
+@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
+ break;
+
+ bytes = min(bytes, len);
+- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
++ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
++ bytes) {
+ copied += bytes;
+ offset += bytes;
+ len -= bytes;
+--
+2.25.1
+
--- /dev/null
+From e83d9f14d2900509a49d321c2e81323ae8c8e49d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 12:30:46 -0700
+Subject: recordmcount: support >64k sections
+
+From: Sami Tolvanen <samitolvanen@google.com>
+
+[ Upstream commit 4ef57b21d6fb49d2b25c47e4cff467a0c2c8b6b7 ]
+
+When compiling a kernel with Clang and LTO, we need to run
+recordmcount on vmlinux.o with a large number of sections, which
+currently fails as the program doesn't understand extended
+section indexes. This change adds support for processing binaries
+with >64k sections.
+
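+For reference, a simplified host-endian sketch of how ELF encodes more than
+64k sections (illustrative only; the patch itself goes through the w()/w2()
+byte-order helpers and its own find_symtab()/get_symindex() routines):
+
+  #include <elf.h>
+
+  /* e_shnum == 0 means the real section count is in shdr[0].sh_size */
+  static unsigned int real_shnum(const Elf64_Ehdr *ehdr,
+                                 const Elf64_Shdr *shdr0)
+  {
+          return ehdr->e_shnum ? ehdr->e_shnum
+                               : (unsigned int)shdr0->sh_size;
+  }
+
+  /* e_shstrndx == SHN_XINDEX means the real index is in shdr[0].sh_link */
+  static unsigned int real_shstrndx(const Elf64_Ehdr *ehdr,
+                                    const Elf64_Shdr *shdr0)
+  {
+          return ehdr->e_shstrndx != SHN_XINDEX ? ehdr->e_shstrndx
+                                                : shdr0->sh_link;
+  }
+
+  /* st_shndx == SHN_XINDEX means the real index lives in the parallel
+   * SHT_SYMTAB_SHNDX array, one Elf32_Word per symbol table entry. */
+  static unsigned int real_symndx(const Elf64_Sym *sym, unsigned long idx,
+                                  const Elf32_Word *symtab_shndx)
+  {
+          return sym->st_shndx != SHN_XINDEX ? sym->st_shndx
+                                             : symtab_shndx[idx];
+  }
+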
+Link: https://lkml.kernel.org/r/20200424193046.160744-1-samitolvanen@google.com
+Link: https://lore.kernel.org/lkml/CAK7LNARbZhoaA=Nnuw0=gBrkuKbr_4Ng_Ei57uafujZf7Xazgw@mail.gmail.com/
+
+Cc: Kees Cook <keescook@chromium.org>
+Reviewed-by: Matt Helsley <mhelsley@vmware.com>
+Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/recordmcount.h | 98 +++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 92 insertions(+), 6 deletions(-)
+
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 74eab03e31d4d..f9b19524da112 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -29,6 +29,11 @@
+ #undef has_rel_mcount
+ #undef tot_relsize
+ #undef get_mcountsym
++#undef find_symtab
++#undef get_shnum
++#undef set_shnum
++#undef get_shstrndx
++#undef get_symindex
+ #undef get_sym_str_and_relp
+ #undef do_func
+ #undef Elf_Addr
+@@ -58,6 +63,11 @@
+ # define __has_rel_mcount __has64_rel_mcount
+ # define has_rel_mcount has64_rel_mcount
+ # define tot_relsize tot64_relsize
++# define find_symtab find_symtab64
++# define get_shnum get_shnum64
++# define set_shnum set_shnum64
++# define get_shstrndx get_shstrndx64
++# define get_symindex get_symindex64
+ # define get_sym_str_and_relp get_sym_str_and_relp_64
+ # define do_func do64
+ # define get_mcountsym get_mcountsym_64
+@@ -91,6 +101,11 @@
+ # define __has_rel_mcount __has32_rel_mcount
+ # define has_rel_mcount has32_rel_mcount
+ # define tot_relsize tot32_relsize
++# define find_symtab find_symtab32
++# define get_shnum get_shnum32
++# define set_shnum set_shnum32
++# define get_shstrndx get_shstrndx32
++# define get_symindex get_symindex32
+ # define get_sym_str_and_relp get_sym_str_and_relp_32
+ # define do_func do32
+ # define get_mcountsym get_mcountsym_32
+@@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
+ return is_fake;
+ }
+
++static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
++ Elf32_Word const *symtab_shndx)
++{
++ unsigned long offset;
++ int index;
++
++ if (sym->st_shndx != SHN_XINDEX)
++ return w2(sym->st_shndx);
++
++ offset = (unsigned long)sym - (unsigned long)symtab;
++ index = offset / sizeof(*sym);
++
++ return w(symtab_shndx[index]);
++}
++
++static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
++{
++ if (shdr0 && !ehdr->e_shnum)
++ return w(shdr0->sh_size);
++
++ return w2(ehdr->e_shnum);
++}
++
++static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum)
++{
++ if (new_shnum >= SHN_LORESERVE) {
++ ehdr->e_shnum = 0;
++ shdr0->sh_size = w(new_shnum);
++ } else
++ ehdr->e_shnum = w2(new_shnum);
++}
++
++static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
++{
++ if (ehdr->e_shstrndx != SHN_XINDEX)
++ return w2(ehdr->e_shstrndx);
++
++ return w(shdr0->sh_link);
++}
++
++static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0,
++ unsigned const nhdr, Elf32_Word **symtab,
++ Elf32_Word **symtab_shndx)
++{
++ Elf_Shdr const *relhdr;
++ unsigned k;
++
++ *symtab = NULL;
++ *symtab_shndx = NULL;
++
++ for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
++ if (relhdr->sh_type == SHT_SYMTAB)
++ *symtab = (void *)ehdr + relhdr->sh_offset;
++ else if (relhdr->sh_type == SHT_SYMTAB_SHNDX)
++ *symtab_shndx = (void *)ehdr + relhdr->sh_offset;
++
++ if (*symtab && *symtab_shndx)
++ break;
++ }
++}
++
+ /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
+ static int append_func(Elf_Ehdr *const ehdr,
+ Elf_Shdr *const shstr,
+@@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr,
+ char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
+ ? ".rela__mcount_loc"
+ : ".rel__mcount_loc";
+- unsigned const old_shnum = w2(ehdr->e_shnum);
+ uint_t const old_shoff = _w(ehdr->e_shoff);
+ uint_t const old_shstr_sh_size = _w(shstr->sh_size);
+ uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
++ Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr);
++ unsigned int const old_shnum = get_shnum(ehdr, shdr0);
++ unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */
+ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
+ uint_t new_e_shoff;
+
+@@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr,
+ t += (_align & -t); /* word-byte align */
+ new_e_shoff = t;
+
++ set_shnum(ehdr, shdr0, new_shnum);
++
+ /* body for new shstrtab */
+ if (ulseek(sb.st_size, SEEK_SET) < 0)
+ return -1;
+@@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr,
+ return -1;
+
+ ehdr->e_shoff = _w(new_e_shoff);
+- ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */
+ if (ulseek(0, SEEK_SET) < 0)
+ return -1;
+ if (uwrite(ehdr, sizeof(*ehdr)) < 0)
+@@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx,
+ uint_t *const recvalp,
+ unsigned int *sym_index,
+ Elf_Shdr const *const symhdr,
++ Elf32_Word const *symtab,
++ Elf32_Word const *symtab_shndx,
+ Elf_Ehdr const *const ehdr)
+ {
+ Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
+@@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx,
+ for (symp = sym0, t = nsym; t; --t, ++symp) {
+ unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
+
+- if (txtndx == w2(symp->st_shndx)
++ if (txtndx == get_symindex(symp, symtab, symtab_shndx)
+ /* avoid STB_WEAK */
+ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+ /* function symbols on ARM have quirks, avoid them */
+@@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0,
+ return totrelsz;
+ }
+
+-
+ /* Overall supervision for Elf32 ET_REL file. */
+ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
+ unsigned const reltype)
+ {
+ Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+ + (void *)ehdr);
+- unsigned const nhdr = w2(ehdr->e_shnum);
+- Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
++ unsigned const nhdr = get_shnum(ehdr, shdr0);
++ Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)];
+ char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
+ + (void *)ehdr);
+
+ Elf_Shdr const *relhdr;
+ unsigned k;
+
++ Elf32_Word *symtab;
++ Elf32_Word *symtab_shndx;
++
+ /* Upper bound on space: assume all relevant relocs are for mcount. */
+ unsigned totrelsz;
+
+@@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
+ return -1;
+ }
+
++ find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx);
++
+ for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+ char const *const txtname = has_rel_mcount(relhdr, shdr0,
+ shstrtab, fname);
+@@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
+ result = find_secsym_ndx(w(relhdr->sh_info), txtname,
+ &recval, &recsym,
+ &shdr0[symsec_sh_link],
++ symtab, symtab_shndx,
+ ehdr);
+ if (result)
+ goto out;
+--
+2.25.1
+
--- /dev/null
+From 9662459cdefdd196220a87d2481fc5631f26e658 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jun 2020 16:21:29 +0100
+Subject: regmap: Fix memory leak from regmap_register_patch
+
+From: Charles Keepax <ckeepax@opensource.cirrus.com>
+
+[ Upstream commit 95b2c3ec4cb1689db2389c251d39f64490ba641c ]
+
+When a register patch is registered, the reg_sequence is copied, but the
+memory allocated for it is never freed. Add a kfree() in regmap_exit() to
+clean it up.
+
+Fixes: 22f0d90a3482 ("regmap: Support register patch sets")
+Signed-off-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20200617152129.19655-1-ckeepax@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/regmap/regmap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 59f911e577192..508bbd6ea4396 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1356,6 +1356,7 @@ void regmap_exit(struct regmap *map)
+ if (map->hwlock)
+ hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
++ kfree(map->patch);
+ kfree(map);
+ }
+ EXPORT_SYMBOL_GPL(regmap_exit);
+--
+2.25.1
+
--- /dev/null
+From 74ced24ba4e13cbbf6f34573086516f7d93e77fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 05:54:08 +0800
+Subject: regulator: pfuze100: correct sw1a/sw2 on pfuze3000
+
+From: Robin Gong <yibin.gong@nxp.com>
+
+[ Upstream commit 6f1cf5257acc6e6242ddf2f52bc7912aed77b79f ]
+
+PFUZE100_SWB_REG is not proper for sw1a/sw2 because its enable_mask/enable_reg
+are not correct. On PFUZE3000, sw1a/sw2 should behave the same as sw1a/sw2 on
+pfuze100 except that their voltages are not linear, so add a new
+PFUZE3000_SW_REG and pfuze3000_sw_regulator_ops, which are like
+PFUZE100_SW_REG and pfuze100_sw_regulator_ops but handle the non-linear
+voltage table.
+
+Fixes: 1dced996ee70 ("regulator: pfuze100: update voltage setting for pfuze3000 sw1a")
+Reported-by: Christophe Meynard <Christophe.Meynard@ign.fr>
+Signed-off-by: Robin Gong <yibin.gong@nxp.com>
+Link: https://lore.kernel.org/r/1592171648-8752-1-git-send-email-yibin.gong@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/pfuze100-regulator.c | 60 +++++++++++++++++---------
+ 1 file changed, 39 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
+index 689537927f6f7..4c8e8b4722872 100644
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
+
+ };
+
++static const struct regulator_ops pfuze3000_sw_regulator_ops = {
++ .enable = regulator_enable_regmap,
++ .disable = regulator_disable_regmap,
++ .is_enabled = regulator_is_enabled_regmap,
++ .list_voltage = regulator_list_voltage_table,
++ .map_voltage = regulator_map_voltage_ascend,
++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
++ .set_voltage_time_sel = regulator_set_voltage_time_sel,
++ .set_ramp_delay = pfuze100_set_ramp_delay,
++
++};
++
+ #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
+ [_chip ## _ ## _name] = { \
+ .desc = { \
+@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
+ .stby_mask = 0x20, \
+ }
+
+-
+-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
+- .desc = { \
+- .name = #_name,\
+- .n_voltages = ((max) - (min)) / (step) + 1, \
+- .ops = &pfuze100_sw_regulator_ops, \
+- .type = REGULATOR_VOLTAGE, \
+- .id = _chip ## _ ## _name, \
+- .owner = THIS_MODULE, \
+- .min_uV = (min), \
+- .uV_step = (step), \
+- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+- .vsel_mask = 0x7, \
+- }, \
+- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+- .stby_mask = 0x7, \
+-}
++/* No linar case for the some switches of PFUZE3000 */
++#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
++ [_chip ## _ ## _name] = { \
++ .desc = { \
++ .name = #_name, \
++ .n_voltages = ARRAY_SIZE(voltages), \
++ .ops = &pfuze3000_sw_regulator_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .id = _chip ## _ ## _name, \
++ .owner = THIS_MODULE, \
++ .volt_table = voltages, \
++ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
++ .vsel_mask = (mask), \
++ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
++ .enable_mask = 0xf, \
++ .enable_val = 0x8, \
++ .enable_time = 500, \
++ }, \
++ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
++ .stby_mask = (mask), \
++ .sw_reg = true, \
++ }
+
+ #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
+ .desc = { \
+@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
+ };
+
+ static struct pfuze_regulator pfuze3000_regulators[] = {
+- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
++ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
+- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
++ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+ PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
+ };
+
+ static struct pfuze_regulator pfuze3001_regulators[] = {
+- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
++ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
++ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+ PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+--
+2.25.1
+
--- /dev/null
+From 678a8e622be22d0835521c360e4cebbcf8ee6f58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 19:33:06 +0530
+Subject: RISC-V: Don't allow write+exec only page mapping request in mmap
+
+From: Yash Shah <yash.shah@sifive.com>
+
+[ Upstream commit e0d17c842c0f824fd4df9f4688709fc6907201e1 ]
+
+As per table 4.4 of version "20190608-Priv-MSU-Ratified" of the
+RISC-V instruction set manual[0], the PTE permission bit combination of
+"write+exec only" is reserved for future use. Hence, don't allow such
+a mapping request in the mmap call.
+
+An issue has been reported by David Abdurachmanov: while running
+stress-ng with the "sysbadaddr" argument, RCU stalls are observed on the
+RISC-V kernel.
+
+This issue arises when stress-sysbadaddr requests pages with
+"write+exec only" permission bits and then passes the address obtained
+from this mmap call to various system calls. For the riscv kernel, the
+mmap call should fail for this particular combination of permission bits
+since it's not valid.
+
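+A small userspace check (illustrative only, not part of the patch) showing
+the behaviour after this change on riscv:
+
+  #include <errno.h>
+  #include <stdio.h>
+  #include <sys/mman.h>
+
+  int main(void)
+  {
+          void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
+                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+          if (p == MAP_FAILED && errno == EINVAL)
+                  printf("write+exec only mapping rejected, as expected\n");
+          else if (p != MAP_FAILED)
+                  printf("write+exec only mapping was allowed\n");
+          return 0;
+  }
+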
+[0]: http://dabbelt.com/~palmer/keep/riscv-isa-manual/riscv-privileged-20190608-1.pdf
+
+Signed-off-by: Yash Shah <yash.shah@sifive.com>
+Reported-by: David Abdurachmanov <david.abdurachmanov@gmail.com>
+[Palmer: Refer to the latest ISA specification at the only link I could
+find, and update the terminology.]
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/sys_riscv.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
+index f3619f59d85cc..12f8a7fce78b1 100644
+--- a/arch/riscv/kernel/sys_riscv.c
++++ b/arch/riscv/kernel/sys_riscv.c
+@@ -8,6 +8,7 @@
+ #include <linux/syscalls.h>
+ #include <asm/unistd.h>
+ #include <asm/cacheflush.h>
++#include <asm-generic/mman-common.h>
+
+ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
+ {
+ if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
+ return -EINVAL;
++
++ if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
++ if (unlikely(!(prot & PROT_READ)))
++ return -EINVAL;
++
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> (PAGE_SHIFT - page_shift_offset));
+ }
+--
+2.25.1
+
--- /dev/null
+From c7781a8b2a839a739c84632addf90f10e3cd6083 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Jun 2020 18:32:35 +0000
+Subject: riscv/atomic: Fix sign extension for RV64I
+
+From: Nathan Huckleberry <nhuck@google.com>
+
+[ Upstream commit 6c58f25e6938c073198af8b1e1832f83f8f0df33 ]
+
+The argument passed to cmpxchg is not guaranteed to be sign
+extended, but lr.w sign-extends on RV64I. This makes cmpxchg
+fail on clang-built kernels when __old is negative.
+
+To fix this, we just cast __old to long, which sign-extends on
+RV64I. With this fix, clang-built RISC-V kernels now boot.
+
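+A host-side sketch of the mismatch (illustrative only, assuming an LP64
+host where long is 64 bits): lr.w always sign-extends the loaded word, so
+a zero-extended compare value can never match it for negative values.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int32_t old = -1;
+
+          /* what lr.w leaves in a 64-bit register: sign-extended */
+          uint64_t loaded   = (uint64_t)(int64_t)old;   /* 0xffffffffffffffff */
+
+          /* a zero-extended compare value, as the compiler may materialise it */
+          uint64_t cmp_zext = (uint64_t)(uint32_t)old;  /* 0x00000000ffffffff */
+
+          /* the fix: cast through long so the compare value is sign-extended */
+          uint64_t cmp_sext = (uint64_t)(long)old;      /* 0xffffffffffffffff */
+
+          printf("match without cast: %d\n", loaded == cmp_zext);  /* 0 */
+          printf("match with cast:    %d\n", loaded == cmp_sext);  /* 1 */
+          return 0;
+  }
+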
+Link: https://github.com/ClangBuiltLinux/linux/issues/867
+Signed-off-by: Nathan Huckleberry <nhuck@google.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/cmpxchg.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
+index d969bab4a26b5..262e5bbb27760 100644
+--- a/arch/riscv/include/asm/cmpxchg.h
++++ b/arch/riscv/include/asm/cmpxchg.h
+@@ -179,7 +179,7 @@
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+- : "rJ" (__old), "rJ" (__new) \
++ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+@@ -224,7 +224,7 @@
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+- : "rJ" (__old), "rJ" (__new) \
++ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+@@ -270,7 +270,7 @@
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+- : "rJ" (__old), "rJ" (__new) \
++ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+@@ -316,7 +316,7 @@
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+- : "rJ" (__old), "rJ" (__new) \
++ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+--
+2.25.1
+
--- /dev/null
+From afba1812cf1b9a98ebff55c75a4e35a329a34f34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jun 2020 23:01:23 +0100
+Subject: rxrpc: Fix handling of rwind from an ACK packet
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a2ad7c21ad8cf1ce4ad65e13df1c2a1c29b38ac5 ]
+
+The handling of the receive window size (rwind) from a received ACK packet
+is not correct. The rxrpc_input_ackinfo() function currently checks the
+current Tx window size against the rwind from the ACK to see if it has
+changed, and only then limits the rwind size before storing it in the
+tx_winsize member and, if it increased, waking up the transmitting process.
+This means that if rwind > RXRPC_RXTX_BUFF_SIZE - 1, this path will always
+be followed.
+
+Fix this by limiting rwind before we compare it to tx_winsize.
+
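+A minimal sketch of the ordering problem (illustrative only; the wake/trace
+logic is reduced to a return value and RXRPC_RXTX_BUFF_SIZE is assumed to
+be 64 here):
+
+  #define RXRPC_RXTX_BUFF_SIZE 64
+
+  static unsigned int tx_winsize = 63;
+
+  static int old_order(unsigned int rwind)      /* peer advertises 128 */
+  {
+          if (tx_winsize != rwind) {            /* 63 != 128: always true */
+                  if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+                          rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+                  tx_winsize = rwind;           /* stored as 63 again */
+                  return 1;                     /* spurious change detected */
+          }
+          return 0;
+  }
+
+  static int new_order(unsigned int rwind)
+  {
+          if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) /* clamp first: 128 -> 63 */
+                  rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+          if (tx_winsize != rwind) {            /* 63 == 63: no change */
+                  tx_winsize = rwind;
+                  return 1;
+          }
+          return 0;
+  }
+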
+The effect of this can be seen by enabling the rxrpc_rx_rwind_change
+tracepoint.
+
+Fixes: 702f2ac87a9a ("rxrpc: Wake up the transmitter if Rx window size increases on the peer")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/input.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 3be4177baf707..22dec6049e1bb 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
+ rwind, ntohl(ackinfo->jumbo_max));
+
++ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
++ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (call->tx_winsize != rwind) {
+- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (rwind > call->tx_winsize)
+ wake = true;
+- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
+- ntohl(ackinfo->rwind), wake);
++ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
+ call->tx_winsize = rwind;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 14f96310350f82418243ab2c34b99c2dd678f549 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2020 16:44:50 +0100
+Subject: s390/ptrace: fix setting syscall number
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+[ Upstream commit 873e5a763d604c32988c4a78913a8dab3862d2f9 ]
+
+When strace wants to update the syscall number, it sets GPR2
+to the desired number and updates the GPR via PTRACE_SETREGSET.
+It doesn't update regs->int_code, which would cause the old syscall
+to be executed on syscall restart. As we cannot change the ptrace ABI and
+don't have a field for the interruption code, check whether the tracee
+is in a syscall and the last instruction was svc. In that case, assume
+that the tracer wants to update the syscall number and copy the GPR2
+value to regs->int_code.
+
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/ptrace.c | 31 ++++++++++++++++++++++++++++++-
+ 1 file changed, 30 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 5a2b1501d9983..5aa786063eb3e 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child,
+ child->thread.per_user.end = data;
+ }
+
++static void fixup_int_code(struct task_struct *child, addr_t data)
++{
++ struct pt_regs *regs = task_pt_regs(child);
++ int ilc = regs->int_code >> 16;
++ u16 insn;
++
++ if (ilc > 6)
++ return;
++
++ if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
++ &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
++ return;
++
++ /* double check that tracee stopped on svc instruction */
++ if ((insn >> 8) != 0xa)
++ return;
++
++ regs->int_code = 0x20000 | (data & 0xffff);
++}
+ /*
+ * Write a word to the user area of a process at location addr. This
+ * operation does have an additional problem compared to peek_user.
+@@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+ struct user *dummy = NULL;
+ addr_t offset;
+
++
+ if (addr < (addr_t) &dummy->regs.acrs) {
++ struct pt_regs *regs = task_pt_regs(child);
+ /*
+ * psw and gprs are stored on the stack
+ */
+@@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+ /* Invalid addressing mode bits */
+ return -EINVAL;
+ }
+- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
++
++ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
++ addr == offsetof(struct user, regs.gprs[2]))
++ fixup_int_code(child, data);
++ *(addr_t *)((addr_t) &regs->psw + addr) = data;
+
+ } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
+ /*
+@@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child,
+ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
+ (__u64)(tmp & PSW32_ADDR_AMODE);
+ } else {
++
++ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
++ addr == offsetof(struct compat_user, regs.gprs[2]))
++ fixup_int_code(child, data);
+ /* gpr 0-15 */
+ *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp;
+ }
+--
+2.25.1
+
--- /dev/null
+From 726f2c2ed1bacb4c419f6b19601efd118b73f3f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 13:19:34 +0100
+Subject: s390/ptrace: pass invalid syscall numbers to tracing
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+[ Upstream commit 00332c16b1604242a56289ff2b26e283dbad0812 ]
+
+tracing expects to see invalid syscalls, so pass them through.
+The syscall path in entry.S checks the syscall number before
+looking up the handler, so it is still safe.
+
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/entry.S | 2 +-
+ arch/s390/kernel/ptrace.c | 6 ++----
+ 2 files changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index bc85987727f09..c544b7a11ebb3 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -368,9 +368,9 @@ ENTRY(system_call)
+ jnz .Lsysc_nr_ok
+ # svc 0: system call number in %r1
+ llgfr %r1,%r1 # clear high word in r1
++ sth %r1,__PT_INT_CODE+2(%r11)
+ cghi %r1,NR_syscalls
+ jnl .Lsysc_nr_ok
+- sth %r1,__PT_INT_CODE+2(%r11)
+ slag %r8,%r1,3
+ .Lsysc_nr_ok:
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index ad71132374f0c..5a2b1501d9983 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -844,11 +844,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+ * call number to gprs[2].
+ */
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+- (tracehook_report_syscall_entry(regs) ||
+- regs->gprs[2] >= NR_syscalls)) {
++ tracehook_report_syscall_entry(regs)) {
+ /*
+- * Tracing decided this syscall should not happen or the
+- * debugger stored an invalid system call number. Skip
++ * Tracing decided this syscall should not happen. Skip
+ * the system call and the system call restart handling.
+ */
+ clear_pt_regs_flag(regs, PIF_SYSCALL);
+--
+2.25.1
+
--- /dev/null
+From dafb8d49f76fb7be88621e7003ee1da92212d55a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jun 2020 16:54:52 +0200
+Subject: s390/qeth: fix error handling for isolation mode cmds
+
+From: Julian Wiedmann <jwi@linux.ibm.com>
+
+[ Upstream commit e2dfcfba00ba4a414617ef4c5a8501fe21567eb3 ]
+
+Current(?) OSA devices also store their cmd-specific return codes for
+SET_ACCESS_CONTROL cmds into the top-level cmd->hdr.return_code.
+So once we added stricter checking for the top-level field a while ago,
+none of the error logic that rolls back the user's configuration to its
+old state is applied any longer.
+
+For this specific cmd, go back to the old model where we peek into the
+cmd structure even though the top-level field indicated an error.
+
+Fixes: 686c97ee29c8 ("s390/qeth: fix error handling in adapter command callbacks")
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/net/qeth_core_main.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index fe70e9875bde0..5043f0fcf399a 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4163,9 +4163,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ int fallback = *(int *)reply->param;
+
+ QETH_CARD_TEXT(card, 4, "setaccb");
+- if (cmd->hdr.return_code)
+- return -EIO;
+- qeth_setadpparms_inspect_rc(cmd);
+
+ access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+ QETH_CARD_TEXT_(card, 2, "rc=%d",
+@@ -4175,7 +4172,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+ access_ctrl_req->subcmd_code, CARD_DEVID(card),
+ cmd->data.setadapterparms.hdr.return_code);
+- switch (cmd->data.setadapterparms.hdr.return_code) {
++ switch (qeth_setadpparms_inspect_rc(cmd)) {
+ case SET_ACCESS_CTRL_RC_SUCCESS:
+ if (card->options.isolation == ISOLATION_MODE_NONE) {
+ dev_info(&card->gdev->dev,
+--
+2.25.1
+
--- /dev/null
+From 4bd9e8e22701db01567924deda036b77666e3172 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 12:10:27 +0000
+Subject: s390/vdso: fix vDSO clock_getres()
+
+From: Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+[ Upstream commit 478237a595120a18e9b52fd2c57a6e8b7a01e411 ]
+
+clock_getres in the vDSO library has to preserve the same behaviour
+as posix_get_hrtimer_res().
+
+In particular, posix_get_hrtimer_res() does:
+ sec = 0;
+ ns = hrtimer_resolution;
+and hrtimer_resolution depends on the enablement of the high
+resolution timers that can happen either at compile or at run time.
+
+Fix the s390 vdso implementation of clock_getres by keeping a copy of
+hrtimer_resolution in the vdso data and using that directly.
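+
+The behaviour being preserved can be checked from user space (a quick
+sketch, not part of this patch): with high-resolution timers enabled the
+reported resolution is 1 ns, otherwise it is the timer tick period:
+
+  #include <stdio.h>
+  #include <time.h>
+
+  int main(void)
+  {
+          struct timespec res;
+
+          /* served by the vDSO when available, by the syscall otherwise */
+          if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
+                  printf("resolution: %ld s %ld ns\n",
+                         (long)res.tv_sec, res.tv_nsec);
+          return 0;
+  }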
+
+Link: https://lkml.kernel.org/r/20200324121027.21665-1-vincenzo.frascino@arm.com
+Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+[heiko.carstens@de.ibm.com: use llgf for proper zero extension]
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/vdso.h | 1 +
+ arch/s390/kernel/asm-offsets.c | 2 +-
+ arch/s390/kernel/time.c | 1 +
+ arch/s390/kernel/vdso64/clock_getres.S | 10 +++++-----
+ 4 files changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
+index 169d7604eb804..f3ba84fa9bd18 100644
+--- a/arch/s390/include/asm/vdso.h
++++ b/arch/s390/include/asm/vdso.h
+@@ -36,6 +36,7 @@ struct vdso_data {
+ __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
+ __u32 ts_dir; /* TOD steering direction 0x64 */
+ __u64 ts_end; /* TOD steering end 0x68 */
++ __u32 hrtimer_res; /* hrtimer resolution 0x70 */
+ };
+
+ struct vdso_per_cpu_data {
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index b6628586ab702..a65cb4924bdbd 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -76,6 +76,7 @@ int main(void)
+ OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+ OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+ OFFSET(__VDSO_TS_END, vdso_data, ts_end);
++ OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
+ OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
+ OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
+ OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
+@@ -87,7 +88,6 @@ int main(void)
+ DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
+ DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
+- DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
+ BLANK();
+ /* idle data offsets */
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index e8766beee5ad8..8ea9db599d38d 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk)
+
+ vdso_data->tk_mult = tk->tkr_mono.mult;
+ vdso_data->tk_shift = tk->tkr_mono.shift;
++ vdso_data->hrtimer_res = hrtimer_resolution;
+ smp_wmb();
+ ++vdso_data->tb_update_count;
+ }
+diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
+index 081435398e0a1..0c79caa32b592 100644
+--- a/arch/s390/kernel/vdso64/clock_getres.S
++++ b/arch/s390/kernel/vdso64/clock_getres.S
+@@ -17,12 +17,14 @@
+ .type __kernel_clock_getres,@function
+ __kernel_clock_getres:
+ CFI_STARTPROC
+- larl %r1,4f
++ larl %r1,3f
++ lg %r0,0(%r1)
+ cghi %r2,__CLOCK_REALTIME_COARSE
+ je 0f
+ cghi %r2,__CLOCK_MONOTONIC_COARSE
+ je 0f
+- larl %r1,3f
++ larl %r1,_vdso_data
++ llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
+ cghi %r2,__CLOCK_REALTIME
+ je 0f
+ cghi %r2,__CLOCK_MONOTONIC
+@@ -36,7 +38,6 @@ __kernel_clock_getres:
+ jz 2f
+ 0: ltgr %r3,%r3
+ jz 1f /* res == NULL */
+- lg %r0,0(%r1)
+ xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
+ stg %r0,8(%r3) /* store tp->tv_usec */
+ 1: lghi %r2,0
+@@ -45,6 +46,5 @@ __kernel_clock_getres:
+ svc 0
+ br %r14
+ CFI_ENDPROC
+-3: .quad __CLOCK_REALTIME_RES
+-4: .quad __CLOCK_COARSE_RES
++3: .quad __CLOCK_COARSE_RES
+ .size __kernel_clock_getres,.-__kernel_clock_getres
+--
+2.25.1
+
--- /dev/null
+From d4e6cb7af9745809667ef2973ecf048b55c06295 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jun 2020 12:25:24 -0700
+Subject: s390/vdso: Use $(LD) instead of $(CC) to link vDSO
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit 2b2a25845d534ac6d55086e35c033961fdd83a26 ]
+
+Currently, the VDSO is being linked through $(CC). This does not match
+how the rest of the kernel links objects, which is through the $(LD)
+variable.
+
+When clang is built in a default configuration, it first attempts to use
+the target triple's default linker, which is just ld. However, the user
+can override this through the CLANG_DEFAULT_LINKER cmake define so that
+clang uses another linker by default, such as LLVM's own linker, ld.lld.
+This can be useful to get more optimized links across various different
+projects.
+
+However, this is problematic for the s390 vDSO because ld.lld does not
+have any s390 emulation support:
+
+https://github.com/llvm/llvm-project/blob/llvmorg-10.0.1-rc1/lld/ELF/Driver.cpp#L132-L150
+
+Thus, if a user is using a toolchain with ld.lld as the default, they
+will see an error, even if they have specified ld.bfd through the LD
+make variable:
+
+$ make -j"$(nproc)" -s ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- LLVM=1 \
+ LD=s390x-linux-gnu-ld \
+ defconfig arch/s390/kernel/vdso64/
+ld.lld: error: unknown emulation: elf64_s390
+clang-11: error: linker command failed with exit code 1 (use -v to see invocation)
+
+Normally, '-fuse-ld=bfd' could be used to get around this; however, this
+can be fragile, depending on paths and variable naming. The cleaner
+solution for the kernel is to take advantage of the fact that $(LD) can
+be invoked directly, which bypasses the heuristics of $(CC) and respects
+the user's choice. Similar changes have been done for ARM, ARM64, and
+MIPS.
+
+Link: https://lkml.kernel.org/r/20200602192523.32758-1-natechancellor@gmail.com
+Link: https://github.com/ClangBuiltLinux/linux/issues/1041
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+[heiko.carstens@de.ibm.com: add --build-id flag]
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/vdso64/Makefile | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index bec19e7e6e1cf..4a66a1cb919b1 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s
+
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
+-KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
+- -Wl,--hash-style=both
++ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
++ --hash-style=both --build-id -T
+
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
+ $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
+@@ -37,8 +37,8 @@ KASAN_SANITIZE := n
+ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
+- $(call if_changed,vdso64ld)
++$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE
++ $(call if_changed,ld)
+
+ # strip rule for the .so file
+ $(obj)/%.so: OBJCOPYFLAGS := -S
+@@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE
+ $(call if_changed_dep,vdso64as)
+
+ # actual build commands
+-quiet_cmd_vdso64ld = VDSO64L $@
+- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
+ quiet_cmd_vdso64as = VDSO64A $@
+ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+
+--
+2.25.1
+
--- /dev/null
+From 114ee09fda5a5bdc4fe6a34b6442cef5a4f6ea11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 18:30:40 +0200
+Subject: samples/bpf: xdp_redirect_cpu: Set MAX_CPUS according to NR_CPUS
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 6a09815428547657f3ffd2f5c31ac2a191e7fdf3 ]
+
+xdp_redirect_cpu currently fails in bpf_prog_load_xattr() when
+allocating the cpu_map map if CONFIG_NR_CPUS is less than 64, since
+cpu_map_alloc() does not allow max_entries to exceed NR_CPUS.
+Set the cpu_map max_entries according to NR_CPUS in xdp_redirect_cpu_kern.c
+and get the number of currently running CPUs in xdp_redirect_cpu_user.c.
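+
+On the user-space side, the number of configured CPUs can be read with
+get_nprocs_conf() (a trivial sketch, not the sample's code):
+
+  #include <stdio.h>
+  #include <sys/sysinfo.h>
+
+  int main(void)
+  {
+          /* configured CPUs; this bounds the usable cpumap indices */
+          int n_cpus = get_nprocs_conf();
+
+          printf("cpumap needs room for %d entries\n", n_cpus);
+          return 0;
+  }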
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/374472755001c260158c4e4b22f193bdd3c56fb7.1589300442.git.lorenzo@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ samples/bpf/xdp_redirect_cpu_kern.c | 2 +-
+ samples/bpf/xdp_redirect_cpu_user.c | 29 ++++++++++++++++-------------
+ 2 files changed, 17 insertions(+), 14 deletions(-)
+
+diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
+index cfcc31e511978..d94a999b4b4b7 100644
+--- a/samples/bpf/xdp_redirect_cpu_kern.c
++++ b/samples/bpf/xdp_redirect_cpu_kern.c
+@@ -15,7 +15,7 @@
+ #include "bpf_helpers.h"
+ #include "hash_func01.h"
+
+-#define MAX_CPUS 64 /* WARNING - sync with _user.c */
++#define MAX_CPUS NR_CPUS
+
+ /* Special map type that can XDP_REDIRECT frames to another CPU */
+ struct {
+diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
+index 8b862a7a6c6ac..767869e3b308f 100644
+--- a/samples/bpf/xdp_redirect_cpu_user.c
++++ b/samples/bpf/xdp_redirect_cpu_user.c
+@@ -13,6 +13,7 @@ static const char *__doc__ =
+ #include <unistd.h>
+ #include <locale.h>
+ #include <sys/resource.h>
++#include <sys/sysinfo.h>
+ #include <getopt.h>
+ #include <net/if.h>
+ #include <time.h>
+@@ -24,8 +25,6 @@ static const char *__doc__ =
+ #include <arpa/inet.h>
+ #include <linux/if_link.h>
+
+-#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
+-
+ /* How many xdp_progs are defined in _kern.c */
+ #define MAX_PROG 6
+
+@@ -40,6 +39,7 @@ static char *ifname;
+ static __u32 prog_id;
+
+ static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
++static int n_cpus;
+ static int cpu_map_fd;
+ static int rx_cnt_map_fd;
+ static int redirect_err_cnt_map_fd;
+@@ -170,7 +170,7 @@ struct stats_record {
+ struct record redir_err;
+ struct record kthread;
+ struct record exception;
+- struct record enq[MAX_CPUS];
++ struct record enq[];
+ };
+
+ static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
+@@ -225,10 +225,11 @@ static struct datarec *alloc_record_per_cpu(void)
+ static struct stats_record *alloc_stats_record(void)
+ {
+ struct stats_record *rec;
+- int i;
++ int i, size;
+
+- rec = malloc(sizeof(*rec));
+- memset(rec, 0, sizeof(*rec));
++ size = sizeof(*rec) + n_cpus * sizeof(struct record);
++ rec = malloc(size);
++ memset(rec, 0, size);
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ exit(EXIT_FAIL_MEM);
+@@ -237,7 +238,7 @@ static struct stats_record *alloc_stats_record(void)
+ rec->redir_err.cpu = alloc_record_per_cpu();
+ rec->kthread.cpu = alloc_record_per_cpu();
+ rec->exception.cpu = alloc_record_per_cpu();
+- for (i = 0; i < MAX_CPUS; i++)
++ for (i = 0; i < n_cpus; i++)
+ rec->enq[i].cpu = alloc_record_per_cpu();
+
+ return rec;
+@@ -247,7 +248,7 @@ static void free_stats_record(struct stats_record *r)
+ {
+ int i;
+
+- for (i = 0; i < MAX_CPUS; i++)
++ for (i = 0; i < n_cpus; i++)
+ free(r->enq[i].cpu);
+ free(r->exception.cpu);
+ free(r->kthread.cpu);
+@@ -350,7 +351,7 @@ static void stats_print(struct stats_record *stats_rec,
+ }
+
+ /* cpumap enqueue stats */
+- for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
++ for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
+ char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
+ char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
+ char *errstr = "";
+@@ -475,7 +476,7 @@ static void stats_collect(struct stats_record *rec)
+ map_collect_percpu(fd, 1, &rec->redir_err);
+
+ fd = cpumap_enqueue_cnt_map_fd;
+- for (i = 0; i < MAX_CPUS; i++)
++ for (i = 0; i < n_cpus; i++)
+ map_collect_percpu(fd, i, &rec->enq[i]);
+
+ fd = cpumap_kthread_cnt_map_fd;
+@@ -549,10 +550,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
+ */
+ static void mark_cpus_unavailable(void)
+ {
+- __u32 invalid_cpu = MAX_CPUS;
++ __u32 invalid_cpu = n_cpus;
+ int ret, i;
+
+- for (i = 0; i < MAX_CPUS; i++) {
++ for (i = 0; i < n_cpus; i++) {
+ ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+ &invalid_cpu, 0);
+ if (ret) {
+@@ -688,6 +689,8 @@ int main(int argc, char **argv)
+ int prog_fd;
+ __u32 qsize;
+
++ n_cpus = get_nprocs_conf();
++
+ /* Notice: choosing he queue size is very important with the
+ * ixgbe driver, because it's driver page recycling trick is
+ * dependend on pages being returned quickly. The number of
+@@ -757,7 +760,7 @@ int main(int argc, char **argv)
+ case 'c':
+ /* Add multiple CPUs */
+ add_cpu = strtoul(optarg, NULL, 0);
+- if (add_cpu >= MAX_CPUS) {
++ if (add_cpu >= n_cpus) {
+ fprintf(stderr,
+ "--cpu nr too large for cpumap err(%d):%s\n",
+ errno, strerror(errno));
+--
+2.25.1
+
--- /dev/null
+From da4f2297f6784a2585411ea43d2a62b922defa68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 22:06:43 -0500
+Subject: sata_rcar: handle pm_runtime_get_sync failure cases
+
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+
+[ Upstream commit eea1238867205b9e48a67c1a63219529a73c46fd ]
+
+Calling pm_runtime_get_sync increments the usage counter even in case of
+failure, causing an incorrect ref count. Call pm_runtime_put if
+pm_runtime_get_sync fails.
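+
+The underlying pattern (a sketch only, with a made-up function name, not
+this driver's code) is that a failed pm_runtime_get_sync() still has to be
+balanced with a put:
+
+  #include <linux/pm_runtime.h>
+
+  static int example_resume_path(struct device *dev)
+  {
+          int ret;
+
+          ret = pm_runtime_get_sync(dev);
+          if (ret < 0) {
+                  /* the usage counter was bumped even though get_sync failed */
+                  pm_runtime_put(dev);
+                  return ret;
+          }
+
+          /* ... re-initialise the hardware ... */
+
+          return 0;
+  }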
+
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/sata_rcar.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 3495e1733a8e6..c35b7b993133e 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -905,7 +905,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+- goto err_pm_disable;
++ goto err_pm_put;
+
+ host = ata_host_alloc(dev, 1);
+ if (!host) {
+@@ -935,7 +935,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
+
+ err_pm_put:
+ pm_runtime_put(dev);
+-err_pm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+ }
+@@ -989,8 +988,10 @@ static int sata_rcar_resume(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(dev);
+ return ret;
++ }
+
+ if (priv->type == RCAR_GEN3_SATA) {
+ sata_rcar_init_module(priv);
+@@ -1015,8 +1016,10 @@ static int sata_rcar_restore(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(dev);
+ return ret;
++ }
+
+ sata_rcar_setup_port(host);
+
+--
+2.25.1
+
--- /dev/null
+From bc9ddafe2cc87dfddf9f301fa97c9eaa32f1f146 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Nov 2018 16:32:01 +0100
+Subject: sched/core: Fix PI boosting between RT and DEADLINE tasks
+
+From: Juri Lelli <juri.lelli@redhat.com>
+
+[ Upstream commit 740797ce3a124b7dd22b7fb832d87bc8fba1cf6f ]
+
+syzbot reported the following warning:
+
+ WARNING: CPU: 1 PID: 6351 at kernel/sched/deadline.c:628
+ enqueue_task_dl+0x22da/0x38a0 kernel/sched/deadline.c:1504
+
+At deadline.c:628 we have:
+
+ 623 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
+ 624 {
+ 625 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+ 626 struct rq *rq = rq_of_dl_rq(dl_rq);
+ 627
+ 628 WARN_ON(dl_se->dl_boosted);
+ 629 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
+ [...]
+ }
+
+Which means that setup_new_dl_entity() has been called on a task
+currently boosted. This shouldn't happen though, as setup_new_dl_entity()
+is only called when the 'dynamic' deadline of the new entity
+is in the past w.r.t. rq_clock and boosted tasks shouldn't satisfy this
+condition.
+
+Digging through the PI code I noticed that the above might in fact happen
+if an RT task blocks on an rt_mutex held by a DEADLINE task. In the
+first branch of the boosting conditions we check only whether the pi_task's
+'dynamic' deadline is earlier than the mutex holder's, and in this case we
+set the mutex holder to be dl_boosted. However, since RT 'dynamic' deadlines
+are only initialized if such tasks get boosted at some point (or if they
+become DEADLINE of course), RT 'dynamic' deadlines are usually equal
+to 0 and this satisfies the aforementioned condition.
+
+Fix it by checking that the potential donor task is actually (even if
+only temporarily, because itself boosted) running at DEADLINE priority
+before using its 'dynamic' deadline value.
+
+Fixes: 2d3d891d3344 ("sched/deadline: Add SCHED_DEADLINE inheritance logic")
+Reported-by: syzbot+119ba87189432ead09b4@syzkaller.appspotmail.com
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
+Tested-by: Daniel Wagner <dwagner@suse.de>
+Link: https://lkml.kernel.org/r/20181119153201.GB2119@localhost.localdomain
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 361cbc2dc9667..7238ef445dafb 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4447,7 +4447,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+ */
+ if (dl_prio(prio)) {
+ if (!dl_prio(p->normal_prio) ||
+- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
++ (pi_task && dl_prio(pi_task->prio) &&
++ dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ p->dl.dl_boosted = 1;
+ queue_flag |= ENQUEUE_REPLENISH;
+ } else
+--
+2.25.1
+
--- /dev/null
+From 1b8a613c1afd081041a68dbcbe1546d8f3c31172 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Jun 2020 09:29:19 +0200
+Subject: sched/deadline: Initialize ->dl_boosted
+
+From: Juri Lelli <juri.lelli@redhat.com>
+
+[ Upstream commit ce9bc3b27f2a21a7969b41ffb04df8cf61bd1592 ]
+
+syzbot reported the following warning triggered via SYSC_sched_setattr():
+
+ WARNING: CPU: 0 PID: 6973 at kernel/sched/deadline.c:593 setup_new_dl_entity /kernel/sched/deadline.c:594 [inline]
+ WARNING: CPU: 0 PID: 6973 at kernel/sched/deadline.c:593 enqueue_dl_entity /kernel/sched/deadline.c:1370 [inline]
+ WARNING: CPU: 0 PID: 6973 at kernel/sched/deadline.c:593 enqueue_task_dl+0x1c17/0x2ba0 /kernel/sched/deadline.c:1441
+
+This happens because the ->dl_boosted flag is currently not initialized by
+__dl_clear_params() (unlike the other flags) and setup_new_dl_entity()
+rightfully complains about it.
+
+Initialize dl_boosted to 0.
+
+Fixes: 2d3d891d3344 ("sched/deadline: Add SCHED_DEADLINE inheritance logic")
+Reported-by: syzbot+5ac8bac25f95e8b221e7@syzkaller.appspotmail.com
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Daniel Wagner <dwagner@suse.de>
+Link: https://lkml.kernel.org/r/20200617072919.818409-1-juri.lelli@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/deadline.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 08bdee0480b3e..4cb00538a207b 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2693,6 +2693,7 @@ void __dl_clear_params(struct task_struct *p)
+ dl_se->dl_bw = 0;
+ dl_se->dl_density = 0;
+
++ dl_se->dl_boosted = 0;
+ dl_se->dl_throttled = 0;
+ dl_se->dl_yielded = 0;
+ dl_se->dl_non_contending = 0;
+--
+2.25.1
+
--- /dev/null
+From df455c8857098918c637520501b3e112ab9fa83f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jun 2020 10:41:22 +0200
+Subject: scsi: lpfc: Avoid another null dereference in lpfc_sli4_hba_unset()
+
+From: SeongJae Park <sjpark@amazon.de>
+
+[ Upstream commit 46da547e21d6cefceec3fb3dba5ebbca056627fc ]
+
+Commit cdb42becdd40 ("scsi: lpfc: Replace io_channels for nvme and fcp with
+general hdw_queues per cpu") has introduced static checker warnings for
+potential null dereferences in 'lpfc_sli4_hba_unset()' and commit 1ffdd2c0440d
+("scsi: lpfc: resolve static checker warning in lpfc_sli4_hba_unset") has
+tried to fix it. However, yet another potential null dereference
+remains. This commit fixes it.
+
+This bug was discovered and resolved using Coverity Static Analysis
+Security Testing (SAST) by Synopsys, Inc.
+
+Link: https://lore.kernel.org/r/20200623084122.30633-1-sjpark@amazon.com
+Fixes: 1ffdd2c0440d ("scsi: lpfc: resolve static checker warning in lpfc_sli4_hba_unset")
+Fixes: cdb42becdd40 ("scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu")
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: SeongJae Park <sjpark@amazon.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 14d9f41977f1c..95abffd9ad100 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11542,7 +11542,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ lpfc_sli4_xri_exchange_busy_wait(phba);
+
+ /* per-phba callback de-registration for hotplug event */
+- lpfc_cpuhp_remove(phba);
++ if (phba->pport)
++ lpfc_cpuhp_remove(phba);
+
+ /* Disable PCI subsystem interrupt */
+ lpfc_sli4_disable_intr(phba);
+--
+2.25.1
+
--- /dev/null
+From 409e36b4b540d719c77de1f875dfa95ff1692e34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jun 2020 12:40:43 -0400
+Subject: selftests/net: report etf errors correctly
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit ca8826095e4d4afc0ccaead27bba6e4b623a12ae ]
+
+The ETF qdisc can queue skbs that it could not pace on the errqueue.
+
+Address a few issues in the selftest:
+
+- recv buffer size was too small, and incorrectly calculated
+- compared errno to ee_code instead of ee_errno
+- missed invalid request error type
+
+v2:
+ - fix a few checkpatch --strict indentation warnings
+
+Fixes: ea6a547669b3 ("selftests/net: make so_txtime more robust to timer variance")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/so_txtime.c | 33 +++++++++++++++++++------
+ 1 file changed, 26 insertions(+), 7 deletions(-)
+
+diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
+index 383bac05ac324..ceaad78e96674 100644
+--- a/tools/testing/selftests/net/so_txtime.c
++++ b/tools/testing/selftests/net/so_txtime.c
+@@ -15,8 +15,9 @@
+ #include <inttypes.h>
+ #include <linux/net_tstamp.h>
+ #include <linux/errqueue.h>
++#include <linux/if_ether.h>
+ #include <linux/ipv6.h>
+-#include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <stdbool.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
+ {
+ char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
+ CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
+- char data[sizeof(struct ipv6hdr) +
+- sizeof(struct tcphdr) + 1];
++ char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
++ sizeof(struct udphdr) + 1];
+ struct sock_extended_err *err;
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
+ msg.msg_controllen = sizeof(control);
+
+ while (1) {
++ const char *reason;
++
+ ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
+ if (ret == -1 && errno == EAGAIN)
+ break;
+@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
+ err = (struct sock_extended_err *)CMSG_DATA(cm);
+ if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
+ error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
+- if (err->ee_code != ECANCELED)
+- error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
++
++ switch (err->ee_errno) {
++ case ECANCELED:
++ if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
++ error(1, 0, "errqueue: unknown ECANCELED %u\n",
++ err->ee_code);
++ reason = "missed txtime";
++ break;
++ case EINVAL:
++ if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
++ error(1, 0, "errqueue: unknown EINVAL %u\n",
++ err->ee_code);
++ reason = "invalid txtime";
++ break;
++ default:
++ error(1, 0, "errqueue: errno %u code %u\n",
++ err->ee_errno, err->ee_code);
++ };
+
+ tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
+ tstamp -= (int64_t) glob_tstart;
+ tstamp /= 1000 * 1000;
+- fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
+- data[ret - 1], tstamp);
++ fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
++ data[ret - 1], tstamp, reason);
+
+ msg.msg_flags = 0;
+ msg.msg_controllen = sizeof(control);
+--
+2.25.1
+
xhci-fix-enumeration-issue-when-setting-max-packet-size-for-fs-devices.patch
xhci-return-if-xhci-doesn-t-support-lpm.patch
cdc-acm-add-disable_echo-quirk-for-microchip-smsc-chip.patch
+bus-ti-sysc-flush-posted-write-on-enable-and-disable.patch
+bus-ti-sysc-ignore-clockactivity-unless-specified-as.patch
+arm-omap2-fix-legacy-mode-dss_reset.patch
+xfrm-fix-double-esp-trailer-insertion-in-ipsec-crypt.patch
+asoc-q6asm-handle-eos-correctly.patch
+efi-tpm-verify-event-log-header-before-parsing.patch
+efi-esrt-fix-reference-count-leak-in-esre_create_sys.patch
+asoc-q6afe-add-support-to-get-port-direction.patch
+asoc-qcom-common-set-correct-directions-for-dailinks.patch
+regualtor-pfuze100-correct-sw1a-sw2-on-pfuze3000.patch
+rdma-siw-fix-pointer-to-int-cast-warning-in-siw_rx_p.patch
+asoc-fsl_ssi-fix-bclk-calculation-for-mono-channel.patch
+samples-bpf-xdp_redirect_cpu-set-max_cpus-according-.patch
+bpf-xdp-samples-fix-null-pointer-dereference-in-_use.patch
+arm-dts-am335x-pocketbeagle-fix-mmc0-write-protect.patch
+arm-dts-fix-duovero-smsc-interrupt-for-suspend.patch
+x86-resctrl-fix-a-null-vs-is_err-static-checker-warn.patch
+regmap-fix-memory-leak-from-regmap_register_patch.patch
+devmap-use-bpf_map_area_alloc-for-allocating-hash-bu.patch
+bpf-don-t-return-einval-from-get-set-sockopt-when-op.patch
+arm-dts-nsp-correct-fa2-mailbox-node.patch
+rxrpc-fix-handling-of-rwind-from-an-ack-packet.patch
+rdma-rvt-fix-potential-memory-leak-caused-by-rvt_all.patch
+rdma-qedr-fix-kasan-use-after-free-in-ucma_event_han.patch
+rdma-cma-protect-bind_list-and-listen_list-while-fin.patch
+asoc-rockchip-fix-a-reference-count-leak.patch
+s390-qeth-fix-error-handling-for-isolation-mode-cmds.patch
+rdma-mad-fix-possible-memory-leak-in-ib_mad_post_rec.patch
+selftests-net-report-etf-errors-correctly.patch
+ib-mad-fix-use-after-free-when-destroying-mad-agent.patch
+iommu-vt-d-enable-pci-acs-for-platform-opt-in-hint.patch
+iommu-vt-d-update-scalable-mode-paging-structure-coh.patch
+net-qed-fix-left-elements-count-calculation.patch
+net-qed-fix-async-event-callbacks-unregistering.patch
+net-qede-stop-adding-events-on-an-already-destroyed-.patch
+net-qed-fix-nvme-login-fails-over-vfs.patch
+net-qed-fix-excessive-qm-ilt-lines-consumption.patch
+net-qede-fix-ptp-initialization-on-recovery.patch
+net-qede-fix-use-after-free-on-recovery-and-aer-hand.patch
+cxgb4-move-handling-l2t-arp-failures-to-caller.patch
+arm-imx5-add-missing-put_device-call-in-imx_suspend_.patch
+scsi-lpfc-avoid-another-null-dereference-in-lpfc_sli.patch
+usb-gadget-udc-potential-oops-in-error-handling-code.patch
+usb-renesas_usbhs-getting-residue-from-callback_resu.patch
+nvme-multipath-set-bdi-capabilities-once.patch
+nvme-fix-possible-deadlock-when-i-o-is-blocked.patch
+nvme-multipath-fix-deadlock-between-ana_work-and-sca.patch
+nvme-don-t-protect-ns-mutation-with-ns-head-lock.patch
+nvme-multipath-fix-deadlock-due-to-head-lock.patch
+netfilter-ipset-fix-unaligned-atomic-access.patch
+net-bcmgenet-use-hardware-padding-of-runt-frames.patch
+clk-sifive-allocate-sufficient-memory-for-struct-__p.patch
+i2c-fsi-fix-the-port-number-field-in-status-register.patch
+i2c-core-check-returned-size-of-emulated-smbus-block.patch
+afs-fix-storage-of-cell-names.patch
+sched-deadline-initialize-dl_boosted.patch
+sched-core-fix-pi-boosting-between-rt-and-deadline-t.patch
+sata_rcar-handle-pm_runtime_get_sync-failure-cases.patch
+ata-libata-fix-usage-of-page-address-by-page_address.patch
+drm-amd-display-use-kfree-to-free-rgb_user-in-calcul.patch
+riscv-atomic-fix-sign-extension-for-rv64i.patch
+hwrng-ks-sa-fix-runtime-pm-imbalance-on-error.patch
+arm64-sve-eliminate-data-races-on-sve_default_vl.patch
+ibmvnic-harden-device-login-requests.patch
+net-alx-fix-race-condition-in-alx_remove.patch
+test_objagg-fix-potential-memory-leak-in-error-handl.patch
+pinctrl-qcom-spmi-gpio-fix-warning-about-irq-chip-re.patch
+pinctrl-tegra-use-noirq-suspend-resume-callbacks.patch
+s390-ptrace-pass-invalid-syscall-numbers-to-tracing.patch
+s390-ptrace-fix-setting-syscall-number.patch
+s390-vdso-use-ld-instead-of-cc-to-link-vdso.patch
+s390-vdso-fix-vdso-clock_getres.patch
+arm64-sve-fix-build-failure-when-arm64_sve-y-and-sys.patch
+kbuild-improve-cc-option-to-clean-up-all-temporary-f.patch
+recordmcount-support-64k-sections.patch
+kprobes-suppress-the-suspicious-rcu-warning-on-kprob.patch
+blktrace-break-out-of-blktrace-setup-on-concurrent-c.patch
+block-update-hctx-map-when-use-multiple-maps.patch
+risc-v-don-t-allow-write-exec-only-page-mapping-requ.patch
--- /dev/null
+From 9f8c12cbdefd338c2c03ca0cfb8d440174858276 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 15:01:54 -0500
+Subject: test_objagg: Fix potential memory leak in error handling
+
+From: Aditya Pakki <pakki001@umn.edu>
+
+[ Upstream commit a6379f0ad6375a707e915518ecd5c2270afcd395 ]
+
+In case of failure of check_expect_hints_stats(), the resources
+allocated by objagg_hints_get should be freed. The patch fixes
+this issue.
+
+Signed-off-by: Aditya Pakki <pakki001@umn.edu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ lib/test_objagg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/lib/test_objagg.c b/lib/test_objagg.c
+index 72c1abfa154dc..da137939a4100 100644
+--- a/lib/test_objagg.c
++++ b/lib/test_objagg.c
+@@ -979,10 +979,10 @@ static int test_hints_case(const struct hints_case *hints_case)
+ err_world2_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world2, objagg, hints_case->key_ids[i]);
+- objagg_hints_put(hints);
+- objagg_destroy(objagg2);
+ i = hints_case->key_ids_count;
++ objagg_destroy(objagg2);
+ err_check_expect_hints_stats:
++ objagg_hints_put(hints);
+ err_hints_get:
+ err_check_expect_stats:
+ err_world_obj_get:
+--
+2.25.1
+
--- /dev/null
+From 90cd9563636d22a81283fa8a1c4ef0df14535ec1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 14:27:19 +0300
+Subject: usb: gadget: udc: Potential Oops in error handling code
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit e55f3c37cb8d31c7e301f46396b2ac6a19eb3a7c ]
+
+If this is in "transceiver" mode then ->qwork isn't required and is
+a NULL pointer. This can lead to a NULL dereference when we call
+destroy_workqueue(udc->qwork).
+
+Fixes: 3517c31a8ece ("usb: gadget: mv_udc: use devm_xxx for probe")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/mv_udc_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
+index cafde053788bb..80a1b52c656e0 100644
+--- a/drivers/usb/gadget/udc/mv_udc_core.c
++++ b/drivers/usb/gadget/udc/mv_udc_core.c
+@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
+ return 0;
+
+ err_create_workqueue:
+- destroy_workqueue(udc->qwork);
++ if (udc->qwork)
++ destroy_workqueue(udc->qwork);
+ err_destroy_dma:
+ dma_pool_destroy(udc->dtd_pool);
+ err_free_dma:
+--
+2.25.1
+
--- /dev/null
+From 62ccde9f1ad1fbdf877017818e54b9430cd054ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jun 2020 21:11:17 +0900
+Subject: usb: renesas_usbhs: getting residue from callback_result
+
+From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+[ Upstream commit ea0efd687b01355cd799c8643d0c636ba4859ffc ]
+
+This driver assumed that dmaengine_tx_status() could return
+the residue even after the transfer was completed. However,
+this was not correct usage [1], and it actually broke getting
+the residue after commit 24461d9792c2 ("dmaengine:
+virt-dma: Fix access after free in vchan_complete()").
+So, it is possible to get a wrong received size if the usb
+controller receives a short packet. For example, the g_zero
+driver causes "bad OUT byte" errors.
+
+The usb-dmac driver will support callback_result, so this
+driver can use it to get the residue correctly. Note that even if
+the usb-dmac driver does not support callback_result yet,
+this patch doesn't cause any side effects.
+
+[1]
+https://lore.kernel.org/dmaengine/20200616165550.GP2324254@vkoul-mobl/
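+
+For context, this is the dmaengine client shape the driver moves to (a
+sketch with made-up names xfer_ctx/xfer_complete, not the driver's code):
+the residue comes from the result handed to the completion callback instead
+of being queried after the fact:
+
+  #include <linux/dmaengine.h>
+
+  struct xfer_ctx {
+          size_t len;                /* bytes requested */
+          size_t received;           /* bytes actually transferred */
+  };
+
+  static void xfer_complete(void *param, const struct dmaengine_result *result)
+  {
+          struct xfer_ctx *ctx = param;
+
+          /* the residue is valid here even though the transfer has completed */
+          ctx->received = ctx->len - result->residue;
+  }
+
+  /* when preparing the transfer:
+   *        desc->callback_result = xfer_complete;
+   *        desc->callback_param = ctx;
+   *        cookie = dmaengine_submit(desc);
+   *        dma_async_issue_pending(chan);
+   */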
+
+Reported-by: Hien Dang <hien.dang.eb@renesas.com>
+Fixes: 24461d9792c2 ("dmaengine: virt-dma: Fix access after free in vchan_complete()")
+Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Link: https://lore.kernel.org/r/1592482277-19563-1-git-send-email-yoshihiro.shimoda.uh@renesas.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/renesas_usbhs/fifo.c | 23 +++++++++++++----------
+ drivers/usb/renesas_usbhs/fifo.h | 2 +-
+ 2 files changed, 14 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 86637cd066cfb..05cdad13933b1 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+ return info->dma_map_ctrl(chan->device->dev, pkt, map);
+ }
+
+-static void usbhsf_dma_complete(void *arg);
++static void usbhsf_dma_complete(void *arg,
++ const struct dmaengine_result *result);
+ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ {
+ struct usbhs_pipe *pipe = pkt->pipe;
+@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ struct dma_chan *chan;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ enum dma_transfer_direction dir;
++ dma_cookie_t cookie;
+
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ if (!desc)
+ return;
+
+- desc->callback = usbhsf_dma_complete;
+- desc->callback_param = pipe;
++ desc->callback_result = usbhsf_dma_complete;
++ desc->callback_param = pkt;
+
+- pkt->cookie = dmaengine_submit(desc);
+- if (pkt->cookie < 0) {
++ cookie = dmaengine_submit(desc);
++ if (cookie < 0) {
+ dev_err(dev, "Failed to submit dma descriptor\n");
+ return;
+ }
+@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
+ struct dma_chan *chan, int dtln)
+ {
+ struct usbhs_pipe *pipe = pkt->pipe;
+- struct dma_tx_state state;
+ size_t received_size;
+ int maxp = usbhs_pipe_get_maxpacket(pipe);
+
+- dmaengine_tx_status(chan, pkt->cookie, &state);
+- received_size = pkt->length - state.residue;
++ received_size = pkt->length - pkt->dma_result->residue;
+
+ if (dtln) {
+ received_size -= USBHS_USB_DMAC_XFER_SIZE;
+@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
+ return 0;
+ }
+
+-static void usbhsf_dma_complete(void *arg)
++static void usbhsf_dma_complete(void *arg,
++ const struct dmaengine_result *result)
+ {
+- struct usbhs_pipe *pipe = arg;
++ struct usbhs_pkt *pkt = arg;
++ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
+
++ pkt->dma_result = result;
+ ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
+ if (ret < 0)
+ dev_err(dev, "dma_complete run_error %d : %d\n",
+diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
+index c3d3cc35cee0f..4a7dc23ce3d35 100644
+--- a/drivers/usb/renesas_usbhs/fifo.h
++++ b/drivers/usb/renesas_usbhs/fifo.h
+@@ -50,7 +50,7 @@ struct usbhs_pkt {
+ struct usbhs_pkt *pkt);
+ struct work_struct work;
+ dma_addr_t dma;
+- dma_cookie_t cookie;
++ const struct dmaengine_result *dma_result;
+ void *buf;
+ int length;
+ int trans;
+--
+2.25.1
+
--- /dev/null
+From ed630ddc8eead250337f9266247c1e004719ac19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jun 2020 22:36:11 +0300
+Subject: x86/resctrl: Fix a NULL vs IS_ERR() static checker warning in
+ rdt_cdp_peer_get()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit cc5277fe66cf3ad68f41f1c539b2ef0d5e432974 ]
+
+The callers don't expect *d_cdp to be set to an error pointer; they only
+check for NULL. This leads to a static checker warning:
+
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c:2648 __init_one_rdt_domain()
+ warn: 'd_cdp' could be an error pointer
+
+This would not trigger a bug in this specific case because
+__init_one_rdt_domain() calls it with a valid domain that would not have
+a negative id and thus not trigger the return of the ERR_PTR(). If this
+was a negative domain id then the call to rdt_find_domain() in
+domain_add_cpu() would have returned the ERR_PTR() much earlier and the
+creation of the domain with an invalid id would have been prevented.
+
+Even though a bug is not triggered currently the right and safe thing to
+do is to set the pointer to NULL because that is what can be checked for
+when the caller is handling the CDP and non-CDP cases.
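+
+The distinction matters because an error pointer is not NULL (a generic
+sketch, pointer_is_usable() is a made-up name):
+
+  #include <linux/err.h>
+
+  static int pointer_is_usable(void *p)
+  {
+          if (!p)                 /* catches NULL ... */
+                  return 0;
+          return !IS_ERR(p);      /* ... but not ERR_PTR(-EINVAL) */
+  }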
+
+Fixes: 52eb74339a62 ("x86/resctrl: Fix rdt_find_domain() return value and checks")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Reinette Chatre <reinette.chatre@intel.com>
+Acked-by: Fenghua Yu <fenghua.yu@intel.com>
+Link: https://lkml.kernel.org/r/20200602193611.GA190851@mwanda
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 20856d80dce3b..54b711bc06073 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -1027,6 +1027,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
+ _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
+ if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
+ _r_cdp = NULL;
++ _d_cdp = NULL;
+ ret = -EINVAL;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 91ee5467682fb760f8fb0ca1ca203bdf4d78730a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Jun 2020 16:39:37 -0500
+Subject: xfrm: Fix double ESP trailer insertion in IPsec crypto offload.
+
+From: Huy Nguyen <huyn@mellanox.com>
+
+[ Upstream commit 94579ac3f6d0820adc83b5dc5358ead0158101e9 ]
+
+During IPsec performance testing, we see bad ICMP checksums. The error packet
+has a duplicated ESP trailer due to double validate_xmit_xfrm calls. The first call
+is from ip_output, but the packet cannot be sent because
+netif_xmit_frozen_or_stopped is true, so the packet gets dev_requeue_skb. The second
+call is from the NET_TX softirq. However, after the first call the packet already
+has the ESP trailer.
+
+Fix this by marking the skb with the XFRM_XMIT bit after the packet is handled by
+validate_xmit_xfrm, to avoid duplicate ESP trailer insertion.
+
+Fixes: f6e27114a60a ("net: Add a xfrm validate function to validate_xmit_skb")
+Signed-off-by: Huy Nguyen <huyn@mellanox.com>
+Reviewed-by: Boris Pismenny <borisp@mellanox.com>
+Reviewed-by: Raed Salem <raeds@mellanox.com>
+Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xfrm.h | 1 +
+ net/xfrm/xfrm_device.c | 4 +++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index aa08a7a5f6ac5..fb391c00c19ac 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1012,6 +1012,7 @@ struct xfrm_offload {
+ #define XFRM_GRO 32
+ #define XFRM_ESP_NO_TRAILER 64
+ #define XFRM_DEV_RESUME 128
++#define XFRM_XMIT 256
+
+ __u32 status;
+ #define CRYPTO_SUCCESS 1
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index c365b918be35c..bb2292b5260c2 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -82,7 +82,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sec_path *sp;
+
+- if (!xo)
++ if (!xo || (xo->flags & XFRM_XMIT))
+ return skb;
+
+ if (!(features & NETIF_F_HW_ESP))
+@@ -103,6 +103,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
+ return skb;
+ }
+
++ xo->flags |= XFRM_XMIT;
++
+ if (skb_is_gso(skb)) {
+ struct net_device *dev = skb->dev;
+
+--
+2.25.1
+