--- /dev/null
+From 32a1671ff8e84f0dfff3a50d4b2091d25e91f5e2 Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Wed, 4 Mar 2020 15:37:26 +0100
+Subject: ARM: dts: exynos: Fix polarity of the LCD SPI bus on UniversalC210 board
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit 32a1671ff8e84f0dfff3a50d4b2091d25e91f5e2 upstream.
+
+Recent changes in the SPI core and the SPI-GPIO driver revealed that the
+GPIO lines for the LD9040 LCD controller on the UniversalC210 board are
+defined incorrectly. Fix the polarity of those lines to match the old
+behavior and the hardware requirements, restoring LCD panel operation
+with recent kernels.
+
+Cc: <stable@vger.kernel.org> # 5.0.x
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Andrzej Hajda <a.hajda@samsung.com>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/exynos4210-universal_c210.dts | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
++++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
+@@ -115,7 +115,7 @@
+ gpio-sck = <&gpy3 1 GPIO_ACTIVE_HIGH>;
+ gpio-mosi = <&gpy3 3 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+- cs-gpios = <&gpy4 3 GPIO_ACTIVE_HIGH>;
++ cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>;
+
+ lcd@0 {
+ compatible = "samsung,ld9040";
+@@ -124,8 +124,6 @@
+ vci-supply = <&ldo17_reg>;
+ reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
+ spi-max-frequency = <1200000>;
+- spi-cpol;
+- spi-cpha;
+ power-on-delay = <10>;
+ reset-delay = <10>;
+ panel-width-mm = <90>;
--- /dev/null
+From fc2266011accd5aeb8ebc335c381991f20e26e33 Mon Sep 17 00:00:00 2001
+From: Fredrik Strupe <fredrik@strupe.net>
+Date: Wed, 8 Apr 2020 13:29:41 +0200
+Subject: arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
+
+From: Fredrik Strupe <fredrik@strupe.net>
+
+commit fc2266011accd5aeb8ebc335c381991f20e26e33 upstream.
+
+For thumb instructions, call_undef_hook() in traps.c first reads a u16,
+and if the u16 indicates a T32 instruction (u16 >= 0xe800), a second
+u16 is read, which then makes up the lower half-word of a T32
+instruction. For T16 instructions, the second u16 is not read,
+which makes the resulting u32 opcode always have the upper half set to
+0.
+
+However, having the upper half of instr_mask in the undef_hook set to 0
+masks out the upper half of all thumb instructions - both T16 and T32.
+This results in trapped T32 instructions with the lower half-word equal
+to the T16 encoding of setend (b650) being matched, even though the upper
+half-word is not 0000 and thus indicates a T32 opcode.
+
+An example of such a T32 instruction is eaa0b650, which should raise a
+SIGILL since T32 instructions with an eaa prefix are unallocated as per
+Arm ARM, but instead works as a SETEND because the second half-word is set
+to b650.
+
+This patch fixes the issue by extending instr_mask to include the
+upper u32 half, which will still match T16 instructions where the upper
+half is 0, but not T32 instructions.
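+
+A minimal sketch of the matching logic, simplified from call_undef_hook()
+(field names as in struct undef_hook, list walking and locking omitted):
+
+  /* a hook matches when the masked opcode equals the expected value */
+  if ((instr & hook->instr_mask) == hook->instr_val &&
+      (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+          handled = hook->fn(regs, instr);
+
+With the old mask, 0xeaa0b650 & 0x0000fff7 == 0x0000b650 == instr_val, so
+the trapped T32 opcode is wrongly handled as SETEND. With 0xfffffff7 only
+opcodes whose upper half-word is zero, i.e. the T16 SETEND encodings
+0x0000b650 and 0x0000b658, can still match.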
+
+Fixes: 2d888f48e056 ("arm64: Emulate SETEND for AArch32 tasks")
+Cc: <stable@vger.kernel.org> # 4.0.x-
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Fredrik Strupe <fredrik@strupe.net>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/armv8_deprecated.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[]
+ },
+ {
+ /* Thumb mode */
+- .instr_mask = 0x0000fff7,
++ .instr_mask = 0xfffffff7,
+ .instr_val = 0x0000b650,
+ .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
+ .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
--- /dev/null
+From a81e5442d796ccfa2cc97d205a5477053264d978 Mon Sep 17 00:00:00 2001
+From: Dave Gerlach <d-gerlach@ti.com>
+Date: Wed, 11 Mar 2020 16:41:11 +0200
+Subject: arm64: dts: ti: k3-am65: Add clocks to dwc3 nodes
+
+From: Dave Gerlach <d-gerlach@ti.com>
+
+commit a81e5442d796ccfa2cc97d205a5477053264d978 upstream.
+
+The TI sci-clk driver can scan the DT for all clocks provided by system
+firmware and does this by checking the clocks property of all nodes, so
+we must add the clocks property to the dwc3 nodes so that the USB
+clocks are available.
+
+Without this, USB does not work with the latest system firmware, i.e.:
+[ 1.714662] clk: couldn't get parent clock 0 for /interconnect@100000/dwc3@4020000
+
+Fixes: cc54a99464ccd ("arm64: dts: ti: k3-am6: add USB support")
+Signed-off-by: Dave Gerlach <d-gerlach@ti.com>
+Signed-off-by: Roger Quadros <rogerq@ti.com>
+Cc: stable@kernel.org
+Signed-off-by: Tero Kristo <t-kristo@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+@@ -296,6 +296,7 @@
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+ power-domains = <&k3_pds 151 TI_SCI_PD_EXCLUSIVE>;
++ clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
+ assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>;
+ assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
+ <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */
+@@ -335,6 +336,7 @@
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
++ clocks = <&k3_clks 152 2>;
+ assigned-clocks = <&k3_clks 152 2>;
+ assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */
+
--- /dev/null
+From ccfc531695f3a4aada042f6bdb33ac6be24e1aec Mon Sep 17 00:00:00 2001
+From: Mike Willard <mwillard@izotope.com>
+Date: Wed, 1 Apr 2020 20:54:54 +0000
+Subject: ASoC: cs4270: pull reset GPIO low then high
+
+From: Mike Willard <mwillard@izotope.com>
+
+commit ccfc531695f3a4aada042f6bdb33ac6be24e1aec upstream.
+
+Pull the RST line low then high when initializing the driver,
+in order to force a reset of the chip.
+Previously, the line was not pulled low, which could result in
+the chip registers not resetting to their default values on boot.
+
+Signed-off-by: Mike Willard <mwillard@izotope.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200401205454.79792-1-mwillard@izotope.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/cs4270.c | 40 +++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 35 insertions(+), 5 deletions(-)
+
+--- a/sound/soc/codecs/cs4270.c
++++ b/sound/soc/codecs/cs4270.c
+@@ -137,6 +137,9 @@ struct cs4270_private {
+
+ /* power domain regulators */
+ struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
++
++ /* reset gpio */
++ struct gpio_desc *reset_gpio;
+ };
+
+ static const struct snd_soc_dapm_widget cs4270_dapm_widgets[] = {
+@@ -649,6 +652,22 @@ static const struct regmap_config cs4270
+ };
+
+ /**
++ * cs4270_i2c_remove - deinitialize the I2C interface of the CS4270
++ * @i2c_client: the I2C client object
++ *
++ * This function puts the chip into low power mode when the i2c device
++ * is removed.
++ */
++static int cs4270_i2c_remove(struct i2c_client *i2c_client)
++{
++ struct cs4270_private *cs4270 = i2c_get_clientdata(i2c_client);
++
++ gpiod_set_value_cansleep(cs4270->reset_gpio, 0);
++
++ return 0;
++}
++
++/**
+ * cs4270_i2c_probe - initialize the I2C interface of the CS4270
+ * @i2c_client: the I2C client object
+ * @id: the I2C device ID (ignored)
+@@ -660,7 +679,6 @@ static int cs4270_i2c_probe(struct i2c_c
+ const struct i2c_device_id *id)
+ {
+ struct cs4270_private *cs4270;
+- struct gpio_desc *reset_gpiod;
+ unsigned int val;
+ int ret, i;
+
+@@ -679,10 +697,21 @@ static int cs4270_i2c_probe(struct i2c_c
+ if (ret < 0)
+ return ret;
+
+- reset_gpiod = devm_gpiod_get_optional(&i2c_client->dev, "reset",
+- GPIOD_OUT_HIGH);
+- if (PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ /* reset the device */
++ cs4270->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev, "reset",
++ GPIOD_OUT_LOW);
++ if (IS_ERR(cs4270->reset_gpio)) {
++ dev_dbg(&i2c_client->dev, "Error getting CS4270 reset GPIO\n");
++ return PTR_ERR(cs4270->reset_gpio);
++ }
++
++ if (cs4270->reset_gpio) {
++ dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
++ gpiod_set_value_cansleep(cs4270->reset_gpio, 1);
++ }
++
++ /* Sleep 500ns before i2c communications */
++ ndelay(500);
+
+ cs4270->regmap = devm_regmap_init_i2c(i2c_client, &cs4270_regmap);
+ if (IS_ERR(cs4270->regmap))
+@@ -735,6 +764,7 @@ static struct i2c_driver cs4270_i2c_driv
+ },
+ .id_table = cs4270_id,
+ .probe = cs4270_i2c_probe,
++ .remove = cs4270_i2c_remove,
+ };
+
+ module_i2c_driver(cs4270_i2c_driver);
--- /dev/null
+From c067b46d731a764fc46ecc466c2967088c97089e Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Thu, 13 Feb 2020 13:19:51 -0300
+Subject: clk: ingenic/jz4770: Exit with error if CGU init failed
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit c067b46d731a764fc46ecc466c2967088c97089e upstream.
+
+Exit jz4770_cgu_init() if the 'cgu' pointer we get is NULL, since the
+pointer is passed as an argument to functions later on.
+
+Fixes: 7a01c19007ad ("clk: Add Ingenic jz4770 CGU driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Reported-by: kbuild test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Link: https://lkml.kernel.org/r/20200213161952.37460-1-paul@crapouillou.net
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/ingenic/jz4770-cgu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/clk/ingenic/jz4770-cgu.c
++++ b/drivers/clk/ingenic/jz4770-cgu.c
+@@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struc
+
+ cgu = ingenic_cgu_new(jz4770_cgu_clocks,
+ ARRAY_SIZE(jz4770_cgu_clocks), np);
+- if (!cgu)
++ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
++ return;
++ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval)
--- /dev/null
+From edcc42945dee85e9dec3737f3dbf59d917ae5418 Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Thu, 13 Feb 2020 13:19:52 -0300
+Subject: clk: ingenic/TCU: Fix round_rate returning error
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit edcc42945dee85e9dec3737f3dbf59d917ae5418 upstream.
+
+When requesting a rate higher than the parent's rate, it would return
+-EINVAL instead of simply returning the parent's rate like it should.
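+
+For example, with a 12 MHz parent clock, a clk_round_rate() request for
+24000000 now returns 12000000 (the closest rate the TCU can provide,
+using a prescaler of 1) instead of -EINVAL.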
+
+Fixes: 4f89e4b8f121 ("clk: ingenic: Add driver for the TCU clocks")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Link: https://lkml.kernel.org/r/20200213161952.37460-2-paul@crapouillou.net
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/ingenic/tcu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clk/ingenic/tcu.c
++++ b/drivers/clk/ingenic/tcu.c
+@@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struc
+ u8 prescale;
+
+ if (req_rate > rate)
+- return -EINVAL;
++ return rate;
+
+ prescale = ingenic_tcu_get_prescale(rate, req_rate);
+
--- /dev/null
+From d0a72efac89d1c35ac55197895201b7b94c5e6ef Mon Sep 17 00:00:00 2001
+From: Oliver O'Halloran <oohall@gmail.com>
+Date: Thu, 6 Feb 2020 17:26:21 +1100
+Subject: cpufreq: powernv: Fix use-after-free
+
+From: Oliver O'Halloran <oohall@gmail.com>
+
+commit d0a72efac89d1c35ac55197895201b7b94c5e6ef upstream.
+
+The cpufreq driver has a use-after-free that we can hit if:
+
+a) There's an OCC message pending when the notifier is registered, and
+b) The cpufreq driver fails to register with the core.
+
+When a) occurs the notifier schedules a workqueue item to handle the
+message. The backing work_struct is located on chips[].throttle and
+when b) happens we clean up by freeing the array. When the workqueue
+later runs the (now freed) queued item, the kernel crashes.
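+
+Roughly, the failing sequence is (names abbreviated):
+
+  powernv_cpufreq_init()
+    register the OCC message notifier  -> pending message queues work
+                                          on chips[i].throttle
+    cpufreq_register_driver() fails
+    clean_chip_info()                  -> kfree(chips)
+  ... the workqueue later runs the freed work item -> crash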
+
+Fixes: c5e29ea7ac14 ("cpufreq: powernv: Fix bugs in powernv_cpufreq_{init/exit}")
+Cc: stable@vger.kernel.org # v4.6+
+Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
+Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200206062622.28235-1-oohall@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/powernv-cpufreq.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -1080,6 +1080,12 @@ free_and_return:
+
+ static inline void clean_chip_info(void)
+ {
++ int i;
++
++ /* flush any pending work items */
++ if (chips)
++ for (i = 0; i < nr_chips; i++)
++ cancel_work_sync(&chips[i].throttle);
+ kfree(chips);
+ }
+
--- /dev/null
+From 3a5a9e1ef37b030b836d92df8264f840988f4a38 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Fri, 28 Feb 2020 08:51:23 +0200
+Subject: crypto: caam/qi2 - fix chacha20 data size error
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 3a5a9e1ef37b030b836d92df8264f840988f4a38 upstream.
+
+HW generates a Data Size error for chacha20 requests that are not
+a multiple of 64B, since algorithm state (AS) does not have
+the FINAL bit set.
+
+Since updating req->iv (for chaining) is not required,
+modify skcipher descriptors to set the FINAL bit for chacha20.
+
+[Note that for skcipher decryption we know that ctx1_iv_off is 0,
+which allows for an optimization by not checking algorithm type,
+since append_dec_op1() sets FINAL bit for all algorithms except AES.]
+
+Also drop the descriptor operations that save the IV.
+However, in order to keep code logic simple, things like
+S/G tables generation etc. are not touched.
+
+Cc: <stable@vger.kernel.org> # v5.3+
+Fixes: 334d37c9e263 ("crypto: caam - update IV using HW support")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Tested-by: Valentin Ciocoi Radulescu <valentin.ciocoi@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_desc.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_desc.c
++++ b/drivers/crypto/caam/caamalg_desc.c
+@@ -1379,6 +1379,9 @@ void cnstr_shdsc_skcipher_encap(u32 * co
+ const u32 ctx1_iv_off)
+ {
+ u32 *key_jump_cmd;
++ u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT;
++ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
++ OP_ALG_ALGSEL_CHACHA20);
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+@@ -1417,14 +1420,15 @@ void cnstr_shdsc_skcipher_encap(u32 * co
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+- append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
+- OP_ALG_ENCRYPT);
++ if (is_chacha20)
++ options |= OP_ALG_AS_FINALIZE;
++ append_operation(desc, options);
+
+ /* Perform operation */
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+- if (ivsize)
++ if (!is_chacha20 && ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
+@@ -1451,6 +1455,8 @@ void cnstr_shdsc_skcipher_decap(u32 * co
+ const u32 ctx1_iv_off)
+ {
+ u32 *key_jump_cmd;
++ bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) ==
++ OP_ALG_ALGSEL_CHACHA20);
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+@@ -1499,7 +1505,7 @@ void cnstr_shdsc_skcipher_decap(u32 * co
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+- if (ivsize)
++ if (!is_chacha20 && ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off <<
+ LDST_OFFSET_SHIFT));
--- /dev/null
+From 3f142b6a7b573bde6cff926f246da05652c61eb4 Mon Sep 17 00:00:00 2001
+From: Andrei Botila <andrei.botila@nxp.com>
+Date: Fri, 28 Feb 2020 12:46:48 +0200
+Subject: crypto: caam - update xts sector size for large input length
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrei Botila <andrei.botila@nxp.com>
+
+commit 3f142b6a7b573bde6cff926f246da05652c61eb4 upstream.
+
+Since the software implementation of XTS-AES has no notion of
+sectors, every input length is processed the same way. The CAAM
+implementation does have a notion of sectors, which causes its
+results to differ from the software implementation for input
+lengths bigger than 512 bytes.
+Increase the sector size to the maximum value representable in 16 bits.
+
+Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
+Cc: <stable@vger.kernel.org> # v4.12+
+Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_desc.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_desc.c
++++ b/drivers/crypto/caam/caamalg_desc.c
+@@ -1524,7 +1524,13 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap
+ */
+ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
+ {
+- __be64 sector_size = cpu_to_be64(512);
++ /*
++ * Set sector size to a big value, practically disabling
++ * sector size segmentation in xts implementation. We cannot
++ * take full advantage of this HW feature with existing
++ * crypto API / dm-crypt SW architecture.
++ */
++ __be64 sector_size = cpu_to_be64(BIT(15));
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+@@ -1577,7 +1583,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_e
+ */
+ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
+ {
+- __be64 sector_size = cpu_to_be64(512);
++ /*
++ * Set sector size to a big value, practically disabling
++ * sector size segmentation in xts implementation. We cannot
++ * take full advantage of this HW feature with existing
++ * crypto API / dm-crypt SW architecture.
++ */
++ __be64 sector_size = cpu_to_be64(BIT(15));
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
--- /dev/null
+From 8962c6d2c2b8ca51b0f188109015b15fc5f4da44 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Sun, 2 Feb 2020 18:19:14 +0200
+Subject: crypto: ccree - dec auth tag size from cryptlen map
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 8962c6d2c2b8ca51b0f188109015b15fc5f4da44 upstream.
+
+Remove the auth tag size from cryptlen before mapping the destination
+in out-of-place AEAD decryption thus resolving a crash with
+extended testmgr tests.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: stable@vger.kernel.org # v4.19+
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -894,8 +894,12 @@ static int cc_aead_chain_data(struct cc_
+
+ if (req->src != req->dst) {
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
+- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+- authsize : 0;
++
++ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
++ size_for_map += authsize;
++ else
++ size_for_map -= authsize;
++
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
--- /dev/null
+From 504e84abec7a635b861afd8d7f92ecd13eaa2b09 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Wed, 29 Jan 2020 16:37:55 +0200
+Subject: crypto: ccree - only try to map auth tag if needed
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 504e84abec7a635b861afd8d7f92ecd13eaa2b09 upstream.
+
+Make sure to only add the size of the auth tag to the source mapping
+for encryption if it is an in-place operation. Failing to do this
+previously caused us to try to map auth size bytes from a NULL
+mapping and crash if both the cryptlen and assoclen are zero.
+
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -1109,9 +1109,11 @@ int cc_map_aead_request(struct cc_drvdat
+ }
+
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
+- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
++ /* If we do in-place encryption, we also need the auth tag */
++ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
++ (req->src == req->dst)) {
+ size_to_map += authsize;
+-
++ }
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
--- /dev/null
+From ce0fc6db38decf0d2919bfe783de6d6b76e421a9 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Wed, 29 Jan 2020 16:37:54 +0200
+Subject: crypto: ccree - protect against empty or NULL scatterlists
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit ce0fc6db38decf0d2919bfe783de6d6b76e421a9 upstream.
+
+Deal gracefully with a NULL or empty scatterlist which can happen
+if both cryptlen and assoclen are zero and we're doing in-place
+AEAD encryption.
+
+This fixes a crash when this causes us to try and map a NULL page,
+at least with some platforms / DMA mapping configs.
+
+Cc: stable@vger.kernel.org # v4.19+
+Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_buffer_mgr.c | 62 +++++++++++++++--------------------
+ drivers/crypto/ccree/cc_buffer_mgr.h | 1
+ 2 files changed, 28 insertions(+), 35 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_buffer_mgr.c
++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
+@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(str
+ {
+ unsigned int nents = 0;
+
++ *lbytes = 0;
++
+ while (nbytes && sg_list) {
+ nents++;
+ /* get the number of bytes in the last entry */
+@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(str
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
+ }
++
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
+ return nents;
+ }
+@@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
+ {
+- if (sg_is_last(sg)) {
+- /* One entry only case -set to DLLI */
+- if (dma_map_sg(dev, sg, 1, direction) != 1) {
+- dev_err(dev, "dma_map_sg() single buffer failed\n");
+- return -ENOMEM;
+- }
+- dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+- &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+- sg->offset, sg->length);
+- *lbytes = nbytes;
+- *nents = 1;
+- *mapped_nents = 1;
+- } else { /*sg_is_last*/
+- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+- if (*nents > max_sg_nents) {
+- *nents = 0;
+- dev_err(dev, "Too many fragments. current %d max %d\n",
+- *nents, max_sg_nents);
+- return -ENOMEM;
+- }
+- /* In case of mmu the number of mapped nents might
+- * be changed from the original sgl nents
+- */
+- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+- if (*mapped_nents == 0) {
+- *nents = 0;
+- dev_err(dev, "dma_map_sg() sg buffer failed\n");
+- return -ENOMEM;
+- }
++ int ret = 0;
++
++ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
++ if (*nents > max_sg_nents) {
++ *nents = 0;
++ dev_err(dev, "Too many fragments. current %d max %d\n",
++ *nents, max_sg_nents);
++ return -ENOMEM;
+ }
+
++ ret = dma_map_sg(dev, sg, *nents, direction);
++ if (dma_mapping_error(dev, ret)) {
++ *nents = 0;
++ dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
++ return -ENOMEM;
++ }
++
++ *mapped_nents = ret;
++
+ return 0;
+ }
+
+@@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ areq_ctx->assoclen, req->cryptlen);
+
+- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
++ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
++ DMA_BIDIRECTIONAL);
+ if (req->src != req->dst) {
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
+- dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
++ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
+ DMA_BIDIRECTIONAL);
+ }
+ if (drvdata->coherent &&
+@@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_
+ &src_last_bytes);
+ sg_index = areq_ctx->src_sgl->length;
+ //check where the data starts
+- while (sg_index <= size_to_skip) {
++ while (src_mapped_nents && (sg_index <= size_to_skip)) {
+ src_mapped_nents--;
+ offset -= areq_ctx->src_sgl->length;
+ sgl = sg_next(areq_ctx->src_sgl);
+@@ -908,7 +900,7 @@ static int cc_aead_chain_data(struct cc_
+ size_for_map += crypto_aead_ivsize(tfm);
+
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+- &areq_ctx->dst.nents,
++ &areq_ctx->dst.mapped_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc)
+@@ -921,7 +913,7 @@ static int cc_aead_chain_data(struct cc_
+ offset = size_to_skip;
+
+ //check where the data starts
+- while (sg_index <= size_to_skip) {
++ while (dst_mapped_nents && sg_index <= size_to_skip) {
+ dst_mapped_nents--;
+ offset -= areq_ctx->dst_sgl->length;
+ sgl = sg_next(areq_ctx->dst_sgl);
+@@ -1123,7 +1115,7 @@ int cc_map_aead_request(struct cc_drvdat
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+- &areq_ctx->src.nents,
++ &areq_ctx->src.mapped_nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+--- a/drivers/crypto/ccree/cc_buffer_mgr.h
++++ b/drivers/crypto/ccree/cc_buffer_mgr.h
+@@ -25,6 +25,7 @@ enum cc_sg_cpy_direct {
+
+ struct cc_mlli {
+ cc_sram_addr_t sram_addr;
++ unsigned int mapped_nents;
+ unsigned int nents; //sg nents
+ unsigned int mlli_nents; //mlli nents might be different than the above
+ };
--- /dev/null
+From 9fc06ff56845cc5ccafec52f545fc2e08d22f849 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 27 Mar 2020 16:01:10 +0200
+Subject: dm clone: Add missing casts to prevent overflows and data corruption
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 9fc06ff56845cc5ccafec52f545fc2e08d22f849 upstream.
+
+Add missing casts when converting from regions to sectors.
+
+In case BITS_PER_LONG == 32, the lack of the appropriate casts can lead
+to overflows and miscalculation of the device sector.
+
+As a result, we could end up discarding and/or copying the wrong parts
+of the device, thus corrupting the device's data.
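+
+An illustrative example (numbers made up): with BITS_PER_LONG == 32,
+region_shift == 3 (8-sector regions) and region_nr == 600000000, the
+uncast shift computes 600000000 << 3 in 32 bits and wraps from
+4800000000 to 505032704, so the bio is remapped to the wrong sector;
+with the (sector_t) cast the shift is done in 64 bits and stays correct.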
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-target.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -282,7 +282,7 @@ static bool bio_triggers_commit(struct c
+ /* Get the address of the region in sectors */
+ static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
+ {
+- return (region_nr << clone->region_shift);
++ return ((sector_t)region_nr << clone->region_shift);
+ }
+
+ /* Get the region number of the bio */
+@@ -471,7 +471,7 @@ static void complete_discard_bio(struct
+ if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
+ remap_to_dest(clone, bio);
+ bio_region_range(clone, bio, &rs, &nr_regions);
+- trim_bio(bio, rs << clone->region_shift,
++ trim_bio(bio, region_to_sector(clone, rs),
+ nr_regions << clone->region_shift);
+ generic_make_request(bio);
+ } else
+@@ -804,11 +804,14 @@ static void hydration_copy(struct dm_clo
+ struct dm_io_region from, to;
+ struct clone *clone = hd->clone;
+
++ if (WARN_ON(!nr_regions))
++ return;
++
+ region_size = clone->region_size;
+ region_start = hd->region_nr;
+ region_end = region_start + nr_regions - 1;
+
+- total_size = (nr_regions - 1) << clone->region_shift;
++ total_size = region_to_sector(clone, nr_regions - 1);
+
+ if (region_end == clone->nr_regions - 1) {
+ /*
--- /dev/null
+From cd481c12269b4d276f1a52eda0ebd419079bfe3a Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 27 Mar 2020 16:01:09 +0200
+Subject: dm clone: Add overflow check for number of regions
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit cd481c12269b4d276f1a52eda0ebd419079bfe3a upstream.
+
+Add overflow check for clone->nr_regions variable, which holds the
+number of regions of the target.
+
+The overflow can occur with sufficiently large devices, if BITS_PER_LONG
+== 32. E.g., if the region size is 8 sectors (4K), the overflow would
+occur for device sizes > 34359738360 sectors (~16TB).
+
+This could result in multiple device sectors wrongly mapping to the same
+region number, due to the truncation from 64 bits to 32 bits, which
+would lead to data corruption.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-target.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -1790,6 +1790,7 @@ error:
+ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ int r;
++ sector_t nr_regions;
+ struct clone *clone;
+ struct dm_arg_set as;
+
+@@ -1831,7 +1832,16 @@ static int clone_ctr(struct dm_target *t
+ goto out_with_source_dev;
+
+ clone->region_shift = __ffs(clone->region_size);
+- clone->nr_regions = dm_sector_div_up(ti->len, clone->region_size);
++ nr_regions = dm_sector_div_up(ti->len, clone->region_size);
++
++ /* Check for overflow */
++ if (nr_regions != (unsigned long)nr_regions) {
++ ti->error = "Too many regions. Consider increasing the region size";
++ r = -EOVERFLOW;
++ goto out_with_source_dev;
++ }
++
++ clone->nr_regions = nr_regions;
+
+ r = validate_nr_regions(clone->nr_regions, &ti->error);
+ if (r)
--- /dev/null
+From 4b5142905d4ff58a4b93f7c8eaa7ba829c0a53c9 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 27 Mar 2020 16:01:08 +0200
+Subject: dm clone: Fix handling of partial region discards
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 4b5142905d4ff58a4b93f7c8eaa7ba829c0a53c9 upstream.
+
+There is a bug in the way dm-clone handles discards, which can lead to
+discarding the wrong blocks or trying to discard blocks beyond the end
+of the device.
+
+This could lead to data corruption, if the destination device indeed
+discards the underlying blocks, i.e., if the discard operation results
+in the original contents of a block to be lost.
+
+The root of the problem is the code that calculates the range of regions
+covered by a discard request and decides which regions to discard.
+
+Since dm-clone handles the device in units of regions, we don't discard
+parts of a region, only whole regions.
+
+The range is calculated as:
+
+ rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
+ re = bio_end_sector(bio) >> clone->region_shift;
+
+, where 'rs' is the first region to discard and (re - rs) is the number
+of regions to discard.
+
+The bug manifests when we try to discard part of a single region, i.e.,
+when we try to discard a block with size < region_size, and the discard
+request both starts at an offset with respect to the beginning of that
+region and ends before the end of the region.
+
+The root cause is the following comparison:
+
+ if (rs == re)
+ // skip discard and complete original bio immediately
+
+, which doesn't take into account that 'rs' might be greater than 're'.
+
+Thus, we then issue a discard request for the wrong blocks, instead of
+skipping the discard altogether.
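+
+A small worked example (illustrative numbers): with an 8-sector region
+size (region_shift == 3), a discard of sectors 10..13 lies entirely
+inside region 1 (sectors 8..15). Then
+
+  rs = dm_sector_div_up(10, 8) = 2
+  re = 14 >> 3 = 1
+
+so rs > re, the "rs == re" check does not fire, and (re - rs) underflows
+instead of the discard being skipped.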
+
+Fix the check to also take into account the above case, so we don't end
+up discarding the wrong blocks.
+
+Also, add some range checks to dm_clone_set_region_hydrated() and
+dm_clone_cond_set_range(), which update dm-clone's region bitmap.
+
+Note that the aforementioned bug doesn't cause invalid memory accesses,
+because dm_clone_is_range_hydrated() returns True for this case, so the
+checks are just precautionary.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-metadata.c | 13 ++++++++++++
+ drivers/md/dm-clone-target.c | 43 +++++++++++++++++++++++++++--------------
+ 2 files changed, 42 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -850,6 +850,12 @@ int dm_clone_set_region_hydrated(struct
+ struct dirty_map *dmap;
+ unsigned long word, flags;
+
++ if (unlikely(region_nr >= cmd->nr_regions)) {
++ DMERR("Region %lu out of range (total number of regions %lu)",
++ region_nr, cmd->nr_regions);
++ return -ERANGE;
++ }
++
+ word = region_nr / BITS_PER_LONG;
+
+ spin_lock_irqsave(&cmd->bitmap_lock, flags);
+@@ -879,6 +885,13 @@ int dm_clone_cond_set_range(struct dm_cl
+ struct dirty_map *dmap;
+ unsigned long word, region_nr;
+
++ if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
++ (start + nr_regions) > cmd->nr_regions)) {
++ DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
++ start, nr_regions, cmd->nr_regions);
++ return -ERANGE;
++ }
++
+ spin_lock_irq(&cmd->bitmap_lock);
+
+ if (cmd->read_only) {
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -293,10 +293,17 @@ static inline unsigned long bio_to_regio
+
+ /* Get the region range covered by the bio */
+ static void bio_region_range(struct clone *clone, struct bio *bio,
+- unsigned long *rs, unsigned long *re)
++ unsigned long *rs, unsigned long *nr_regions)
+ {
++ unsigned long end;
++
+ *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
+- *re = bio_end_sector(bio) >> clone->region_shift;
++ end = bio_end_sector(bio) >> clone->region_shift;
++
++ if (*rs >= end)
++ *nr_regions = 0;
++ else
++ *nr_regions = end - *rs;
+ }
+
+ /* Check whether a bio overwrites a region */
+@@ -454,7 +461,7 @@ static void trim_bio(struct bio *bio, se
+
+ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
+ {
+- unsigned long rs, re;
++ unsigned long rs, nr_regions;
+
+ /*
+ * If the destination device supports discards, remap and trim the
+@@ -463,9 +470,9 @@ static void complete_discard_bio(struct
+ */
+ if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
+ remap_to_dest(clone, bio);
+- bio_region_range(clone, bio, &rs, &re);
++ bio_region_range(clone, bio, &rs, &nr_regions);
+ trim_bio(bio, rs << clone->region_shift,
+- (re - rs) << clone->region_shift);
++ nr_regions << clone->region_shift);
+ generic_make_request(bio);
+ } else
+ bio_endio(bio);
+@@ -473,12 +480,21 @@ static void complete_discard_bio(struct
+
+ static void process_discard_bio(struct clone *clone, struct bio *bio)
+ {
+- unsigned long rs, re;
++ unsigned long rs, nr_regions;
+
+- bio_region_range(clone, bio, &rs, &re);
+- BUG_ON(re > clone->nr_regions);
++ bio_region_range(clone, bio, &rs, &nr_regions);
++ if (!nr_regions) {
++ bio_endio(bio);
++ return;
++ }
+
+- if (unlikely(rs == re)) {
++ if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
++ (rs + nr_regions) > clone->nr_regions)) {
++ DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
++ clone_device_name(clone), rs, nr_regions,
++ clone->nr_regions,
++ (unsigned long long)bio->bi_iter.bi_sector,
++ bio_sectors(bio));
+ bio_endio(bio);
+ return;
+ }
+@@ -487,7 +503,7 @@ static void process_discard_bio(struct c
+ * The covered regions are already hydrated so we just need to pass
+ * down the discard.
+ */
+- if (dm_clone_is_range_hydrated(clone->cmd, rs, re - rs)) {
++ if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
+ complete_discard_bio(clone, bio, true);
+ return;
+ }
+@@ -1169,7 +1185,7 @@ static void process_deferred_discards(st
+ int r = -EPERM;
+ struct bio *bio;
+ struct blk_plug plug;
+- unsigned long rs, re;
++ unsigned long rs, nr_regions;
+ struct bio_list discards = BIO_EMPTY_LIST;
+
+ spin_lock_irq(&clone->lock);
+@@ -1185,14 +1201,13 @@ static void process_deferred_discards(st
+
+ /* Update the metadata */
+ bio_list_for_each(bio, &discards) {
+- bio_region_range(clone, bio, &rs, &re);
++ bio_region_range(clone, bio, &rs, &nr_regions);
+ /*
+ * A discard request might cover regions that have been already
+ * hydrated. There is no need to update the metadata for these
+ * regions.
+ */
+- r = dm_clone_cond_set_range(clone->cmd, rs, re - rs);
+-
++ r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
+ if (unlikely(r))
+ break;
+ }
--- /dev/null
+From 81d5553d1288c2ec0390f02f84d71ca0f0f9f137 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Fri, 27 Mar 2020 16:01:11 +0200
+Subject: dm clone metadata: Fix return type of dm_clone_nr_of_hydrated_regions()
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 81d5553d1288c2ec0390f02f84d71ca0f0f9f137 upstream.
+
+dm_clone_nr_of_hydrated_regions() returns the number of regions that
+have been hydrated so far. In order to do so it employs bitmap_weight().
+
+Until now, the return type of dm_clone_nr_of_hydrated_regions() was
+unsigned long.
+
+Because bitmap_weight() returns an int, in case BITS_PER_LONG == 64 and
+the return value of bitmap_weight() is 2^31 (the maximum allowed number
+of regions for a device), the result is sign extended from 32 bits to 64
+bits and an incorrect value is displayed, in the status output of
+dm-clone, as the number of hydrated regions.
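+
+A standalone illustration of the sign extension (not driver code):
+
+  int weight = (int)(1u << 31);  /* 2^31 hydrated regions as a 32-bit int */
+  unsigned long nr = weight;     /* sign-extends to 0xffffffff80000000 */
+
+so the status line would report 18446744071562067968 instead of
+2147483648 hydrated regions.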
+
+Fix this by having dm_clone_nr_of_hydrated_regions() return an unsigned
+int.
+
+Fixes: 7431b7835f55 ("dm: add clone target")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-clone-metadata.c | 2 +-
+ drivers/md/dm-clone-metadata.h | 2 +-
+ drivers/md/dm-clone-target.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -656,7 +656,7 @@ bool dm_clone_is_range_hydrated(struct d
+ return (bit >= (start + nr_regions));
+ }
+
+-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
++unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
+ {
+ return bitmap_weight(cmd->region_map, cmd->nr_regions);
+ }
+--- a/drivers/md/dm-clone-metadata.h
++++ b/drivers/md/dm-clone-metadata.h
+@@ -156,7 +156,7 @@ bool dm_clone_is_range_hydrated(struct d
+ /*
+ * Returns the number of hydrated regions.
+ */
+-unsigned long dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
++unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd);
+
+ /*
+ * Returns the first unhydrated region with region_nr >= @start
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -1473,7 +1473,7 @@ static void clone_status(struct dm_targe
+ goto error;
+ }
+
+- DMEMIT("%u %llu/%llu %llu %lu/%lu %u ",
++ DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
+ DM_CLONE_METADATA_BLOCK_SIZE,
+ (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
+ (unsigned long long)nr_metadata_blocks,
--- /dev/null
+From b93b6643e9b5a7f260b931e97f56ffa3fa65e26d Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 22 Mar 2020 20:42:21 +0100
+Subject: dm integrity: fix a crash with unusually large tag size
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit b93b6643e9b5a7f260b931e97f56ffa3fa65e26d upstream.
+
+If the user specifies tag size larger than HASH_MAX_DIGESTSIZE,
+there's a crash in integrity_metadata().
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1519,7 +1519,7 @@ static void integrity_metadata(struct wo
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ char *checksums;
+ unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+- char checksums_onstack[HASH_MAX_DIGESTSIZE];
++ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ unsigned sectors_to_process = dio->range.n_sectors;
+ sector_t sector = dio->range.logical_sector;
+
+@@ -1748,7 +1748,7 @@ retry_kmap:
+ } while (++s < ic->sectors_per_block);
+ #ifdef INTERNAL_VERIFY
+ if (ic->internal_hash) {
+- char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
++ char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+
+ integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
+ if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
--- /dev/null
+From 75fa601934fda23d2f15bf44b09c2401942d8e15 Mon Sep 17 00:00:00 2001
+From: "Shetty, Harshini X (EXT-Sony Mobile)" <Harshini.X.Shetty@sony.com>
+Date: Tue, 17 Mar 2020 09:15:45 +0000
+Subject: dm verity fec: fix memory leak in verity_fec_dtr
+
+From: Shetty, Harshini X (EXT-Sony Mobile) <Harshini.X.Shetty@sony.com>
+
+commit 75fa601934fda23d2f15bf44b09c2401942d8e15 upstream.
+
+Fix the kmemleak below, detected in verity_fec_ctr. output_pool is
+allocated for each dm-verity-fec device, but it is not freed when the
+dm-table for the verity target is removed. Hence free the output
+mempool in the destructor function verity_fec_dtr.
+
+unreferenced object 0xffffffffa574d000 (size 4096):
+ comm "init", pid 1667, jiffies 4294894890 (age 307.168s)
+ hex dump (first 32 bytes):
+ 8e 36 00 98 66 a8 0b 9b 00 00 00 00 00 00 00 00 .6..f...........
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<0000000060e82407>] __kmalloc+0x2b4/0x340
+ [<00000000dd99488f>] mempool_kmalloc+0x18/0x20
+ [<000000002560172b>] mempool_init_node+0x98/0x118
+ [<000000006c3574d2>] mempool_init+0x14/0x20
+ [<0000000008cb266e>] verity_fec_ctr+0x388/0x3b0
+ [<000000000887261b>] verity_ctr+0x87c/0x8d0
+ [<000000002b1e1c62>] dm_table_add_target+0x174/0x348
+ [<000000002ad89eda>] table_load+0xe4/0x328
+ [<000000001f06f5e9>] dm_ctl_ioctl+0x3b4/0x5a0
+ [<00000000bee5fbb7>] do_vfs_ioctl+0x5dc/0x928
+ [<00000000b475b8f5>] __arm64_sys_ioctl+0x70/0x98
+ [<000000005361e2e8>] el0_svc_common+0xa0/0x158
+ [<000000001374818f>] el0_svc_handler+0x6c/0x88
+ [<000000003364e9f4>] el0_svc+0x8/0xc
+ [<000000009d84cec9>] 0xffffffffffffffff
+
+Fixes: a739ff3f543af ("dm verity: add support for forward error correction")
+Depends-on: 6f1c819c219f7 ("dm: convert to bioset_init()/mempool_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Harshini Shetty <harshini.x.shetty@sony.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-verity-fec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -551,6 +551,7 @@ void verity_fec_dtr(struct dm_verity *v)
+ mempool_exit(&f->rs_pool);
+ mempool_exit(&f->prealloc_pool);
+ mempool_exit(&f->extra_pool);
++ mempool_exit(&f->output_pool);
+ kmem_cache_destroy(f->cache);
+
+ if (f->data_bufio)
--- /dev/null
+From 1edaa447d958bec24c6a79685a5790d98976fd16 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 27 Mar 2020 07:22:36 -0400
+Subject: dm writecache: add cond_resched to avoid CPU hangs
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1edaa447d958bec24c6a79685a5790d98976fd16 upstream.
+
+Initializing a dm-writecache device can take a long time when the
+persistent memory device is large. Add cond_resched() to a few loops
+to avoid warnings that the CPU is stuck.
+
+Cc: stable@vger.kernel.org # v4.18+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-writecache.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -876,6 +876,7 @@ static int writecache_alloc_entries(stru
+ struct wc_entry *e = &wc->entries[b];
+ e->index = b;
+ e->write_in_progress = false;
++ cond_resched();
+ }
+
+ return 0;
+@@ -930,6 +931,7 @@ static void writecache_resume(struct dm_
+ e->original_sector = le64_to_cpu(wme.original_sector);
+ e->seq_count = le64_to_cpu(wme.seq_count);
+ }
++ cond_resched();
+ }
+ #endif
+ for (b = 0; b < wc->n_blocks; b++) {
+@@ -1791,8 +1793,10 @@ static int init_memory(struct dm_writeca
+ pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
+ pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
+
+- for (b = 0; b < wc->n_blocks; b++)
++ for (b = 0; b < wc->n_blocks; b++) {
+ write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
++ cond_resched();
++ }
+
+ writecache_flush_all_metadata(wc);
+ writecache_commit_flushed(wc, false);
--- /dev/null
+From b8fdd090376a7a46d17db316638fe54b965c2fb0 Mon Sep 17 00:00:00 2001
+From: Bob Liu <bob.liu@oracle.com>
+Date: Tue, 24 Mar 2020 21:22:45 +0800
+Subject: dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
+
+From: Bob Liu <bob.liu@oracle.com>
+
+commit b8fdd090376a7a46d17db316638fe54b965c2fb0 upstream.
+
+zmd->nr_rnd_zones was increased twice by mistake. The other place it
+is increased, in dmz_init_zone(), is the only one needed:
+
+1131 zmd->nr_useable_zones++;
+1132 if (dmz_is_rnd(zone)) {
+1133 zmd->nr_rnd_zones++;
+ ^^^
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bob Liu <bob.liu@oracle.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-zoned-metadata.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1109,7 +1109,6 @@ static int dmz_init_zone(struct blk_zone
+ switch (blkz->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ set_bit(DMZ_RND, &zone->flags);
+- zmd->nr_rnd_zones++;
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
--- /dev/null
+From 72f5b5a308c744573fdbc6c78202c52196d2c162 Mon Sep 17 00:00:00 2001
+From: Michael Strauss <michael.strauss@amd.com>
+Date: Sun, 5 Apr 2020 16:41:12 -0400
+Subject: drm/amd/display: Check for null fclk voltage when parsing clock table
+
+From: Michael Strauss <michael.strauss@amd.com>
+
+commit 72f5b5a308c744573fdbc6c78202c52196d2c162 upstream.
+
+[WHY]
+In cases where a clock table is malformed such that fclk entries have
+frequencies but not voltages listed, we don't catch the error and set
+clocks to 0 instead of using hardcoded values as we should.
+
+[HOW]
+Add a check for the clock table's fclk entries' voltage as well
+
+Signed-off-by: Michael Strauss <michael.strauss@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_b
+ /* Find lowest DPM, FCLK is filled in reverse order*/
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+- if (clock_table->FClocks[i].Freq != 0) {
++ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
+ j = i;
+ break;
+ }
--- /dev/null
+From 4ee2bb22ddb53a2eafc675690d0d67452029ca37 Mon Sep 17 00:00:00 2001
+From: Prike Liang <Prike.Liang@amd.com>
+Date: Fri, 3 Apr 2020 12:26:15 +0800
+Subject: drm/amd/powerplay: implement the is_dpm_running()
+
+From: Prike Liang <Prike.Liang@amd.com>
+
+commit 4ee2bb22ddb53a2eafc675690d0d67452029ca37 upstream.
+
+The pmfw has not yet exported the SMU feature mask interface for APU
+SKUs, so just report all features as enabled through the driver's
+inquiry interface at this early initialization stage.
+
+Signed-off-by: Prike Liang <Prike.Liang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+@@ -887,6 +887,17 @@ static int renoir_read_sensor(struct smu
+ return ret;
+ }
+
++static bool renoir_is_dpm_running(struct smu_context *smu)
++{
++ /*
++ * Util now, the pmfw hasn't exported the interface of SMU
++ * feature mask to APU SKU so just force on all the feature
++ * at early initial stage.
++ */
++ return true;
++
++}
++
+ static const struct pptable_funcs renoir_ppt_funcs = {
+ .get_smu_msg_index = renoir_get_smu_msg_index,
+ .get_smu_clk_index = renoir_get_smu_clk_index,
+@@ -928,6 +939,7 @@ static const struct pptable_funcs renoir
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+ .set_driver_table_location = smu_v12_0_set_driver_table_location,
++ .is_dpm_running = renoir_is_dpm_running,
+ };
+
+ void renoir_set_ppt_funcs(struct smu_context *smu)
--- /dev/null
+From 022ac4c9c55be35a2d1f71019a931324c51b0dab Mon Sep 17 00:00:00 2001
+From: Yuxian Dai <Yuxian.Dai@amd.com>
+Date: Wed, 1 Apr 2020 19:26:26 +0800
+Subject: drm/amdgpu/powerplay: using the FCLK DPM table to set the MCLK
+
+From: Yuxian Dai <Yuxian.Dai@amd.com>
+
+commit 022ac4c9c55be35a2d1f71019a931324c51b0dab upstream.
+
+1. Use the FCLK DPM table to set the MCLK. The DPM states consist of
+three entities:
+ FCLK
+ UCLK
+ MEMCLK
+All three clocks change together, and MEMCLK is derived from FCLK, so
+use the FCLK frequency.
+2. Show the current working clock frequency from the clock table metrics.
+
+Signed-off-by: Yuxian Dai <Yuxian.Dai@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Reviewed-by: Kevin Wang <Kevin1.Wang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 6 ++++++
+ drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+@@ -240,6 +240,7 @@ static int renoir_print_clk_levels(struc
+ uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
++ bool cur_value_match_level = false;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+@@ -298,8 +299,13 @@ static int renoir_print_clk_levels(struc
+ GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
++ if (cur_value == value)
++ cur_value_match_level = true;
+ }
+
++ if (!cur_value_match_level)
++ size += sprintf(buf + size, " %uMhz *\n", cur_value);
++
+ return size;
+ }
+
+--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct
+ freq = table->SocClocks[dpm_level].Freq; \
+ break; \
+ case SMU_MCLK: \
+- freq = table->MemClocks[dpm_level].Freq; \
++ freq = table->FClocks[dpm_level].Freq; \
+ break; \
+ case SMU_DCEFCLK: \
+ freq = table->DcfClocks[dpm_level].Freq; \
--- /dev/null
+From 2960758cce2310774de60bbbd8d6841d436c54d9 Mon Sep 17 00:00:00 2001
+From: Aaron Liu <aaron.liu@amd.com>
+Date: Tue, 7 Apr 2020 17:46:04 +0800
+Subject: drm/amdgpu: unify fw_write_wait for new gfx9 asics
+
+From: Aaron Liu <aaron.liu@amd.com>
+
+commit 2960758cce2310774de60bbbd8d6841d436c54d9 upstream.
+
+Make the fw_write_wait default case true, since presumably all new
+gfx9 asics will have updated firmware, i.e. firmware that uses the
+unique WAIT_REG_MEM packet with operation=1.
+
+Signed-off-by: Aaron Liu <aaron.liu@amd.com>
+Tested-by: Aaron Liu <aaron.liu@amd.com>
+Tested-by: Yuxian Dai <Yuxian.Dai@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1158,6 +1158,8 @@ static void gfx_v9_0_check_fw_write_wait
+ adev->gfx.mec_fw_write_wait = true;
+ break;
+ default:
++ adev->gfx.me_fw_write_wait = true;
++ adev->gfx.mec_fw_write_wait = true;
+ break;
+ }
+ }
--- /dev/null
+From 3e138a63d6674a4567a018a31e467567c40b14d5 Mon Sep 17 00:00:00 2001
+From: Torsten Duwe <duwe@lst.de>
+Date: Tue, 18 Feb 2020 16:57:44 +0100
+Subject: drm/bridge: analogix-anx78xx: Fix drm_dp_link helper removal
+
+From: Torsten Duwe <duwe@lst.de>
+
+commit 3e138a63d6674a4567a018a31e467567c40b14d5 upstream.
+
+drm_dp_link_rate_to_bw_code and ...bw_code_to_link_rate simply divide by
+and multiply with 27000, respectively. Avoid an overflow in the u8 dpcd[0]
+and the multiply+divide altogether.
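+
+As a worked example (assuming the sink reports HBR2): the raw DPCD byte
+DP_MAX_LINK_RATE is 0x14, drm_dp_max_link_rate() expands it to 540000
+(kHz), which truncates to 0x60 when stored into the u8 dpcd[0], and
+drm_dp_link_rate_to_bw_code(0x60) then yields 0. Writing the raw 0x14
+to SP_DP_MAIN_LINK_BW_SET_REG avoids both conversions.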
+
+Signed-off-by: Torsten Duwe <duwe@lst.de>
+Fixes: ff1e8fb68ea0 ("drm/bridge: analogix-anx78xx: Avoid drm_dp_link helpers")
+Cc: Thierry Reding <treding@nvidia.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Andrzej Hajda <a.hajda@samsung.com>
+Cc: Neil Armstrong <narmstrong@baylibre.com>
+Cc: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+Cc: Jonas Karlman <jonas@kwiboo.se>
+Cc: Jernej Skrabec <jernej.skrabec@siol.net>
+Cc: <stable@vger.kernel.org> # v5.5+
+Reviewed-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200218155744.9675368BE1@verein.lst.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(stru
+ if (err)
+ return err;
+
+- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
++ SP_DP_MAIN_LINK_BW_SET_REG,
++ anx78xx->dpcd[DP_MAX_LINK_RATE]);
+ if (err)
+ return err;
+
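
For context, the conversion the patch drops really is just a scale by 27000, and the intermediate u8 cannot hold a full link-rate value. The following is a stand-alone sketch of that arithmetic, not the driver code; the two helpers are simplified stand-ins for the drm_dp functions and the values are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins mirroring the drm_dp helpers: bw code <-> link rate is a
 * multiply/divide by 27000. */
static int bw_code_to_link_rate(int code) { return code * 27000; }
static int link_rate_to_bw_code(int rate) { return rate / 27000; }

int main(void)
{
        uint8_t dpcd_max_link_rate = 0x14;      /* HBR2 bw code as read from DPCD */

        /* Old path: expand to the full link rate (540000), store it in a u8,
         * which truncates it, then convert back to a bw code. */
        uint8_t truncated_rate = bw_code_to_link_rate(dpcd_max_link_rate);

        printf("truncated rate: %u -> bw code 0x%02x (wrong)\n",
               (unsigned int)truncated_rate,
               (unsigned int)link_rate_to_bw_code(truncated_rate));
        printf("raw DPCD byte written directly: 0x%02x (right)\n",
               (unsigned int)dpcd_max_link_rate);
        return 0;
}
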
--- /dev/null
+From ed1dd899baa32d47d9a93d98336472da50564346 Mon Sep 17 00:00:00 2001
+From: Christian Gmeiner <christian.gmeiner@gmail.com>
+Date: Fri, 28 Feb 2020 11:37:49 +0100
+Subject: drm/etnaviv: rework perfmon query infrastructure
+
+From: Christian Gmeiner <christian.gmeiner@gmail.com>
+
+commit ed1dd899baa32d47d9a93d98336472da50564346 upstream.
+
+Report the correct perfmon domains and signals depending
+on the supported feature flags.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 9e2c2e273012 ("drm/etnaviv: add infrastructure to query perf counter")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Gmeiner <christian.gmeiner@gmail.com>
+Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 59 ++++++++++++++++++++++++++----
+ 1 file changed, 52 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+@@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
+ };
+
+ struct etnaviv_pm_domain_meta {
++ unsigned int feature;
+ const struct etnaviv_pm_domain *domains;
+ u32 nr_domains;
+ };
+@@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain do
+
+ static const struct etnaviv_pm_domain_meta doms_meta[] = {
+ {
++ .feature = chipFeatures_PIPE_3D,
+ .nr_domains = ARRAY_SIZE(doms_3d),
+ .domains = &doms_3d[0]
+ },
+ {
++ .feature = chipFeatures_PIPE_2D,
+ .nr_domains = ARRAY_SIZE(doms_2d),
+ .domains = &doms_2d[0]
+ },
+ {
++ .feature = chipFeatures_PIPE_VG,
+ .nr_domains = ARRAY_SIZE(doms_vg),
+ .domains = &doms_vg[0]
+ }
+ };
+
++static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
++{
++ unsigned int num = 0, i;
++
++ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
++ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
++
++ if (gpu->identity.features & meta->feature)
++ num += meta->nr_domains;
++ }
++
++ return num;
++}
++
++static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
++ unsigned int index)
++{
++ const struct etnaviv_pm_domain *domain = NULL;
++ unsigned int offset = 0, i;
++
++ for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
++ const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
++
++ if (!(gpu->identity.features & meta->feature))
++ continue;
++
++ if (meta->nr_domains < (index - offset)) {
++ offset += meta->nr_domains;
++ continue;
++ }
++
++ domain = meta->domains + (index - offset);
++ }
++
++ return domain;
++}
++
+ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain)
+ {
+- const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
++ const unsigned int nr_domains = num_pm_domains(gpu);
+ const struct etnaviv_pm_domain *dom;
+
+- if (domain->iter >= meta->nr_domains)
++ if (domain->iter >= nr_domains)
+ return -EINVAL;
+
+- dom = meta->domains + domain->iter;
++ dom = pm_domain(gpu, domain->iter);
++ if (!dom)
++ return -EINVAL;
+
+ domain->id = domain->iter;
+ domain->nr_signals = dom->nr_signals;
+ strncpy(domain->name, dom->name, sizeof(domain->name));
+
+ domain->iter++;
+- if (domain->iter == meta->nr_domains)
++ if (domain->iter == nr_domains)
+ domain->iter = 0xff;
+
+ return 0;
+@@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_
+ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal)
+ {
+- const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
++ const unsigned int nr_domains = num_pm_domains(gpu);
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+
+- if (signal->domain >= meta->nr_domains)
++ if (signal->domain >= nr_domains)
+ return -EINVAL;
+
+- dom = meta->domains + signal->domain;
++ dom = pm_domain(gpu, signal->domain);
++ if (!dom)
++ return -EINVAL;
+
+ if (signal->iter >= dom->nr_signals)
+ return -EINVAL;
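
The reworked query walks feature-gated sub-arrays with a single flat index. As a rough stand-alone illustration of that mapping (domain names, feature bits and the bounds check are invented for the sketch; it is not the etnaviv code):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct pm_domain { const char *name; };

struct pm_domain_meta {
        unsigned int feature;                   /* pipe present on this GPU? */
        const struct pm_domain *domains;
        unsigned int nr_domains;
};

static const struct pm_domain doms_3d[] = { { "HI" }, { "PE" }, { "SH" } };
static const struct pm_domain doms_2d[] = { { "PE2D" } };

static const struct pm_domain_meta doms_meta[] = {
        { 0x1, doms_3d, ARRAY_SIZE(doms_3d) },
        { 0x2, doms_2d, ARRAY_SIZE(doms_2d) },
};

/* Count only the domains of pipes the GPU actually has. */
static unsigned int num_pm_domains(unsigned int features)
{
        unsigned int i, num = 0;

        for (i = 0; i < ARRAY_SIZE(doms_meta); i++)
                if (features & doms_meta[i].feature)
                        num += doms_meta[i].nr_domains;
        return num;
}

/* Map a flat index onto the concatenation of the present pipes' domains. */
static const struct pm_domain *pm_domain(unsigned int features, unsigned int index)
{
        unsigned int i, offset = 0;

        for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
                const struct pm_domain_meta *meta = &doms_meta[i];

                if (!(features & meta->feature))
                        continue;
                if (index - offset < meta->nr_domains)
                        return &meta->domains[index - offset];
                offset += meta->nr_domains;
        }
        return NULL;                            /* index out of range */
}

int main(void)
{
        unsigned int features = 0x1 | 0x2, i;

        for (i = 0; i < num_pm_domains(features); i++)
                printf("%u -> %s\n", i, pm_domain(features, i)->name);
        return 0;
}
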
--- /dev/null
+From 1aaea8476d9f014667d2cb24819f9bcaf3ebb7a4 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 6 Apr 2020 12:48:21 +0100
+Subject: drm/i915/gem: Flush all the reloc_gpu batch
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 1aaea8476d9f014667d2cb24819f9bcaf3ebb7a4 upstream.
+
+__i915_gem_object_flush_map() takes a byte range, so feed it the written
+bytes and do not mistake the u32 index as bytes!
+
+Fixes: a679f58d0510 ("drm/i915: Flush pages on acquisition")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Matthew Auld <matthew.william.auld@gmail.com>
+Cc: <stable@vger.kernel.org> # v5.2+
+Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200406114821.10949-1-chris@chris-wilson.co.uk
+(cherry picked from commit 30c88a47f1abd5744908d3681f54dcf823fe2a12)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -914,11 +914,13 @@ static inline struct i915_ggtt *cache_to
+
+ static void reloc_gpu_flush(struct reloc_cache *cache)
+ {
+- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
++ struct drm_i915_gem_object *obj = cache->rq->batch->obj;
++
++ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
+ cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
+- i915_gem_object_unpin_map(cache->rq->batch->obj);
++ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
++ i915_gem_object_unpin_map(obj);
+
+ intel_gt_chipset_flush(cache->rq->engine->gt);
+
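
The unit mix-up is easy to see in isolation: rq_size counts u32 commands, while the flush helper takes a byte range, and the just-written MI_BATCH_BUFFER_END terminator must be covered too. A minimal sketch with a made-up command count:

#include <stdio.h>

int main(void)
{
        unsigned int rq_size = 100;     /* dwords emitted so far */

        /* The old call flushed only rq_size *bytes*, i.e. a quarter of the
         * data, and missed the terminator dword appended afterwards. */
        size_t wrong = rq_size;
        size_t right = sizeof(unsigned int) * (rq_size + 1);

        printf("bytes flushed before: %zu, after: %zu\n", wrong, right);
        return 0;
}
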
--- /dev/null
+From 98479ada421a8fd2123b98efd398a6f1379307ab Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sun, 22 Mar 2020 16:32:24 +0000
+Subject: drm/i915/gt: Treat idling as a RPS downclock event
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 98479ada421a8fd2123b98efd398a6f1379307ab upstream.
+
+If we park/unpark faster than we can respond to RPS events, we never
+will process a downclock event after expiring a waitboost, and thus we
+will forever restart the GPU at max clocks even if the workload switches
+and doesn't justify full power.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/issues/1500
+Fixes: 3e7abf814193 ("drm/i915: Extract GT render power state management")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Andi Shyti <andi.shyti@intel.com>
+Cc: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Andi Shyti <andi.shyti@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200322163225.28791-1-chris@chris-wilson.co.uk
+Cc: <stable@vger.kernel.org> # v5.5+
+(cherry picked from commit 21abf0bf168dffff1192e0f072af1dc74ae1ff0e)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_rps.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -763,6 +763,19 @@ void intel_rps_park(struct intel_rps *rp
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq, false);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
++
++ /*
++ * Since we will try and restart from the previously requested
++ * frequency on unparking, treat this idle point as a downclock
++ * interrupt and reduce the frequency for resume. If we park/unpark
++ * more frequently than the rps worker can run, we will not respond
++ * to any EI and never see a change in frequency.
++ *
++ * (Note we accommodate Cherryview's limitation of only using an
++ * even bin by applying it to all.)
++ */
++ rps->cur_freq =
++ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
+ }
+
+ void intel_rps_boost(struct i915_request *rq)
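
The new idle-point adjustment is just a clamped step down to the next even bin. A stand-alone sketch of that arithmetic, with the helpers below as simplified stand-ins for the kernel's round_down()/max_t() and made-up frequency values:

#include <stdio.h>

#define round_down(x, y)  ((x) & ~((y) - 1))    /* y must be a power of two */
#define max_int(a, b)     ((a) > (b) ? (a) : (b))

int main(void)
{
        int min_freq = 300, cur;

        for (cur = 300; cur <= 306; cur += 3) {
                /* Step one bin down on park, keep the even-bin restriction,
                 * and never drop below the minimum frequency. */
                int next = max_int(round_down(cur - 1, 2), min_freq);

                printf("parked at %d -> next request %d\n", cur, next);
        }
        return 0;
}
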
--- /dev/null
+From 6e8a36c13382b7165d23928caee8d91c1b301142 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 30 Mar 2020 18:22:44 +0300
+Subject: drm/i915/icl+: Don't enable DDI IO power on a TypeC port in TBT mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 6e8a36c13382b7165d23928caee8d91c1b301142 upstream.
+
+The DDI IO power well must not be enabled for a TypeC port in TBT mode;
+ensure this during driver loading/system resume.
+
+This gets rid of error messages like
+[drm] *ERROR* power well DDI E TC2 IO state mismatch (refcount 1/enabled 0)
+
+and avoids leaking the power ref when disabling the output.
+
+Cc: <stable@vger.kernel.org> # v5.4+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200330152244.11316-1-imre.deak@intel.com
+(cherry picked from commit f77a2db27f26c3ccba0681f7e89fef083718f07f)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -2225,7 +2225,11 @@ static void intel_ddi_get_power_domains(
+ return;
+
+ dig_port = enc_to_dig_port(encoder);
+- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
++
++ if (!intel_phy_is_tc(dev_priv, phy) ||
++ dig_port->tc_mode != TC_PORT_TBT_ALT)
++ intel_display_power_get(dev_priv,
++ dig_port->ddi_io_power_domain);
+
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
--- /dev/null
+From c0f83d164fb8f3a2b7bc379a6c1e27d1123a9eab Mon Sep 17 00:00:00 2001
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Fri, 27 Mar 2020 17:21:26 +0100
+Subject: drm/prime: fix extracting of the DMA addresses from a scatterlist
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+commit c0f83d164fb8f3a2b7bc379a6c1e27d1123a9eab upstream.
+
+Scatterlist elements contain both pages and DMA addresses, but one
+should not assume a 1:1 relation between them. The sg->length is the size
+of the physical memory chunk described by the sg->page, while
+sg_dma_len(sg) is the size of the DMA (IO virtual) chunk described by
+the sg_dma_address(sg).
+
+The proper way of extracting both the pages and the DMA addresses of the
+whole buffer described by a scatterlist is to iterate independently over
+the sg->pages/sg->length and sg_dma_address(sg)/sg_dma_len(sg) entries.
+
+Fixes: 42e67b479eab ("drm/prime: use dma length macro when mapping sg")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200327162126.29705-1-m.szyprowski@samsung.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_prime.c | 37 +++++++++++++++++++++++++------------
+ 1 file changed, 25 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(str
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+- u32 len, index;
++ u32 page_len, page_index;
+ dma_addr_t addr;
++ u32 dma_len, dma_index;
+
+- index = 0;
++ /*
++ * Scatterlist elements contains both pages and DMA addresses, but
++ * one shoud not assume 1:1 relation between them. The sg->length is
++ * the size of the physical memory chunk described by the sg->page,
++ * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
++ * described by the sg_dma_address(sg).
++ */
++ page_index = 0;
++ dma_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+- len = sg_dma_len(sg);
++ page_len = sg->length;
+ page = sg_page(sg);
++ dma_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+
+- while (len > 0) {
+- if (WARN_ON(index >= max_entries))
++ while (pages && page_len > 0) {
++ if (WARN_ON(page_index >= max_entries))
+ return -1;
+- if (pages)
+- pages[index] = page;
+- if (addrs)
+- addrs[index] = addr;
+-
++ pages[page_index] = page;
+ page++;
++ page_len -= PAGE_SIZE;
++ page_index++;
++ }
++ while (addrs && dma_len > 0) {
++ if (WARN_ON(dma_index >= max_entries))
++ return -1;
++ addrs[dma_index] = addr;
+ addr += PAGE_SIZE;
+- len -= PAGE_SIZE;
+- index++;
++ dma_len -= PAGE_SIZE;
++ dma_index++;
+ }
+ }
+ return 0;
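
The fix boils down to walking the CPU-side lengths and the DMA-side lengths as two independent streams. A stand-alone illustration with fabricated scatterlist contents (with an IOMMU the two sides of one element routinely differ, as modelled here):

#include <stdio.h>

#define PAGE_SIZE 4096u

struct sg_entry {
        unsigned int length;            /* physical chunk size (sg->length) */
        unsigned int dma_len;           /* mapped IOVA chunk size (sg_dma_len) */
};

int main(void)
{
        /* Two physical chunks merged by the IOMMU into one IOVA chunk. */
        struct sg_entry sgt[] = {
                { 2 * PAGE_SIZE, 4 * PAGE_SIZE },
                { 2 * PAGE_SIZE, 0 },
        };
        unsigned int i, len, pages = 0, addrs = 0;

        for (i = 0; i < sizeof(sgt) / sizeof(sgt[0]); i++) {
                for (len = sgt[i].length; len > 0; len -= PAGE_SIZE)
                        pages++;        /* one page pointer per physical page */
                for (len = sgt[i].dma_len; len > 0; len -= PAGE_SIZE)
                        addrs++;        /* one address per IOVA page */
        }

        /* Both arrays end up with 4 entries; driving them with one shared
         * counter, as before the fix, would have gone wrong here. */
        printf("pages=%u, dma addresses=%u\n", pages, addrs);
        return 0;
}
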
--- /dev/null
+From ea36ec8623f56791c6ff6738d0509b7920f85220 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sun, 2 Feb 2020 17:16:31 +0000
+Subject: drm: Remove PageReserved manipulation from drm_pci_alloc
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit ea36ec8623f56791c6ff6738d0509b7920f85220 upstream.
+
+drm_pci_alloc/drm_pci_free are very thin wrappers around the core dma
+facilities, and we have no special reason within the drm layer to behave
+differently. In particular, since
+
+commit de09d31dd38a50fdce106c15abd68432eebbd014
+Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Date: Fri Jan 15 16:51:42 2016 -0800
+
+ page-flags: define PG_reserved behavior on compound pages
+
+ As far as I can see there's no users of PG_reserved on compound pages.
+ Let's use PF_NO_COMPOUND here.
+
+it has been illegal to combine __GFP_COMP with SetPageReserved, so let's
+stop doing both and leave the DMA layer to its own devices.
+
+Reported-by: Taketo Kabe
+Bug: https://gitlab.freedesktop.org/drm/intel/issues/1027
+Fixes: de09d31dd38a ("page-flags: define PG_reserved behavior on compound pages")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v4.5+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200202171635.4039044-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_pci.c | 23 ++---------------------
+ 1 file changed, 2 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/drm_pci.c
++++ b/drivers/gpu/drm/drm_pci.c
+@@ -51,8 +51,6 @@
+ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+ {
+ drm_dma_handle_t *dmah;
+- unsigned long addr;
+- size_t sz;
+
+ /* pci_alloc_consistent only guarantees alignment to the smallest
+ * PAGE_SIZE order which is greater than or equal to the requested size.
+@@ -68,20 +66,13 @@ drm_dma_handle_t *drm_pci_alloc(struct d
+ dmah->size = size;
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+ &dmah->busaddr,
+- GFP_KERNEL | __GFP_COMP);
++ GFP_KERNEL);
+
+ if (dmah->vaddr == NULL) {
+ kfree(dmah);
+ return NULL;
+ }
+
+- /* XXX - Is virt_to_page() legal for consistent mem? */
+- /* Reserve */
+- for (addr = (unsigned long)dmah->vaddr, sz = size;
+- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+- SetPageReserved(virt_to_page((void *)addr));
+- }
+-
+ return dmah;
+ }
+
+@@ -94,19 +85,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
+ */
+ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+ {
+- unsigned long addr;
+- size_t sz;
+-
+- if (dmah->vaddr) {
+- /* XXX - Is virt_to_page() legal for consistent mem? */
+- /* Unreserve */
+- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+- ClearPageReserved(virt_to_page((void *)addr));
+- }
++ if (dmah->vaddr)
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
+- }
+ }
+
+ /**
--- /dev/null
+From a65a97b48694d34248195eb89bf3687403261056 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 25 Mar 2020 15:43:10 +0100
+Subject: drm/vboxvideo: Add missing remove_conflicting_pci_framebuffers call, v2
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit a65a97b48694d34248195eb89bf3687403261056 upstream.
+
+The vboxvideo driver is missing a call to remove conflicting framebuffers.
+
+Surprisingly, when using legacy BIOS booting this does not really cause
+any issues. But when using UEFI to boot the VM then plymouth will draw
+on both the efifb /dev/fb0 and /dev/drm/card0 (which has registered
+/dev/fb1 as fbdev emulation).
+
+VirtualBox will actually display the output of both devices (I guess it is
+showing whatever was drawn last). This causes weird artifacts because of
+pitch issues in the efifb when the VM window is not sized at 1024x768
+(the window will resize to its last size once the vboxvideo driver loads,
+changing the pitch).
+
+Adding the missing drm_fb_helper_remove_conflicting_pci_framebuffers()
+call fixes this.
+
+Changes in v2:
+-Make the drm_fb_helper_remove_conflicting_pci_framebuffers() call one of
+ the first things we do in our probe() method
+
+Cc: stable@vger.kernel.org
+Fixes: 2695eae1f6d3 ("drm/vboxvideo: Switch to generic fbdev emulation")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200325144310.36779-1-hdegoede@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vboxvideo/vbox_drv.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
++++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
+@@ -41,6 +41,10 @@ static int vbox_pci_probe(struct pci_dev
+ if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+ return -ENODEV;
+
++ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
++ if (ret)
++ return ret;
++
+ vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
--- /dev/null
+From 28936b62e71e41600bab319f262ea9f9b1027629 Mon Sep 17 00:00:00 2001
+From: Qian Cai <cai@lca.pw>
+Date: Fri, 21 Feb 2020 23:32:58 -0500
+Subject: ext4: fix a data race at inode->i_blocks
+
+From: Qian Cai <cai@lca.pw>
+
+commit 28936b62e71e41600bab319f262ea9f9b1027629 upstream.
+
+inode->i_blocks could be accessed concurrently as noticed by KCSAN,
+
+ BUG: KCSAN: data-race in ext4_do_update_inode [ext4] / inode_add_bytes
+
+ write to 0xffff9a00d4b982d0 of 8 bytes by task 22100 on cpu 118:
+ inode_add_bytes+0x65/0xf0
+ __inode_add_bytes at fs/stat.c:689
+ (inlined by) inode_add_bytes at fs/stat.c:702
+ ext4_mb_new_blocks+0x418/0xca0 [ext4]
+ ext4_ext_map_blocks+0x1a6b/0x27b0 [ext4]
+ ext4_map_blocks+0x1a9/0x950 [ext4]
+ _ext4_get_block+0xfc/0x270 [ext4]
+ ext4_get_block_unwritten+0x33/0x50 [ext4]
+ __block_write_begin_int+0x22e/0xae0
+ __block_write_begin+0x39/0x50
+ ext4_write_begin+0x388/0xb50 [ext4]
+ ext4_da_write_begin+0x35f/0x8f0 [ext4]
+ generic_perform_write+0x15d/0x290
+ ext4_buffered_write_iter+0x11f/0x210 [ext4]
+ ext4_file_write_iter+0xce/0x9e0 [ext4]
+ new_sync_write+0x29c/0x3b0
+ __vfs_write+0x92/0xa0
+ vfs_write+0x103/0x260
+ ksys_write+0x9d/0x130
+ __x64_sys_write+0x4c/0x60
+ do_syscall_64+0x91/0xb05
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ read to 0xffff9a00d4b982d0 of 8 bytes by task 8 on cpu 65:
+ ext4_do_update_inode+0x4a0/0xf60 [ext4]
+ ext4_inode_blocks_set at fs/ext4/inode.c:4815
+ ext4_mark_iloc_dirty+0xaf/0x160 [ext4]
+ ext4_mark_inode_dirty+0x129/0x3e0 [ext4]
+ ext4_convert_unwritten_extents+0x253/0x2d0 [ext4]
+ ext4_convert_unwritten_io_end_vec+0xc5/0x150 [ext4]
+ ext4_end_io_rsv_work+0x22c/0x350 [ext4]
+ process_one_work+0x54f/0xb90
+ worker_thread+0x80/0x5f0
+ kthread+0x1cd/0x1f0
+ ret_from_fork+0x27/0x50
+
+ 4 locks held by kworker/u256:0/8:
+ #0: ffff9a025abc4328 ((wq_completion)ext4-rsv-conversion){+.+.}, at: process_one_work+0x443/0xb90
+ #1: ffffab5a862dbe20 ((work_completion)(&ei->i_rsv_conversion_work)){+.+.}, at: process_one_work+0x443/0xb90
+ #2: ffff9a025a9d0f58 (jbd2_handle){++++}, at: start_this_handle+0x1c1/0x9d0 [jbd2]
+ #3: ffff9a00d4b985d8 (&(&ei->i_raw_lock)->rlock){+.+.}, at: ext4_do_update_inode+0xaa/0xf60 [ext4]
+ irq event stamp: 3009267
+ hardirqs last enabled at (3009267): [<ffffffff980da9b7>] __find_get_block+0x107/0x790
+ hardirqs last disabled at (3009266): [<ffffffff980da8f9>] __find_get_block+0x49/0x790
+ softirqs last enabled at (3009230): [<ffffffff98a0034c>] __do_softirq+0x34c/0x57c
+ softirqs last disabled at (3009223): [<ffffffff97cc67a2>] irq_exit+0xa2/0xc0
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 65 PID: 8 Comm: kworker/u256:0 Tainted: G L 5.6.0-rc2-next-20200221+ #7
+ Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019
+ Workqueue: ext4-rsv-conversion ext4_end_io_rsv_work [ext4]
+
+The plain read is outside of the inode->i_lock critical section, which
+results in a data race. Fix it by adding READ_ONCE() there.
+
+Link: https://lore.kernel.org/r/20200222043258.2279-1-cai@lca.pw
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4812,7 +4812,7 @@ static int ext4_inode_blocks_set(handle_
+ struct ext4_inode_info *ei)
+ {
+ struct inode *inode = &(ei->vfs_inode);
+- u64 i_blocks = inode->i_blocks;
++ u64 i_blocks = READ_ONCE(inode->i_blocks);
+ struct super_block *sb = inode->i_sb;
+
+ if (i_blocks <= ~0U) {
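READ_ONCE() here only annotates and stabilizes the lockless load. A userspace approximation of the idea is sketched below; the macro definition is a simplified stand-in, not the kernel's full implementation, and the struct is invented for the example:

#include <stdio.h>

/* Simplified stand-in: force exactly one load, immune to compiler re-reads
 * or tearing into smaller accesses. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct inode_sketch { unsigned long long i_blocks; };

int main(void)
{
        struct inode_sketch inode = { .i_blocks = 123456 };

        /* Snapshot taken outside the lock: the value may be stale, but it is
         * read exactly once and the race is marked as intentional. */
        unsigned long long i_blocks = READ_ONCE(inode.i_blocks);

        printf("i_blocks snapshot: %llu\n", i_blocks);
        return 0;
}
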
--- /dev/null
+From 26c5d78c976ca298e59a56f6101a97b618ba3539 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 10 Apr 2020 14:33:47 -0700
+Subject: fs/filesystems.c: downgrade user-reachable WARN_ONCE() to pr_warn_once()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 26c5d78c976ca298e59a56f6101a97b618ba3539 upstream.
+
+After request_module(), nothing is stopping the module from being
+unloaded until someone takes a reference to it via try_get_module().
+
+The WARN_ONCE() in get_fs_type() is thus user-reachable, via userspace
+running 'rmmod' concurrently.
+
+Since WARN_ONCE() is for kernel bugs only, not for user-reachable
+situations, downgrade this warning to pr_warn_once().
+
+Keep it printed once only, since the intent of this warning is to detect
+a bug in modprobe at boot time. Printing the warning more than once
+wouldn't really provide any useful extra information.
+
+Fixes: 41124db869b7 ("fs: warn in case userspace lied about modprobe return")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Jessica Yu <jeyu@kernel.org>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jeff Vander Stoep <jeffv@google.com>
+Cc: Jessica Yu <jeyu@kernel.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: NeilBrown <neilb@suse.com>
+Cc: <stable@vger.kernel.org> [4.13+]
+Link: http://lkml.kernel.org/r/20200312202552.241885-3-ebiggers@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/filesystems.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -272,7 +272,9 @@ struct file_system_type *get_fs_type(con
+ fs = __get_fs_type(name, len);
+ if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
+ fs = __get_fs_type(name, len);
+- WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name);
++ if (!fs)
++ pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n",
++ len, name);
+ }
+
+ if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
--- /dev/null
+From 6a13a0d7b4d1171ef9b80ad69abc37e1daa941b3 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Tue, 24 Mar 2020 16:34:48 +0900
+Subject: ftrace/kprobe: Show the maxactive number on kprobe_events
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 6a13a0d7b4d1171ef9b80ad69abc37e1daa941b3 upstream.
+
+Show maxactive parameter on kprobe_events.
+This allows the user to save the current configuration and
+restore it without losing the maxactive parameter.
+
+Link: http://lkml.kernel.org/r/4762764a-6df7-bc93-ed60-e336146dce1f@gmail.com
+Link: http://lkml.kernel.org/r/158503528846.22706.5549974121212526020.stgit@devnote2
+
+Cc: stable@vger.kernel.org
+Fixes: 696ced4fb1d76 ("tracing/kprobes: expose maxactive for kretprobe in kprobe_events")
+Reported-by: Taeung Song <treeze.taeung@gmail.com>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_kprobe.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1078,6 +1078,8 @@ static int trace_kprobe_show(struct seq_
+ int i;
+
+ seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
++ if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
++ seq_printf(m, "%d", tk->rp.maxactive);
+ seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
+ trace_probe_name(&tk->tp));
+
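
The formatting change mirrors the event definition syntax, where a kretprobe's maxactive is spelled between the 'r' and the ':'. A tiny sketch of the output logic follows; the group and event names are invented for the example:

#include <stdio.h>

static void show_kprobe(int is_return, int maxactive,
                        const char *group, const char *event)
{
        putchar(is_return ? 'r' : 'p');
        if (is_return && maxactive)
                printf("%d", maxactive);        /* now part of the listing */
        printf(":%s/%s ...\n", group, event);
}

int main(void)
{
        show_kprobe(1, 16, "mygroup", "myretprobe");    /* r16:mygroup/myretprobe ... */
        show_kprobe(0, 0, "mygroup", "myprobe");        /* p:mygroup/myprobe ... */
        return 0;
}
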
--- /dev/null
+From 25efb2ffdf991177e740b2f63e92b4ec7d310a92 Mon Sep 17 00:00:00 2001
+From: Simon Gander <simon@tuxera.com>
+Date: Fri, 10 Apr 2020 14:32:16 -0700
+Subject: hfsplus: fix crash and filesystem corruption when deleting files
+
+From: Simon Gander <simon@tuxera.com>
+
+commit 25efb2ffdf991177e740b2f63e92b4ec7d310a92 upstream.
+
+When removing files containing extended attributes, the hfsplus driver may
+remove the wrong entries from the attributes b-tree, causing major
+filesystem damage and in some cases even kernel crashes.
+
+To remove a file, all its extended attributes have to be removed as well.
+The driver does this by looking up all keys in the attributes b-tree with
+the cnid of the file. Each of these entries then gets deleted using the
+key used for searching, which doesn't contain the attribute's name when it
+should. Since the key doesn't contain the name, the deletion routine will
+not find the correct entry and instead remove the one in front of it. If
+parent nodes have to be modified, these become corrupt as well. This
+causes invalid links and unsorted entries that not even macOS's fsck_hfs
+is able to fix.
+
+To fix this, modify the search key before an entry is deleted from the
+attributes b-tree by copying the found entry's key into the search key,
+therefore ensuring that the correct entry gets removed from the tree.
+
+Signed-off-by: Simon Gander <simon@tuxera.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Anton Altaparmakov <anton@tuxera.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200327155541.1521-1-simon@tuxera.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hfsplus/attributes.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/hfsplus/attributes.c
++++ b/fs/hfsplus/attributes.c
+@@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct
+ return -ENOENT;
+ }
+
++ /* Avoid btree corruption */
++ hfs_bnode_read(fd->bnode, fd->search_key,
++ fd->keyoffset, fd->keylength);
++
+ err = hfs_brec_remove(fd);
+ if (err)
+ return err;
--- /dev/null
+From ebc68cedec4aead47d8d11623d013cca9bf8e825 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Wed, 1 Apr 2020 13:23:06 -0700
+Subject: Input: i8042 - add Acer Aspire 5738z to nomux list
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit ebc68cedec4aead47d8d11623d013cca9bf8e825 upstream.
+
+The Acer Aspire 5738z has a button to disable (and re-enable) the
+touchpad next to the touchpad.
+
+When this button is pressed a LED underneath indicates that the touchpad
+is disabled (and an event is sent to userspace and GNOME shows its
+touchpad enable/disable OSD thingie).
+
+So far so good, but after re-enabling the touchpad it no longer works.
+
+The laptop does not have an external PS/2 port, so mux mode is not needed,
+and disabling mux mode fixes the touchpad no longer working after toggling
+it off and back on again. So let's add this laptop model to the nomux list.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20200331123947.318908-1-hdegoede@redhat.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -530,6 +530,17 @@ static const struct dmi_system_id __init
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ },
+ },
++ {
++ /*
++ * Acer Aspire 5738z
++ * Touchpad stops working in mux mode when dis- + re-enabled
++ * with the touchpad enable/disable toggle hotkey
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
++ },
++ },
+ { }
+ };
+
--- /dev/null
+From 32830a0534700f86366f371b150b17f0f0d140d7 Mon Sep 17 00:00:00 2001
+From: Wen Yang <wenyang@linux.alibaba.com>
+Date: Fri, 3 Apr 2020 17:04:08 +0800
+Subject: ipmi: fix hung processes in __get_guid()
+
+From: Wen Yang <wenyang@linux.alibaba.com>
+
+commit 32830a0534700f86366f371b150b17f0f0d140d7 upstream.
+
+The wait_event() function is used to detect command completion.
+When send_guid_cmd() returns an error, smi_send() has not been
+called to send data. Therefore, wait_event() should not be used
+on the error path; otherwise it will cause the following warning:
+
+[ 1361.588808] systemd-udevd D 0 1501 1436 0x00000004
+[ 1361.588813] ffff883f4b1298c0 0000000000000000 ffff883f4b188000 ffff887f7e3d9f40
+[ 1361.677952] ffff887f64bd4280 ffffc90037297a68 ffffffff8173ca3b ffffc90000000010
+[ 1361.767077] 00ffc90037297ad0 ffff887f7e3d9f40 0000000000000286 ffff883f4b188000
+[ 1361.856199] Call Trace:
+[ 1361.885578] [<ffffffff8173ca3b>] ? __schedule+0x23b/0x780
+[ 1361.951406] [<ffffffff8173cfb6>] schedule+0x36/0x80
+[ 1362.010979] [<ffffffffa071f178>] get_guid+0x118/0x150 [ipmi_msghandler]
+[ 1362.091281] [<ffffffff810d5350>] ? prepare_to_wait_event+0x100/0x100
+[ 1362.168533] [<ffffffffa071f755>] ipmi_register_smi+0x405/0x940 [ipmi_msghandler]
+[ 1362.258337] [<ffffffffa0230ae9>] try_smi_init+0x529/0x950 [ipmi_si]
+[ 1362.334521] [<ffffffffa022f350>] ? std_irq_setup+0xd0/0xd0 [ipmi_si]
+[ 1362.411701] [<ffffffffa0232bd2>] init_ipmi_si+0x492/0x9e0 [ipmi_si]
+[ 1362.487917] [<ffffffffa0232740>] ? ipmi_pci_probe+0x280/0x280 [ipmi_si]
+[ 1362.568219] [<ffffffff810021a0>] do_one_initcall+0x50/0x180
+[ 1362.636109] [<ffffffff812231b2>] ? kmem_cache_alloc_trace+0x142/0x190
+[ 1362.714330] [<ffffffff811b2ae1>] do_init_module+0x5f/0x200
+[ 1362.781208] [<ffffffff81123ca8>] load_module+0x1898/0x1de0
+[ 1362.848069] [<ffffffff811202e0>] ? __symbol_put+0x60/0x60
+[ 1362.913886] [<ffffffff8130696b>] ? security_kernel_post_read_file+0x6b/0x80
+[ 1362.998514] [<ffffffff81124465>] SYSC_finit_module+0xe5/0x120
+[ 1363.068463] [<ffffffff81124465>] ? SYSC_finit_module+0xe5/0x120
+[ 1363.140513] [<ffffffff811244be>] SyS_finit_module+0xe/0x10
+[ 1363.207364] [<ffffffff81003c04>] do_syscall_64+0x74/0x180
+
+Fixes: 50c812b2b951 ("[PATCH] ipmi: add full sysfs support")
+Signed-off-by: Wen Yang <wenyang@linux.alibaba.com>
+Cc: Corey Minyard <minyard@acm.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: openipmi-developer@lists.sourceforge.net
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org # 2.6.17-
+Message-Id: <20200403090408.58745-1-wenyang@linux.alibaba.com>
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/ipmi/ipmi_msghandler.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3188,8 +3188,8 @@ static void __get_guid(struct ipmi_smi *
+ if (rv)
+ /* Send failed, no GUID available. */
+ bmc->dyn_guid_set = 0;
+-
+- wait_event(intf->waitq, bmc->dyn_guid_set != 2);
++ else
++ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+ /* dyn_guid_set makes the guid data available. */
+ smp_rmb();
--- /dev/null
+From d7d27cfc5cf0766a26a8f56868c5ad5434735126 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 10 Apr 2020 14:33:43 -0700
+Subject: kmod: make request_module() return an error when autoloading is disabled
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit d7d27cfc5cf0766a26a8f56868c5ad5434735126 upstream.
+
+Patch series "module autoloading fixes and cleanups", v5.
+
+This series fixes a bug where request_module() was reporting success to
+kernel code when module autoloading had been completely disabled via
+'echo > /proc/sys/kernel/modprobe'.
+
+It also addresses the issues raised on the original thread
+(https://lkml.kernel.org/lkml/20200310223731.126894-1-ebiggers@kernel.org/T/#u)
+by documenting the modprobe sysctl, adding a self-test for the empty path
+case, and downgrading a user-reachable WARN_ONCE().
+
+This patch (of 4):
+
+It's long been possible to disable kernel module autoloading completely
+(while still allowing manual module insertion) by setting
+/proc/sys/kernel/modprobe to the empty string.
+
+This can be preferable to setting it to a nonexistent file since it
+avoids the overhead of an attempted execve(), avoids potential
+deadlocks, and avoids the call to security_kernel_module_request() and
+thus on SELinux-based systems eliminates the need to write SELinux rules
+to dontaudit module_request.
+
+However, when module autoloading is disabled in this way,
+request_module() returns 0. This is broken because callers expect 0 to
+mean that the module was successfully loaded.
+
+Apparently this was never noticed because this method of disabling
+module autoloading isn't used much, and also most callers don't use the
+return value of request_module() since it's always necessary to check
+whether the module registered its functionality or not anyway.
+
+But improperly returning 0 can indeed confuse a few callers, for example
+get_fs_type() in fs/filesystems.c where it causes a WARNING to be hit:
+
+ if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
+ fs = __get_fs_type(name, len);
+ WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name);
+ }
+
+This is easily reproduced with:
+
+ echo > /proc/sys/kernel/modprobe
+ mount -t NONEXISTENT none /
+
+It causes:
+
+ request_module fs-NONEXISTENT succeeded, but still no fs?
+ WARNING: CPU: 1 PID: 1106 at fs/filesystems.c:275 get_fs_type+0xd6/0xf0
+ [...]
+
+This should actually use pr_warn_once() rather than WARN_ONCE(), since
+it's also user-reachable if userspace immediately unloads the module.
+Regardless, request_module() should correctly return an error when it
+fails. So let's make it return -ENOENT, which matches the error when
+the modprobe binary doesn't exist.
+
+I've also sent patches to document and test this case.
+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Jessica Yu <jeyu@kernel.org>
+Acked-by: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jeff Vander Stoep <jeffv@google.com>
+Cc: Ben Hutchings <benh@debian.org>
+Cc: Josh Triplett <josh@joshtriplett.org>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200310223731.126894-1-ebiggers@kernel.org
+Link: http://lkml.kernel.org/r/20200312202552.241885-1-ebiggers@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kmod.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -120,7 +120,7 @@ out:
+ * invoke it.
+ *
+ * If module auto-loading support is disabled then this function
+- * becomes a no-operation.
++ * simply returns -ENOENT.
+ */
+ int __request_module(bool wait, const char *fmt, ...)
+ {
+@@ -137,7 +137,7 @@ int __request_module(bool wait, const ch
+ WARN_ON_ONCE(wait && current_is_async());
+
+ if (!modprobe_path[0])
+- return 0;
++ return -ENOENT;
+
+ va_start(args, fmt);
+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
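
The caller-visible effect is the return-value change only. A rough model of the contract after this patch, using a stand-in function rather than the kernel API:

#include <stdio.h>
#include <errno.h>

/* Models __request_module() with /proc/sys/kernel/modprobe set empty. */
static int request_module_sketch(int autoload_enabled)
{
        if (!autoload_enabled)
                return -ENOENT;         /* used to be 0, i.e. fake success */
        return 0;                       /* pretend the module was loaded */
}

int main(void)
{
        if (request_module_sketch(0) == 0)
                printf("caller would wrongly assume the module is loaded\n");
        else
                printf("caller gets -ENOENT and can fail cleanly\n");
        return 0;
}
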
--- /dev/null
+From 8305f72f952cff21ce8109dc1ea4b321c8efc5af Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Wed, 27 Mar 2019 17:02:54 +0800
+Subject: libata: Return correct status in sata_pmp_eh_recover_pm() when ATA_DFLAG_DETACH is set
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit 8305f72f952cff21ce8109dc1ea4b321c8efc5af upstream.
+
+During system resume from suspend, this can be observed on an ASM1062 PMP
+controller:
+
+ata10.01: SATA link down (SStatus 0 SControl 330)
+ata10.02: hard resetting link
+ata10.02: SATA link down (SStatus 0 SControl 330)
+ata10.00: configured for UDMA/133
+Kernel panic - not syncing: stack-protector: Kernel
+ in: sata_pmp_eh_recover+0xa2b/0xa40
+
+CPU: 2 PID: 230 Comm: scsi_eh_9 Tainted: P OE
+#49-Ubuntu
+Hardware name: System manufacturer System Product
+ 1001 12/10/2017
+Call Trace:
+dump_stack+0x63/0x8b
+panic+0xe4/0x244
+? sata_pmp_eh_recover+0xa2b/0xa40
+__stack_chk_fail+0x19/0x20
+sata_pmp_eh_recover+0xa2b/0xa40
+? ahci_do_softreset+0x260/0x260 [libahci]
+? ahci_do_hardreset+0x140/0x140 [libahci]
+? ata_phys_link_offline+0x60/0x60
+? ahci_stop_engine+0xc0/0xc0 [libahci]
+sata_pmp_error_handler+0x22/0x30
+ahci_error_handler+0x45/0x80 [libahci]
+ata_scsi_port_error_handler+0x29b/0x770
+? ata_scsi_cmd_error_handler+0x101/0x140
+ata_scsi_error+0x95/0xd0
+? scsi_try_target_reset+0x90/0x90
+scsi_error_handler+0xd0/0x5b0
+kthread+0x121/0x140
+? scsi_eh_get_sense+0x200/0x200
+? kthread_create_worker_on_cpu+0x70/0x70
+ret_from_fork+0x22/0x40
+Kernel Offset: 0xcc00000 from 0xffffffff81000000
+(relocation range: 0xffffffff80000000-0xffffffffbfffffff)
+
+Since sata_pmp_eh_recover_pmp() doesn't set rc when ATA_DFLAG_DETACH is
+set, sata_pmp_eh_recover() continues to run. During retry it triggers
+the stack protector.
+
+Set correct rc in sata_pmp_eh_recover_pmp() to let sata_pmp_eh_recover()
+jump to pmp_fail directly.
+
+BugLink: https://bugs.launchpad.net/bugs/1821434
+Cc: stable@vger.kernel.org
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-pmp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struc
+
+ if (dev->flags & ATA_DFLAG_DETACH) {
+ detach = 1;
++ rc = -ENODEV;
+ goto fail;
+ }
+
--- /dev/null
+From 75da98586af75eb80664714a67a9895bf0a5517e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Thu, 2 Apr 2020 10:34:36 -0400
+Subject: NFS: finish_automount() requires us to hold 2 refs to the mount record
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 75da98586af75eb80664714a67a9895bf0a5517e upstream.
+
+We must not return from nfs_d_automount() without holding 2 references
+to the mount record. Doing so will trigger the BUG() in finish_automount().
+Also ensure that we don't try to reschedule the automount timer with
+a negative or zero timeout value.
+
+Fixes: 22a1ae9a93fb ("NFS: If nfs_mountpoint_expiry_timeout < 0, do not expire submounts")
+Cc: stable@vger.kernel.org # v5.5+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/namespace.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -145,6 +145,7 @@ struct vfsmount *nfs_d_automount(struct
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM);
+ struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
+ struct nfs_client *client = server->nfs_client;
++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+ int ret;
+
+ if (IS_ROOT(path->dentry))
+@@ -190,12 +191,12 @@ struct vfsmount *nfs_d_automount(struct
+ if (IS_ERR(mnt))
+ goto out_fc;
+
+- if (nfs_mountpoint_expiry_timeout < 0)
++ mntget(mnt); /* prevent immediate expiration */
++ if (timeout <= 0)
+ goto out_fc;
+
+- mntget(mnt); /* prevent immediate expiration */
+ mnt_set_expiry(mnt, &nfs_automount_list);
+- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
++ schedule_delayed_work(&nfs_automount_task, timeout);
+
+ out_fc:
+ put_fs_context(fc);
+@@ -233,10 +234,11 @@ const struct inode_operations nfs_referr
+ static void nfs_expire_automounts(struct work_struct *work)
+ {
+ struct list_head *list = &nfs_automount_list;
++ int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
+
+ mark_mounts_for_expiry(list);
+- if (!list_empty(list))
+- schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
++ if (!list_empty(list) && timeout > 0)
++ schedule_delayed_work(&nfs_automount_task, timeout);
+ }
+
+ void nfs_release_automount_timer(void)
--- /dev/null
+From 529af90576cfa44aa107e9876e2ebaa053983986 Mon Sep 17 00:00:00 2001
+From: Scott Mayhew <smayhew@redhat.com>
+Date: Thu, 2 Apr 2020 17:20:44 -0400
+Subject: NFS: Fix a few constant_table array definitions
+
+From: Scott Mayhew <smayhew@redhat.com>
+
+commit 529af90576cfa44aa107e9876e2ebaa053983986 upstream.
+
+nfs_vers_tokens, nfs_xprt_protocol_tokens, and nfs_secflavor_tokens were
+all missing an empty item at the end of the array, allowing
+lookup_constant() to potentially walk off the end and trigger an oops.
+
+Reported-by: Olga Kornievskaia <aglo@umich.edu>
+Signed-off-by: Scott Mayhew <smayhew@redhat.com>
+Fixes: e38bb238ed8c ("NFS: Convert mount option parsing to use functionality from fs_parser.h")
+Cc: stable@vger.kernel.org # v5.6
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/fs_context.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -190,6 +190,7 @@ static const struct constant_table nfs_v
+ { "4.0", Opt_vers_4_0 },
+ { "4.1", Opt_vers_4_1 },
+ { "4.2", Opt_vers_4_2 },
++ {}
+ };
+
+ enum {
+@@ -202,13 +203,14 @@ enum {
+ nr__Opt_xprt
+ };
+
+-static const struct constant_table nfs_xprt_protocol_tokens[nr__Opt_xprt] = {
++static const struct constant_table nfs_xprt_protocol_tokens[] = {
+ { "rdma", Opt_xprt_rdma },
+ { "rdma6", Opt_xprt_rdma6 },
+ { "tcp", Opt_xprt_tcp },
+ { "tcp6", Opt_xprt_tcp6 },
+ { "udp", Opt_xprt_udp },
+ { "udp6", Opt_xprt_udp6 },
++ {}
+ };
+
+ enum {
+@@ -239,6 +241,7 @@ static const struct constant_table nfs_s
+ { "spkm3i", Opt_sec_spkmi },
+ { "spkm3p", Opt_sec_spkmp },
+ { "sys", Opt_sec_sys },
++ {}
+ };
+
+ /*
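
fs_parser walks these tables until it hits an empty entry, which is why each one must end with {}. A stand-alone sketch of that lookup pattern (the helper below is a simplified stand-in, not the kernel's lookup_constant() signature, and the table contents are invented):

#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

static int lookup(const struct constant_table *tbl, const char *name)
{
        for (; tbl->name; tbl++)        /* stops only at the sentinel entry */
                if (!strcmp(tbl->name, name))
                        return tbl->value;
        return -1;
}

static const struct constant_table nfs_vers_sketch[] = {
        { "3",   3 },
        { "4.2", 42 },
        { NULL, 0 }     /* the "{}" sentinel; without it a miss walks off the end */
};

int main(void)
{
        printf("4.2  -> %d\n", lookup(nfs_vers_sketch, "4.2"));
        printf("miss -> %d\n", lookup(nfs_vers_sketch, "bogus"));
        return 0;
}
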
--- /dev/null
+From add42de31721fa29ed77a7ce388674d69f9d31a4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 1 Apr 2020 10:07:16 -0400
+Subject: NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit add42de31721fa29ed77a7ce388674d69f9d31a4 upstream.
+
+When we detach a subrequest from the list, we must also release the
+reference it holds to the parent.
+
+Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases")
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/write.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -444,6 +444,7 @@ nfs_destroy_unlinked_subrequests(struct
+ }
+
+ subreq->wb_head = subreq;
++ nfs_release_request(old_head);
+
+ if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
+ nfs_release_request(subreq);
--- /dev/null
+From dc9dc2febb17f72e9878eb540ad3996f7984239a Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Sun, 29 Mar 2020 19:55:05 -0400
+Subject: NFS: Fix use-after-free issues in nfs_pageio_add_request()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit dc9dc2febb17f72e9878eb540ad3996f7984239a upstream.
+
+We need to ensure that we create the mirror requests before calling
+nfs_pageio_add_request_mirror() on the request we are adding.
+Otherwise, we can end up with a use-after-free if the call to
+nfs_pageio_add_request_mirror() triggers I/O.
+
+Fixes: c917cfaf9bbe ("NFS: Fix up NFS I/O subrequest creation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/pagelist.c | 48 ++++++++++++++++++++++++------------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1177,38 +1177,38 @@ int nfs_pageio_add_request(struct nfs_pa
+ if (desc->pg_error < 0)
+ goto out_failed;
+
+- for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+- if (midx) {
+- nfs_page_group_lock(req);
+-
+- /* find the last request */
+- for (lastreq = req->wb_head;
+- lastreq->wb_this_page != req->wb_head;
+- lastreq = lastreq->wb_this_page)
+- ;
+-
+- dupreq = nfs_create_subreq(req, lastreq,
+- pgbase, offset, bytes);
+-
+- nfs_page_group_unlock(req);
+- if (IS_ERR(dupreq)) {
+- desc->pg_error = PTR_ERR(dupreq);
+- goto out_failed;
+- }
+- } else
+- dupreq = req;
++ /* Create the mirror instances first, and fire them off */
++ for (midx = 1; midx < desc->pg_mirror_count; midx++) {
++ nfs_page_group_lock(req);
++
++ /* find the last request */
++ for (lastreq = req->wb_head;
++ lastreq->wb_this_page != req->wb_head;
++ lastreq = lastreq->wb_this_page)
++ ;
++
++ dupreq = nfs_create_subreq(req, lastreq,
++ pgbase, offset, bytes);
++
++ nfs_page_group_unlock(req);
++ if (IS_ERR(dupreq)) {
++ desc->pg_error = PTR_ERR(dupreq);
++ goto out_failed;
++ }
+
+- if (nfs_pgio_has_mirroring(desc))
+- desc->pg_mirror_idx = midx;
++ desc->pg_mirror_idx = midx;
+ if (!nfs_pageio_add_request_mirror(desc, dupreq))
+ goto out_cleanup_subreq;
+ }
+
++ desc->pg_mirror_idx = 0;
++ if (!nfs_pageio_add_request_mirror(desc, req))
++ goto out_failed;
++
+ return 1;
+
+ out_cleanup_subreq:
+- if (req != dupreq)
+- nfs_pageio_cleanup_request(desc, dupreq);
++ nfs_pageio_cleanup_request(desc, dupreq);
+ out_failed:
+ nfs_pageio_error_cleanup(desc);
+ return 0;
--- /dev/null
+From 69afd267982e733a555fede4e85fe30329ed0588 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Thu, 19 Mar 2020 11:30:38 -0400
+Subject: nfsd: fsnotify on rmdir under nfsd/clients/
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+commit 69afd267982e733a555fede4e85fe30329ed0588 upstream.
+
+Userspace should be able to monitor nfsd/clients/ to see when clients
+come and go, but we're failing to send fsnotify events.
+
+Cc: stable@kernel.org
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfsctl.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1333,6 +1333,7 @@ void nfsd_client_rmdir(struct dentry *de
+ dget(dentry);
+ ret = simple_rmdir(dir, dentry);
+ WARN_ON_ONCE(ret);
++ fsnotify_rmdir(dir, dentry);
+ d_delete(dentry);
+ inode_unlock(dir);
+ }
--- /dev/null
+From 783fda856e1034dee90a873f7654c418212d12d7 Mon Sep 17 00:00:00 2001
+From: Changwei Ge <chge@linux.alibaba.com>
+Date: Fri, 10 Apr 2020 14:32:38 -0700
+Subject: ocfs2: no need try to truncate file beyond i_size
+
+From: Changwei Ge <chge@linux.alibaba.com>
+
+commit 783fda856e1034dee90a873f7654c418212d12d7 upstream.
+
+With FALLOC_FL_PUNCH_HOLE mode set, the fallocate(2) offset can exceed
+the inode size. Ocfs2 currently doesn't allow an offset beyond the inode
+size, but this restriction is not necessary and violates fallocate(2)
+semantics.
+
+If fallocate(2) offset is beyond inode size, just return success and do
+nothing further.
+
+Otherwise, ocfs2 will crash the kernel.
+
+ kernel BUG at fs/ocfs2//alloc.c:7264!
+ ocfs2_truncate_inline+0x20f/0x360 [ocfs2]
+ ocfs2_remove_inode_range+0x23c/0xcb0 [ocfs2]
+ __ocfs2_change_file_space+0x4a5/0x650 [ocfs2]
+ ocfs2_fallocate+0x83/0xa0 [ocfs2]
+ vfs_fallocate+0x148/0x230
+ SyS_fallocate+0x48/0x80
+ do_syscall_64+0x79/0x170
+
+Signed-off-by: Changwei Ge <chge@linux.alibaba.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200407082754.17565-1-chge@linux.alibaba.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/alloc.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+
++ /* No need to punch hole beyond i_size. */
++ if (start >= i_size_read(inode))
++ return 0;
++
+ if (end > i_size_read(inode))
+ end = i_size_read(inode);
+
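
The guard itself is simple range math: a hole entirely past i_size is a no-op, and one straddling i_size is clamped. A sketch with made-up byte offsets, not the ocfs2 code:

#include <stdio.h>

static void punch_hole_sketch(unsigned long long start, unsigned long long end,
                              unsigned long long i_size)
{
        if (start >= i_size) {          /* nothing to punch, no BUG_ON() hit */
                printf("[%llu, %llu) beyond EOF %llu: no-op\n", start, end, i_size);
                return;
        }
        if (end > i_size)
                end = i_size;           /* clamp to EOF, as before */
        printf("punching [%llu, %llu)\n", start, end);
}

int main(void)
{
        punch_hole_sketch(8192, 12288, 4096);   /* entirely beyond EOF */
        punch_hole_sketch(0, 8192, 4096);       /* clamped to EOF */
        return 0;
}
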
--- /dev/null
+From b9c9ce4e598e012ca7c1813fae2f4d02395807de Mon Sep 17 00:00:00 2001
+From: Sam Lunt <samueljlunt@gmail.com>
+Date: Fri, 31 Jan 2020 12:11:23 -0600
+Subject: perf tools: Support Python 3.8+ in Makefile
+
+From: Sam Lunt <samueljlunt@gmail.com>
+
+commit b9c9ce4e598e012ca7c1813fae2f4d02395807de upstream.
+
+Python 3.8 changed the output of 'python-config --ldflags' to no longer
+include the '-lpythonX.Y' flag (this apparently fixed an issue loading
+modules with a statically linked Python executable). The libpython
+feature check in linux/build/feature fails if the Python library is not
+included in the FEATURE_CHECK_LDFLAGS-libpython variable.
+
+This adds a check in the Makefile to determine if PYTHON_CONFIG accepts
+the '--embed' flag and passes that flag alongside '--ldflags' if so.
+
+tools/perf is the only place the libpython feature check is used.
+
+Signed-off-by: Sam Lunt <samuel.j.lunt@gmail.com>
+Tested-by: He Zhe <zhe.he@windriver.com>
+Link: http://lore.kernel.org/lkml/c56be2e1-8111-9dfe-8298-f7d0f9ab7431@windriver.com
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: trivial@kernel.org
+Cc: stable@kernel.org
+Link: http://lore.kernel.org/lkml/20200131181123.tmamivhq4b7uqasr@gmail.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/Makefile.config | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -228,8 +228,17 @@ strip-libs = $(filter-out -l%,$(1))
+
+ PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+
++# Python 3.8 changed the output of `python-config --ldflags` to not include the
++# '-lpythonX.Y' flag unless '--embed' is also passed. The feature check for
++# libpython fails if that flag is not included in LDFLAGS
++ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0)
++ PYTHON_CONFIG_LDFLAGS := --ldflags --embed
++else
++ PYTHON_CONFIG_LDFLAGS := --ldflags
++endif
++
+ ifdef PYTHON_CONFIG
+- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
++ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null)
+ PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+ PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
+ PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
--- /dev/null
+From a83836dbc53e96f13fec248ecc201d18e1e3111d Mon Sep 17 00:00:00 2001
+From: Libor Pechacek <lpechacek@suse.cz>
+Date: Fri, 31 Jan 2020 14:28:29 +0100
+Subject: powerpc/pseries: Avoid NULL pointer dereference when drmem is unavailable
+
+From: Libor Pechacek <lpechacek@suse.cz>
+
+commit a83836dbc53e96f13fec248ecc201d18e1e3111d upstream.
+
+In guests without hotpluggable memory the drmem structure is only
+zero-initialized. Trying to manipulate DLPAR parameters results in a crash.
+
+ $ echo "memory add count 1" > /sys/kernel/dlpar
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
+ ...
+ NIP: c0000000000ff294 LR: c0000000000ff248 CTR: 0000000000000000
+ REGS: c0000000fb9d3880 TRAP: 0300 Tainted: G E (5.5.0-rc6-2-default)
+ MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 28242428 XER: 20000000
+ CFAR: c0000000009a6c10 DAR: 0000000000000010 DSISR: 40000000 IRQMASK: 0
+ ...
+ NIP dlpar_memory+0x6e4/0xd00
+ LR dlpar_memory+0x698/0xd00
+ Call Trace:
+ dlpar_memory+0x698/0xd00 (unreliable)
+ handle_dlpar_errorlog+0xc0/0x190
+ dlpar_store+0x198/0x4a0
+ kobj_attr_store+0x30/0x50
+ sysfs_kf_write+0x64/0x90
+ kernfs_fop_write+0x1b0/0x290
+ __vfs_write+0x3c/0x70
+ vfs_write+0xd0/0x260
+ ksys_write+0xdc/0x130
+ system_call+0x5c/0x68
+
+Taking a closer look at the code, I can see that for_each_drmem_lmb is a
+macro expanding into `for (lmb = &drmem_info->lmbs[0]; lmb <=
+&drmem_info->lmbs[drmem_info->n_lmbs - 1]; lmb++)`. When drmem_info->lmbs
+is NULL, the loop would iterate through the whole address range if it
+weren't stopped by the NULL pointer dereference on the next line.
+
+This patch aligns for_each_drmem_lmb and for_each_drmem_lmb_in_range
+macro behavior with the common C semantics, where the end marker does
+not belong to the scanned range, and alters get_lmb_range() semantics.
+As a side effect, the wraparound observed in the crash is prevented.
+
+Fixes: 6c6ea53725b3 ("powerpc/mm: Separate ibm, dynamic-memory data from DT format")
+Cc: stable@vger.kernel.org # v4.16+
+Signed-off-by: Libor Pechacek <lpechacek@suse.cz>
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200131132829.10281-1-msuchanek@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/drmem.h | 4 ++--
+ arch/powerpc/platforms/pseries/hotplug-memory.c | 8 ++++----
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/drmem.h
++++ b/arch/powerpc/include/asm/drmem.h
+@@ -27,12 +27,12 @@ struct drmem_lmb_info {
+ extern struct drmem_lmb_info *drmem_info;
+
+ #define for_each_drmem_lmb_in_range(lmb, start, end) \
+- for ((lmb) = (start); (lmb) <= (end); (lmb)++)
++ for ((lmb) = (start); (lmb) < (end); (lmb)++)
+
+ #define for_each_drmem_lmb(lmb) \
+ for_each_drmem_lmb_in_range((lmb), \
+ &drmem_info->lmbs[0], \
+- &drmem_info->lmbs[drmem_info->n_lmbs - 1])
++ &drmem_info->lmbs[drmem_info->n_lmbs])
+
+ /*
+ * The of_drconf_cell_v1 struct defines the layout of the LMB data
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -223,7 +223,7 @@ static int get_lmb_range(u32 drc_index,
+ struct drmem_lmb **end_lmb)
+ {
+ struct drmem_lmb *lmb, *start, *end;
+- struct drmem_lmb *last_lmb;
++ struct drmem_lmb *limit;
+
+ start = NULL;
+ for_each_drmem_lmb(lmb) {
+@@ -236,10 +236,10 @@ static int get_lmb_range(u32 drc_index,
+ if (!start)
+ return -EINVAL;
+
+- end = &start[n_lmbs - 1];
++ end = &start[n_lmbs];
+
+- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
+- if (end > last_lmb)
++ limit = &drmem_info->lmbs[drmem_info->n_lmbs];
++ if (end > limit)
+ return -EINVAL;
+
+ *start_lmb = start;
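
The new macros use the usual half-open C idiom, which is what keeps the zero-initialized case safe: with n_lmbs == 0 the loop body never executes. A stand-alone illustration with fabricated LMB data (the struct and macro names are simplified for the sketch):

#include <stdio.h>

struct drmem_lmb_sketch { unsigned int drc_index; };

/* Half-open range: the end pointer is one past the last element. */
#define for_each_lmb_in_range(lmb, start, end) \
        for ((lmb) = (start); (lmb) < (end); (lmb)++)

int main(void)
{
        struct drmem_lmb_sketch lmbs[4] = { {1000}, {1001}, {1002}, {1003} };
        unsigned int n_lmbs = 0;        /* guest with no hotpluggable memory */
        struct drmem_lmb_sketch *lmb;

        for_each_lmb_in_range(lmb, &lmbs[0], &lmbs[n_lmbs])
                printf("visiting LMB %u\n", lmb->drc_index);

        /* The old closed-interval end marker, &lmbs[n_lmbs - 1], wraps around
         * for n_lmbs == 0 and the walk runs far past the array. */
        printf("n_lmbs == 0: visited nothing\n");
        return 0;
}
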
--- /dev/null
+From 6c7c851f1b666a8a455678a0b480b9162de86052 Mon Sep 17 00:00:00 2001
+From: Michael Mueller <mimu@linux.ibm.com>
+Date: Tue, 3 Mar 2020 16:42:01 +0100
+Subject: s390/diag: fix display of diagnose call statistics
+
+From: Michael Mueller <mimu@linux.ibm.com>
+
+commit 6c7c851f1b666a8a455678a0b480b9162de86052 upstream.
+
+Show the full diag statistic table and not just parts of it.
+
+The issue surfaced in a KVM guest configured with fewer vcpus than
+NR_DIAG_STAT.
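+
+For context, a minimal sketch of the seq_file ->start()/->next() contract
+involved here (hypothetical NR_ROWS table, not the diag code): entries
+keep being produced only while a non-NULL cookie is returned, so the
+bound has to be the number of table rows, not the number of (v)CPUs.
+
+  #include <linux/seq_file.h>
+
+  #define NR_ROWS 16
+
+  static void *tbl_start(struct seq_file *m, loff_t *pos)
+  {
+      /* one extra position is reserved for the header line */
+      return *pos <= NR_ROWS ? (void *)((unsigned long)*pos + 1) : NULL;
+  }
+
+  static void *tbl_next(struct seq_file *m, void *v, loff_t *pos)
+  {
+      ++*pos;
+      return tbl_start(m, pos);
+  }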
+
+Fixes: 1ec2772e0c3c ("s390/diag: add a statistic for diagnose calls")
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael Mueller <mimu@linux.ibm.com>
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/diag.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/diag.c
++++ b/arch/s390/kernel/diag.c
+@@ -84,7 +84,7 @@ static int show_diag_stat(struct seq_fil
+
+ static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
+ {
+- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
++ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
+ }
+
+ static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
--- /dev/null
+From 835214f5d5f516a38069bc077c879c7da00d6108 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Mon, 27 Jan 2020 16:23:03 -0800
+Subject: scsi: lpfc: Fix broken Credit Recovery after driver load
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit 835214f5d5f516a38069bc077c879c7da00d6108 upstream.
+
+When the driver is set to enable BB credit recovery, the switch displays
+the setting as Inactive. If the link bounces, it switches to Active.
+
+During link up processing, the driver currently does a MBX_READ_SPARAM
+followed by a MBX_CONFIG_LINK. These mbox commands are queued to be
+executed, one at a time and the completion is processed by the worker
+thread. Since the MBX_READ_SPARAM is done BEFORE the MBX_CONFIG_LINK, the
+BB_SC_N bit is never set in the returned values. BB Credit recovery status
+only gets set after the driver requests the feature in CONFIG_LINK, which
+is done after the link up. Thus the ordering of READ_SPARAM needs to follow
+the CONFIG_LINK.
+
+Fix by reordering so that READ_SPARAM is done after CONFIG_LINK. Added a
+HBA_DEFER_FLOGI flag so that any FLOGI handling waits until after the
+READ_SPARAM is done so that the proper BB credit value is set in the FLOGI
+payload.
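+
+A toy user-space model of the new ordering (hypothetical helpers, not the
+lpfc code itself):
+
+  #include <stdio.h>
+
+  #define HBA_DEFER_FLOGI 0x800000
+
+  static unsigned int hba_flag;
+
+  static void link_up(int bbcr_enabled)
+  {
+      printf("issue MBX_CONFIG_LINK (requests BB credit recovery)\n");
+      printf("issue MBX_READ_SPARAM (returns CSPs with BB_SC_N set)\n");
+      if (bbcr_enabled)
+          hba_flag |= HBA_DEFER_FLOGI;    /* hold the FLOGI back */
+  }
+
+  static void read_sparam_cmpl(void)
+  {
+      if (hba_flag & HBA_DEFER_FLOGI) {
+          printf("send FLOGI with the proper BB credit value\n");
+          hba_flag &= ~HBA_DEFER_FLOGI;
+      }
+  }
+
+  int main(void)
+  {
+      link_up(1);
+      read_sparam_cmpl();
+      return 0;
+  }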
+
+Fixes: 6bfb16208298 ("scsi: lpfc: Fix configuration of BB credit recovery in service parameters")
+Cc: <stable@vger.kernel.org> # v5.4+
+Link: https://lore.kernel.org/r/20200128002312.16346-4-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/lpfc/lpfc.h | 1
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 59 +++++++++++++++++++++++++--------------
+ 2 files changed, 40 insertions(+), 20 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -749,6 +749,7 @@ struct lpfc_hba {
+ * capability
+ */
+ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
++#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
+
+ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+ struct lpfc_dmabuf slim2p;
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1163,13 +1163,16 @@ lpfc_mbx_cmpl_local_config_link(struct l
+ }
+
+ /* Start discovery by sending a FLOGI. port_state is identically
+- * LPFC_FLOGI while waiting for FLOGI cmpl
++ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
++ * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
+ */
+- if (vport->port_state != LPFC_FLOGI)
+- lpfc_initial_flogi(vport);
+- else if (vport->fc_flag & FC_PT2PT)
+- lpfc_disc_start(vport);
+-
++ if (vport->port_state != LPFC_FLOGI) {
++ if (!(phba->hba_flag & HBA_DEFER_FLOGI))
++ lpfc_initial_flogi(vport);
++ } else {
++ if (vport->fc_flag & FC_PT2PT)
++ lpfc_disc_start(vport);
++ }
+ return;
+
+ out:
+@@ -3094,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hb
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
++
++ /* Check if sending the FLOGI is being deferred to after we get
++ * up to date CSPs from MBX_READ_SPARAM.
++ */
++ if (phba->hba_flag & HBA_DEFER_FLOGI) {
++ lpfc_initial_flogi(vport);
++ phba->hba_flag &= ~HBA_DEFER_FLOGI;
++ }
+ return;
+
+ out:
+@@ -3224,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba
+ }
+
+ lpfc_linkup(phba);
++ sparam_mbox = NULL;
++
++ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
++ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (!cfglink_mbox)
++ goto out;
++ vport->port_state = LPFC_LOCAL_CFG_LINK;
++ lpfc_config_link(phba, cfglink_mbox);
++ cfglink_mbox->vport = vport;
++ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
++ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
++ if (rc == MBX_NOT_FINISHED) {
++ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
++ goto out;
++ }
++ }
++
+ sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!sparam_mbox)
+ goto out;
+@@ -3244,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba
+ goto out;
+ }
+
+- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!cfglink_mbox)
+- goto out;
+- vport->port_state = LPFC_LOCAL_CFG_LINK;
+- lpfc_config_link(phba, cfglink_mbox);
+- cfglink_mbox->vport = vport;
+- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+- if (rc == MBX_NOT_FINISHED) {
+- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+- goto out;
+- }
+- } else {
++ if (phba->hba_flag & HBA_FCOE_MODE) {
+ vport->port_state = LPFC_VPORT_UNKNOWN;
+ /*
+ * Add the driver's default FCF record at FCF index 0 now. This
+@@ -3314,6 +3329,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba
+ }
+ /* Reset FCF roundrobin bmask for new discovery */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
++ } else {
++ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
++ !(phba->link_flag & LS_LOOPBACK_MODE))
++ phba->hba_flag |= HBA_DEFER_FLOGI;
+ }
+
+ /* Prepare for LINK up registrations */
--- /dev/null
+From 0ab384a49c548baf132ccef249f78d9c6c506380 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Mon, 27 Jan 2020 16:23:02 -0800
+Subject: scsi: lpfc: Fix lpfc_io_buf resource leak in lpfc_get_scsi_buf_s4 error path
+
+From: James Smart <jsmart2021@gmail.com>
+
+commit 0ab384a49c548baf132ccef249f78d9c6c506380 upstream.
+
+If a call to lpfc_get_cmd_rsp_buf_per_hdwq returns NULL (memory allocation
+failure), a previously allocated lpfc_io_buf resource is leaked.
+
+Fix by releasing the lpfc_io_buf resource in the failure path.
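+
+The shape of the fix in a minimal, generic sketch (hypothetical struct
+io_buf, not the lpfc structures):
+
+  #include <stdlib.h>
+
+  struct io_buf { char *rsp; };
+
+  static struct io_buf *get_io_buf(void)
+  {
+      struct io_buf *buf = malloc(sizeof(*buf));
+
+      if (!buf)
+          return NULL;
+
+      buf->rsp = malloc(64);
+      if (!buf->rsp) {
+          free(buf);    /* the fix: don't leak the first allocation */
+          return NULL;
+      }
+      return buf;
+  }
+
+  int main(void)
+  {
+      struct io_buf *buf = get_io_buf();
+
+      if (buf) {
+          free(buf->rsp);
+          free(buf);
+      }
+      return 0;
+  }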
+
+Fixes: d79c9e9d4b3d ("scsi: lpfc: Support dynamic unbounded SGL lists on G7 hardware.")
+Cc: <stable@vger.kernel.org> # v5.4+
+Link: https://lore.kernel.org/r/20200128002312.16346-3-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/lpfc/lpfc_scsi.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -671,8 +671,10 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *ph
+ lpfc_cmd->prot_data_type = 0;
+ #endif
+ tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
+- if (!tmp)
++ if (!tmp) {
++ lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
+ return NULL;
++ }
+
+ lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
+ lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
--- /dev/null
+From 5a244e0ea67b293abb1d26c825db2ddde5f2862f Mon Sep 17 00:00:00 2001
+From: Stanley Chu <stanley.chu@mediatek.com>
+Date: Wed, 29 Jan 2020 18:52:50 +0800
+Subject: scsi: ufs: fix Auto-Hibern8 error detection
+
+From: Stanley Chu <stanley.chu@mediatek.com>
+
+commit 5a244e0ea67b293abb1d26c825db2ddde5f2862f upstream.
+
+Auto-Hibern8 may be disabled by some vendors or via sysfs at runtime even
+if the Auto-Hibern8 capability is supported by the host. If the capability
+is supported by the host but Auto-Hibern8 is not actually enabled, an
+Auto-Hibern8 error shall not be raised.
+
+To fix this, provide a way to detect if Auto-Hibern8 is actually enabled
+first, and bypass Auto-Hibern8 disabling case in
+ufshcd_is_auto_hibern8_error().
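+
+A minimal sketch of the added check (the mask value below is illustrative
+only, not the real UFSHCI register layout): Auto-Hibern8 counts as enabled
+when the idle timer field of the AHIT value is non-zero.
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define AH8_TIMER_MASK 0x3ffu    /* illustrative timer field mask */
+
+  static bool auto_hibern8_enabled(uint32_t ahit)
+  {
+      return (ahit & AH8_TIMER_MASK) != 0;
+  }
+
+  static bool is_auto_hibern8_error(bool supported, uint32_t ahit)
+  {
+      /* capability absent or timer programmed to 0: never an AH8 error */
+      if (!supported || !auto_hibern8_enabled(ahit))
+          return false;
+      return true;    /* the real driver also checks the interrupt mask */
+  }
+
+  int main(void)
+  {
+      printf("%d %d\n", is_auto_hibern8_error(true, 0),
+             is_auto_hibern8_error(true, 150));
+      return 0;
+  }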
+
+Fixes: 821744403913 ("scsi: ufs: Add error-handling of Auto-Hibernate")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200129105251.12466-4-stanley.chu@mediatek.com
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Reviewed-by: Asutosh Das <asutoshd@codeaurora.org>
+Reviewed-by: Can Guo <cang@codeaurora.org>
+Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/ufs/ufshcd.c | 3 ++-
+ drivers/scsi/ufs/ufshcd.h | 6 ++++++
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -5486,7 +5486,8 @@ static irqreturn_t ufshcd_update_uic_err
+ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+ u32 intr_mask)
+ {
+- if (!ufshcd_is_auto_hibern8_supported(hba))
++ if (!ufshcd_is_auto_hibern8_supported(hba) ||
++ !ufshcd_is_auto_hibern8_enabled(hba))
+ return false;
+
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -55,6 +55,7 @@
+ #include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/bitfield.h>
+ #include "unipro.h"
+
+ #include <asm/irq.h>
+@@ -773,6 +774,11 @@ static inline bool ufshcd_is_auto_hibern
+ return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+ }
+
++static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
++{
++ return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
++}
++
+ #define ufshcd_writel(hba, val, reg) \
+ writel((val), (hba)->mmio_base + (reg))
+ #define ufshcd_readl(hba, reg) \
--- /dev/null
+From 819732be9fea728623e1ed84eba28def7384ad1f Mon Sep 17 00:00:00 2001
+From: Steffen Maier <maier@linux.ibm.com>
+Date: Thu, 12 Mar 2020 18:44:56 +0100
+Subject: scsi: zfcp: fix missing erp_lock in port recovery trigger for point-to-point
+
+From: Steffen Maier <maier@linux.ibm.com>
+
+commit 819732be9fea728623e1ed84eba28def7384ad1f upstream.
+
+v2.6.27 commit cc8c282963bd ("[SCSI] zfcp: Automatically attach remote
+ports") introduced zfcp automatic port scan.
+
+Before that, the user had to use the sysfs attribute "port_add" of an FCP
+device (adapter) to add and open remote (target) ports, even for the remote
+peer port in point-to-point topology. That code path did a proper port open
+recovery trigger taking the erp_lock.
+
+Since above commit, a new helper function zfcp_erp_open_ptp_port()
+performed an UNlocked port open recovery trigger. This can race with other
+parallel recovery triggers. In zfcp_erp_action_enqueue() this could corrupt
+e.g. adapter->erp_total_count or adapter->erp_ready_head.
+
+As already found for fabric topology in v4.17 commit fa89adba1941 ("scsi:
+zfcp: fix infinite iteration on ERP ready list"), there was an endless loop
+during tracing of rport (un)block. A subsequent v4.18 commit 9e156c54ace3
+("scsi: zfcp: assert that the ERP lock is held when tracing a recovery
+trigger") introduced a lockdep assertion for that case.
+
+As a side effect, that lockdep assertion now uncovered the unlocked code
+path for PtP. It is from within an adapter ERP action:
+
+zfcp_erp_strategy[1479] intentionally DROPs erp lock around
+ zfcp_erp_strategy_do_action()
+zfcp_erp_strategy_do_action[1441] NO erp lock
+zfcp_erp_adapter_strategy[876] NO erp lock
+zfcp_erp_adapter_strategy_open[855] NO erp lock
+zfcp_erp_adapter_strategy_open_fsf[806] NO erp lock
+zfcp_erp_adapter_strat_fsf_xconf[772] erp lock only around
+ zfcp_erp_action_to_running(),
+ BUT *_not_* around
+ zfcp_erp_enqueue_ptp_port()
+zfcp_erp_enqueue_ptp_port[728] BUG: *_not_* taking erp lock
+_zfcp_erp_port_reopen[432] assumes to be called with erp lock
+zfcp_erp_action_enqueue[314] assumes to be called with erp lock
+zfcp_dbf_rec_trig[288] _checks_ to be called with erp lock:
+ lockdep_assert_held(&adapter->erp_lock);
+
+It causes the following lockdep warning:
+
+WARNING: CPU: 2 PID: 775 at drivers/s390/scsi/zfcp_dbf.c:288
+ zfcp_dbf_rec_trig+0x16a/0x188
+no locks held by zfcperp0.0.17c0/775.
+
+Fix this by using the proper locked recovery trigger helper function.
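+
+A minimal sketch of the locked-wrapper convention involved, with a plain
+pthread mutex standing in for the erp_lock (not the zfcp code):
+
+  #include <pthread.h>
+
+  static pthread_mutex_t erp_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void _port_reopen(void)
+  {
+      /* assumes erp_lock is held: safe to touch the shared ERP lists */
+  }
+
+  static void port_reopen(void)
+  {
+      /* callers outside the ERP strategy go through the locked wrapper */
+      pthread_mutex_lock(&erp_lock);
+      _port_reopen();
+      pthread_mutex_unlock(&erp_lock);
+  }
+
+  int main(void)
+  {
+      port_reopen();
+      return 0;
+  }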
+
+Link: https://lore.kernel.org/r/20200312174505.51294-2-maier@linux.ibm.com
+Fixes: cc8c282963bd ("[SCSI] zfcp: Automatically attach remote ports")
+Cc: <stable@vger.kernel.org> #v2.6.27+
+Reviewed-by: Jens Remus <jremus@linux.ibm.com>
+Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
+Signed-off-by: Steffen Maier <maier@linux.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/scsi/zfcp_erp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -725,7 +725,7 @@ static void zfcp_erp_enqueue_ptp_port(st
+ adapter->peer_d_id);
+ if (IS_ERR(port)) /* error or port already attached */
+ return;
+- _zfcp_erp_port_reopen(port, 0, "ereptp1");
++ zfcp_erp_port_reopen(port, 0, "ereptp1");
+ }
+
+ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
--- /dev/null
+From 47bf235f324c696395c30541fe4fcf99fcd24188 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Fri, 28 Feb 2020 00:00:09 +0000
+Subject: selftests/powerpc: Add tlbie_test in .gitignore
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 47bf235f324c696395c30541fe4fcf99fcd24188 upstream.
+
+The commit identified below added tlbie_test but forgot to add it in
+.gitignore.
+
+Fixes: 93cad5f78995 ("selftests/powerpc: Add test case for tlbie vs mtpidr ordering issue")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/259f9c06ed4563c4fa4fa8ffa652347278d769e7.1582847784.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/powerpc/mm/.gitignore | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/powerpc/mm/.gitignore
++++ b/tools/testing/selftests/powerpc/mm/.gitignore
+@@ -6,3 +6,4 @@ segv_errors
+ wild_bctr
+ large_vm_fork_separation
+ bad_accesses
++tlbie_test
--- /dev/null
+From 9686813f6e9d5568bc045de0be853411e44958c8 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Fri, 27 Mar 2020 20:53:19 +1100
+Subject: selftests/powerpc: Fix try-run when source tree is not writable
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 9686813f6e9d5568bc045de0be853411e44958c8 upstream.
+
+We added a usage of try-run to pmu/ebb/Makefile to detect if the
+toolchain supported the -no-pie option.
+
+This fails if we build out-of-tree and the source tree is not
+writable, as try-run tries to write its temporary files to the current
+directory. That causes the -no-pie option to be silently dropped,
+which leads to broken executables with some toolchains.
+
+If we remove the redirect to /dev/null in try-run, we see the error:
+
+ make[3]: Entering directory '/linux/tools/testing/selftests/powerpc/pmu/ebb'
+ /usr/bin/ld: cannot open output file .54.tmp: Read-only file system
+ collect2: error: ld returned 1 exit status
+ make[3]: Nothing to be done for 'all'.
+
+And looking with strace we see it's trying to use a file that's in the
+source tree:
+
+ lstat("/linux/tools/testing/selftests/powerpc/pmu/ebb/.54.tmp", 0x7ffffc0f83c8)
+
+We can fix it by setting TMPOUT to point to the $(OUTPUT) directory,
+and we can verify with strace it's now trying to write to the output
+directory:
+
+ lstat("/output/kselftest/powerpc/pmu/ebb/.54.tmp", 0x7fffd1bf6bf8)
+
+And also see that the -no-pie option is now correctly detected.
+
+Fixes: 0695f8bca93e ("selftests/powerpc: Handle Makefile for unrecognized option")
+Cc: stable@vger.kernel.org # v5.5+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200327095319.2347641-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/powerpc/pmu/ebb/Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
++++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+@@ -7,6 +7,7 @@ noarg:
+ # The EBB handler is 64-bit code and everything links against it
+ CFLAGS += -m64
+
++TMPOUT = $(OUTPUT)/
+ # Toolchains may build PIE by default which breaks the assembly
+ no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
--- /dev/null
+From eea274d64e6ea8aff2224d33d0851133a84cc7b5 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Wed, 1 Apr 2020 21:10:25 -0700
+Subject: selftests: vm: drop dependencies on page flags from mlock2 tests
+
+From: Michal Hocko <mhocko@suse.com>
+
+commit eea274d64e6ea8aff2224d33d0851133a84cc7b5 upstream.
+
+It was noticed that the mlock2 tests fail after 9c4e6b1a7027f ("mm, mlock,
+vmscan: no more skipping pagevecs") because that patch changed the timing
+of when the page is added to the unevictable LRU list and thus when it
+gains the unevictable page flag.
+
+The test was just too dependent on the implementation details which were
+true at the time when it was introduced. Page flags and the timing when
+they are set is something no userspace should ever depend on. The test
+should be testing only for the user observable contract of the tested
+syscalls. Those are defined quite well for mlock, and there are other
+means of testing them. In fact this is already done, so testing for page
+flags can be safely dropped while still achieving the intended purpose.
+Page presence can be checked via the Rss: field of /proc/<pid>/smaps and
+the locking state via VmFlags, although I would argue that the Locked:
+field would be more appropriate.
+
+Drop all the page flag machinery and considerably simplify the test. This
+should be more robust for future kernel changes while checking the
+promised contract is still valid.
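+
+A minimal standalone sketch of such an smaps-based check (simpler than
+the selftest helpers; it only reads the first matching field):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  static long smaps_value_kb(const char *name)
+  {
+      char line[256];
+      long val = -1;
+      FILE *f = fopen("/proc/self/smaps", "r");
+
+      if (!f)
+          return -1;
+      while (fgets(line, sizeof(line), f)) {
+          if (strncmp(line, name, strlen(name)) == 0) {
+              sscanf(line + strlen(name), "%ld kB", &val);
+              break;
+          }
+      }
+      fclose(f);
+      return val;
+  }
+
+  int main(void)
+  {
+      printf("Size: %ld kB, Rss: %ld kB\n",
+             smaps_value_kb("Size:"), smaps_value_kb("Rss:"));
+      return 0;
+  }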
+
+Fixes: 9c4e6b1a7027f ("mm, mlock, vmscan: no more skipping pagevecs")
+Reported-by: Rafael Aquini <aquini@redhat.com>
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Rafael Aquini <aquini@redhat.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Eric B Munson <emunson@akamai.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200324154218.GS19542@dhcp22.suse.cz
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/vm/mlock2-tests.c | 233 ++++--------------------------
+ 1 file changed, 37 insertions(+), 196 deletions(-)
+
+--- a/tools/testing/selftests/vm/mlock2-tests.c
++++ b/tools/testing/selftests/vm/mlock2-tests.c
+@@ -67,59 +67,6 @@ out:
+ return ret;
+ }
+
+-static uint64_t get_pageflags(unsigned long addr)
+-{
+- FILE *file;
+- uint64_t pfn;
+- unsigned long offset;
+-
+- file = fopen("/proc/self/pagemap", "r");
+- if (!file) {
+- perror("fopen pagemap");
+- _exit(1);
+- }
+-
+- offset = addr / getpagesize() * sizeof(pfn);
+-
+- if (fseek(file, offset, SEEK_SET)) {
+- perror("fseek pagemap");
+- _exit(1);
+- }
+-
+- if (fread(&pfn, sizeof(pfn), 1, file) != 1) {
+- perror("fread pagemap");
+- _exit(1);
+- }
+-
+- fclose(file);
+- return pfn;
+-}
+-
+-static uint64_t get_kpageflags(unsigned long pfn)
+-{
+- uint64_t flags;
+- FILE *file;
+-
+- file = fopen("/proc/kpageflags", "r");
+- if (!file) {
+- perror("fopen kpageflags");
+- _exit(1);
+- }
+-
+- if (fseek(file, pfn * sizeof(flags), SEEK_SET)) {
+- perror("fseek kpageflags");
+- _exit(1);
+- }
+-
+- if (fread(&flags, sizeof(flags), 1, file) != 1) {
+- perror("fread kpageflags");
+- _exit(1);
+- }
+-
+- fclose(file);
+- return flags;
+-}
+-
+ #define VMFLAGS "VmFlags:"
+
+ static bool is_vmflag_set(unsigned long addr, const char *vmflag)
+@@ -159,19 +106,13 @@ out:
+ #define RSS "Rss:"
+ #define LOCKED "lo"
+
+-static bool is_vma_lock_on_fault(unsigned long addr)
++static unsigned long get_value_for_name(unsigned long addr, const char *name)
+ {
+- bool ret = false;
+- bool locked;
+- FILE *smaps = NULL;
+- unsigned long vma_size, vma_rss;
+ char *line = NULL;
+- char *value;
+ size_t size = 0;
+-
+- locked = is_vmflag_set(addr, LOCKED);
+- if (!locked)
+- goto out;
++ char *value_ptr;
++ FILE *smaps = NULL;
++ unsigned long value = -1UL;
+
+ smaps = seek_to_smaps_entry(addr);
+ if (!smaps) {
+@@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigne
+ }
+
+ while (getline(&line, &size, smaps) > 0) {
+- if (!strstr(line, SIZE)) {
++ if (!strstr(line, name)) {
+ free(line);
+ line = NULL;
+ size = 0;
+ continue;
+ }
+
+- value = line + strlen(SIZE);
+- if (sscanf(value, "%lu kB", &vma_size) < 1) {
++ value_ptr = line + strlen(name);
++ if (sscanf(value_ptr, "%lu kB", &value) < 1) {
+ printf("Unable to parse smaps entry for Size\n");
+ goto out;
+ }
+ break;
+ }
+
+- while (getline(&line, &size, smaps) > 0) {
+- if (!strstr(line, RSS)) {
+- free(line);
+- line = NULL;
+- size = 0;
+- continue;
+- }
+-
+- value = line + strlen(RSS);
+- if (sscanf(value, "%lu kB", &vma_rss) < 1) {
+- printf("Unable to parse smaps entry for Rss\n");
+- goto out;
+- }
+- break;
+- }
+-
+- ret = locked && (vma_rss < vma_size);
+ out:
+- free(line);
+ if (smaps)
+ fclose(smaps);
+- return ret;
++ free(line);
++ return value;
+ }
+
+-#define PRESENT_BIT 0x8000000000000000ULL
+-#define PFN_MASK 0x007FFFFFFFFFFFFFULL
+-#define UNEVICTABLE_BIT (1UL << 18)
+-
+-static int lock_check(char *map)
++static bool is_vma_lock_on_fault(unsigned long addr)
+ {
+- unsigned long page_size = getpagesize();
+- uint64_t page1_flags, page2_flags;
++ bool locked;
++ unsigned long vma_size, vma_rss;
++
++ locked = is_vmflag_set(addr, LOCKED);
++ if (!locked)
++ return false;
+
+- page1_flags = get_pageflags((unsigned long)map);
+- page2_flags = get_pageflags((unsigned long)map + page_size);
++ vma_size = get_value_for_name(addr, SIZE);
++ vma_rss = get_value_for_name(addr, RSS);
+
+- /* Both pages should be present */
+- if (((page1_flags & PRESENT_BIT) == 0) ||
+- ((page2_flags & PRESENT_BIT) == 0)) {
+- printf("Failed to make both pages present\n");
+- return 1;
+- }
++ /* only one page is faulted in */
++ return (vma_rss < vma_size);
++}
+
+- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
+- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
++#define PRESENT_BIT 0x8000000000000000ULL
++#define PFN_MASK 0x007FFFFFFFFFFFFFULL
++#define UNEVICTABLE_BIT (1UL << 18)
+
+- /* Both pages should be unevictable */
+- if (((page1_flags & UNEVICTABLE_BIT) == 0) ||
+- ((page2_flags & UNEVICTABLE_BIT) == 0)) {
+- printf("Failed to make both pages unevictable\n");
+- return 1;
+- }
++static int lock_check(unsigned long addr)
++{
++ bool locked;
++ unsigned long vma_size, vma_rss;
+
+- if (!is_vmflag_set((unsigned long)map, LOCKED)) {
+- printf("VMA flag %s is missing on page 1\n", LOCKED);
+- return 1;
+- }
++ locked = is_vmflag_set(addr, LOCKED);
++ if (!locked)
++ return false;
+
+- if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
+- printf("VMA flag %s is missing on page 2\n", LOCKED);
+- return 1;
+- }
++ vma_size = get_value_for_name(addr, SIZE);
++ vma_rss = get_value_for_name(addr, RSS);
+
+- return 0;
++ return (vma_rss == vma_size);
+ }
+
+ static int unlock_lock_check(char *map)
+ {
+- unsigned long page_size = getpagesize();
+- uint64_t page1_flags, page2_flags;
+-
+- page1_flags = get_pageflags((unsigned long)map);
+- page2_flags = get_pageflags((unsigned long)map + page_size);
+- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
+- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
+-
+- if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) {
+- printf("A page is still marked unevictable after unlock\n");
+- return 1;
+- }
+-
+ if (is_vmflag_set((unsigned long)map, LOCKED)) {
+ printf("VMA flag %s is present on page 1 after unlock\n", LOCKED);
+ return 1;
+ }
+
+- if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) {
+- printf("VMA flag %s is present on page 2 after unlock\n", LOCKED);
+- return 1;
+- }
+-
+ return 0;
+ }
+
+@@ -311,7 +210,7 @@ static int test_mlock_lock()
+ goto unmap;
+ }
+
+- if (lock_check(map))
++ if (!lock_check((unsigned long)map))
+ goto unmap;
+
+ /* Now unlock and recheck attributes */
+@@ -330,64 +229,18 @@ out:
+
+ static int onfault_check(char *map)
+ {
+- unsigned long page_size = getpagesize();
+- uint64_t page1_flags, page2_flags;
+-
+- page1_flags = get_pageflags((unsigned long)map);
+- page2_flags = get_pageflags((unsigned long)map + page_size);
+-
+- /* Neither page should be present */
+- if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) {
+- printf("Pages were made present by MLOCK_ONFAULT\n");
+- return 1;
+- }
+-
+ *map = 'a';
+- page1_flags = get_pageflags((unsigned long)map);
+- page2_flags = get_pageflags((unsigned long)map + page_size);
+-
+- /* Only page 1 should be present */
+- if ((page1_flags & PRESENT_BIT) == 0) {
+- printf("Page 1 is not present after fault\n");
+- return 1;
+- } else if (page2_flags & PRESENT_BIT) {
+- printf("Page 2 was made present\n");
+- return 1;
+- }
+-
+- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
+-
+- /* Page 1 should be unevictable */
+- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
+- printf("Failed to make faulted page unevictable\n");
+- return 1;
+- }
+-
+ if (!is_vma_lock_on_fault((unsigned long)map)) {
+ printf("VMA is not marked for lock on fault\n");
+ return 1;
+ }
+
+- if (!is_vma_lock_on_fault((unsigned long)map + page_size)) {
+- printf("VMA is not marked for lock on fault\n");
+- return 1;
+- }
+-
+ return 0;
+ }
+
+ static int unlock_onfault_check(char *map)
+ {
+ unsigned long page_size = getpagesize();
+- uint64_t page1_flags;
+-
+- page1_flags = get_pageflags((unsigned long)map);
+- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
+-
+- if (page1_flags & UNEVICTABLE_BIT) {
+- printf("Page 1 is still marked unevictable after unlock\n");
+- return 1;
+- }
+
+ if (is_vma_lock_on_fault((unsigned long)map) ||
+ is_vma_lock_on_fault((unsigned long)map + page_size)) {
+@@ -445,7 +298,6 @@ static int test_lock_onfault_of_present(
+ char *map;
+ int ret = 1;
+ unsigned long page_size = getpagesize();
+- uint64_t page1_flags, page2_flags;
+
+ map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+@@ -465,17 +317,6 @@ static int test_lock_onfault_of_present(
+ goto unmap;
+ }
+
+- page1_flags = get_pageflags((unsigned long)map);
+- page2_flags = get_pageflags((unsigned long)map + page_size);
+- page1_flags = get_kpageflags(page1_flags & PFN_MASK);
+- page2_flags = get_kpageflags(page2_flags & PFN_MASK);
+-
+- /* Page 1 should be unevictable */
+- if ((page1_flags & UNEVICTABLE_BIT) == 0) {
+- printf("Failed to make present page unevictable\n");
+- goto unmap;
+- }
+-
+ if (!is_vma_lock_on_fault((unsigned long)map) ||
+ !is_vma_lock_on_fault((unsigned long)map + page_size)) {
+ printf("VMA with present pages is not marked lock on fault\n");
+@@ -507,7 +348,7 @@ static int test_munlockall()
+ goto out;
+ }
+
+- if (lock_check(map))
++ if (!lock_check((unsigned long)map))
+ goto unmap;
+
+ if (munlockall()) {
+@@ -549,7 +390,7 @@ static int test_munlockall()
+ goto out;
+ }
+
+- if (lock_check(map))
++ if (!lock_check((unsigned long)map))
+ goto unmap;
+
+ if (munlockall()) {
--- /dev/null
+From cabc30da10e677c67ab9a136b1478175734715c5 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Wed, 1 Apr 2020 21:11:51 -0700
+Subject: selftests/vm: fix map_hugetlb length used for testing read and write
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit cabc30da10e677c67ab9a136b1478175734715c5 upstream.
+
+Commit fa7b9a805c79 ("tools/selftest/vm: allow choosing mem size and page
+size in map_hugetlb") added the possibility to change the size of memory
+mapped for the test, but left the read and write test using the default
+value. This is unnoticed when mapping a length greater than the default
+one, but segfaults otherwise.
+
+Fix read_bytes() and write_bytes() by giving them the real length.
+
+Also fix the call to munmap().
+
+Fixes: fa7b9a805c79 ("tools/selftest/vm: allow choosing mem size and page size in map_hugetlb")
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Leonardo Bras <leonardo@linux.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/9a404a13c871c4bd0ba9ede68f69a1225180dd7e.1580978385.git.christophe.leroy@c-s.fr
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/vm/map_hugetlb.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -45,20 +45,20 @@ static void check_bytes(char *addr)
+ printf("First hex is %x\n", *((unsigned int *)addr));
+ }
+
+-static void write_bytes(char *addr)
++static void write_bytes(char *addr, size_t length)
+ {
+ unsigned long i;
+
+- for (i = 0; i < LENGTH; i++)
++ for (i = 0; i < length; i++)
+ *(addr + i) = (char)i;
+ }
+
+-static int read_bytes(char *addr)
++static int read_bytes(char *addr, size_t length)
+ {
+ unsigned long i;
+
+ check_bytes(addr);
+- for (i = 0; i < LENGTH; i++)
++ for (i = 0; i < length; i++)
+ if (*(addr + i) != (char)i) {
+ printf("Mismatch at %lu\n", i);
+ return 1;
+@@ -96,11 +96,11 @@ int main(int argc, char **argv)
+
+ printf("Returned address is %p\n", addr);
+ check_bytes(addr);
+- write_bytes(addr);
+- ret = read_bytes(addr);
++ write_bytes(addr, length);
++ ret = read_bytes(addr, length);
+
+ /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
+- if (munmap(addr, LENGTH)) {
++ if (munmap(addr, length)) {
+ perror("munmap");
+ exit(1);
+ }
sched-core-remove-duplicate-assignment-in-sched_tick_remote.patch
arm64-dts-allwinner-h5-fix-pmu-compatible.patch
mm-memcg-do-not-high-throttle-allocators-based-on-wraparound.patch
+dm-writecache-add-cond_resched-to-avoid-cpu-hangs.patch
+dm-integrity-fix-a-crash-with-unusually-large-tag-size.patch
+dm-verity-fec-fix-memory-leak-in-verity_fec_dtr.patch
+dm-zoned-remove-duplicate-nr_rnd_zones-increase-in-dmz_init_zone.patch
+dm-clone-fix-handling-of-partial-region-discards.patch
+dm-clone-add-overflow-check-for-number-of-regions.patch
+dm-clone-add-missing-casts-to-prevent-overflows-and-data-corruption.patch
+dm-clone-metadata-fix-return-type-of-dm_clone_nr_of_hydrated_regions.patch
+xarray-fix-xas_pause-for-large-multi-index-entries.patch
+xarray-fix-early-termination-of-xas_for_each_marked.patch
+crypto-caam-qi2-fix-chacha20-data-size-error.patch
+crypto-caam-update-xts-sector-size-for-large-input-length.patch
+crypto-ccree-protect-against-empty-or-null-scatterlists.patch
+crypto-ccree-only-try-to-map-auth-tag-if-needed.patch
+crypto-ccree-dec-auth-tag-size-from-cryptlen-map.patch
+scsi-zfcp-fix-missing-erp_lock-in-port-recovery-trigger-for-point-to-point.patch
+scsi-ufs-fix-auto-hibern8-error-detection.patch
+scsi-lpfc-fix-lpfc_io_buf-resource-leak-in-lpfc_get_scsi_buf_s4-error-path.patch
+scsi-lpfc-fix-broken-credit-recovery-after-driver-load.patch
+arm-dts-exynos-fix-polarity-of-the-lcd-spi-bus-on-universalc210-board.patch
+arm64-dts-ti-k3-am65-add-clocks-to-dwc3-nodes.patch
+arm64-armv8_deprecated-fix-undef_hook-mask-for-thumb-setend.patch
+selftests-vm-drop-dependencies-on-page-flags-from-mlock2-tests.patch
+selftests-vm-fix-map_hugetlb-length-used-for-testing-read-and-write.patch
+selftests-powerpc-add-tlbie_test-in-.gitignore.patch
+selftests-powerpc-fix-try-run-when-source-tree-is-not-writable.patch
+vfio-platform-switch-to-platform_get_irq_optional.patch
+drm-i915-gem-flush-all-the-reloc_gpu-batch.patch
+drm-bridge-analogix-anx78xx-fix-drm_dp_link-helper-removal.patch
+drm-etnaviv-rework-perfmon-query-infrastructure.patch
+drm-remove-pagereserved-manipulation-from-drm_pci_alloc.patch
+drm-amdgpu-powerplay-using-the-fclk-dpm-table-to-set-the-mclk.patch
+drm-amd-powerplay-implement-the-is_dpm_running.patch
+drm-amdgpu-unify-fw_write_wait-for-new-gfx9-asics.patch
+drm-amd-display-check-for-null-fclk-voltage-when-parsing-clock-table.patch
+drm-prime-fix-extracting-of-the-dma-addresses-from-a-scatterlist.patch
+drm-i915-icl-don-t-enable-ddi-io-power-on-a-typec-port-in-tbt-mode.patch
+powerpc-pseries-avoid-null-pointer-dereference-when-drmem-is-unavailable.patch
+drm-vboxvideo-add-missing-remove_conflicting_pci_framebuffers-call-v2.patch
+nfsd-fsnotify-on-rmdir-under-nfsd-clients.patch
+nfs-fix-use-after-free-issues-in-nfs_pageio_add_request.patch
+nfs-fix-a-page-leak-in-nfs_destroy_unlinked_subrequests.patch
+nfs-finish_automount-requires-us-to-hold-2-refs-to-the-mount-record.patch
+nfs-fix-a-few-constant_table-array-definitions.patch
+ext4-fix-a-data-race-at-inode-i_blocks.patch
+drm-i915-gt-treat-idling-as-a-rps-downclock-event.patch
+asoc-cs4270-pull-reset-gpio-low-then-high.patch
+fs-filesystems.c-downgrade-user-reachable-warn_once-to-pr_warn_once.patch
+ocfs2-no-need-try-to-truncate-file-beyond-i_size.patch
+perf-tools-support-python-3.8-in-makefile.patch
+s390-diag-fix-display-of-diagnose-call-statistics.patch
+input-i8042-add-acer-aspire-5738z-to-nomux-list.patch
+ftrace-kprobe-show-the-maxactive-number-on-kprobe_events.patch
+clk-ingenic-jz4770-exit-with-error-if-cgu-init-failed.patch
+clk-ingenic-tcu-fix-round_rate-returning-error.patch
+kmod-make-request_module-return-an-error-when-autoloading-is-disabled.patch
+cpufreq-powernv-fix-use-after-free.patch
+hfsplus-fix-crash-and-filesystem-corruption-when-deleting-files.patch
+libata-return-correct-status-in-sata_pmp_eh_recover_pm-when-ata_dflag_detach-is-set.patch
+ipmi-fix-hung-processes-in-__get_guid.patch
+xen-blkfront-fix-memory-allocation-flags-in-blkfront_setup_indirect.patch
--- /dev/null
+From 723fe298ad85ad1278bd2312469ad14738953cc6 Mon Sep 17 00:00:00 2001
+From: Eric Auger <eric.auger@redhat.com>
+Date: Tue, 24 Mar 2020 09:26:30 -0600
+Subject: vfio: platform: Switch to platform_get_irq_optional()
+
+From: Eric Auger <eric.auger@redhat.com>
+
+commit 723fe298ad85ad1278bd2312469ad14738953cc6 upstream.
+
+Since commit 7723f4c5ecdb ("driver core: platform: Add an error
+message to platform_get_irq*()"), platform_get_irq() calls dev_err()
+on an error. As we enumerate all interrupts until platform_get_irq()
+fails, we now systematically get a message such as:
+"vfio-platform fff51000.ethernet: IRQ index 3 not found" which is
+a false positive.
+
+Let's use platform_get_irq_optional() instead.
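+
+A toy model of that enumeration pattern (hypothetical get_irq_optional(),
+not the vfio code): the count is found by probing indices until the
+getter reports "no such IRQ", so the final failure is expected and must
+stay quiet.
+
+  #include <errno.h>
+  #include <stdio.h>
+
+  static int get_irq_optional(unsigned int index)
+  {
+      static const int irqs[] = { 34, 35, 36 };
+
+      if (index >= sizeof(irqs) / sizeof(irqs[0]))
+          return -ENXIO;    /* expected terminator, no error message */
+      return irqs[index];
+  }
+
+  int main(void)
+  {
+      unsigned int count = 0;
+
+      while (get_irq_optional(count) > 0)
+          count++;
+      printf("found %u interrupts\n", count);
+      return 0;
+  }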
+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Cc: stable@vger.kernel.org # v5.3+
+Reviewed-by: Andre Przywara <andre.przywara@arm.com>
+Tested-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vfio/platform/vfio_platform.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/vfio/platform/vfio_platform.c
++++ b/drivers/vfio/platform/vfio_platform.c
+@@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_
+ {
+ struct platform_device *pdev = (struct platform_device *) vdev->opaque;
+
+- return platform_get_irq(pdev, i);
++ return platform_get_irq_optional(pdev, i);
+ }
+
+ static int vfio_platform_probe(struct platform_device *pdev)
--- /dev/null
+From 7e934cf5ace1dceeb804f7493fa28bb697ed3c52 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Thu, 12 Mar 2020 17:29:11 -0400
+Subject: xarray: Fix early termination of xas_for_each_marked
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 7e934cf5ace1dceeb804f7493fa28bb697ed3c52 upstream.
+
+xas_for_each_marked() is using entry == NULL as a termination condition
+of the iteration. When xas_for_each_marked() is used protected only by
+RCU, this can however race with xas_store(xas, NULL) in the following
+way:
+
+TASK1 TASK2
+page_cache_delete() find_get_pages_range_tag()
+ xas_for_each_marked()
+ xas_find_marked()
+ off = xas_find_chunk()
+
+ xas_store(&xas, NULL)
+ xas_init_marks(&xas);
+ ...
+ rcu_assign_pointer(*slot, NULL);
+ entry = xa_entry(off);
+
+And thus xas_for_each_marked() terminates prematurely possibly leading
+to missed entries in the iteration (translating to missing writeback of
+some pages or a similar problem).
+
+If we find a NULL entry that has been marked, skip it (unless we're trying
+to allocate an entry).
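+
+A toy model of the rule the fix enforces (plain array plus mark bitmap,
+not the XArray internals): a NULL slot that still carries a mark means
+"skip this slot", never "end of the iteration".
+
+  #include <stdio.h>
+
+  #define NSLOTS 8
+
+  static int count_marked(void **slots, unsigned long marks)
+  {
+      unsigned int i;
+      int n = 0;
+
+      for (i = 0; i < NSLOTS; i++) {
+          if (!(marks & (1UL << i)))
+              continue;
+          if (!slots[i])
+              continue;    /* racing delete: skip, do not stop */
+          n++;
+      }
+      return n;
+  }
+
+  int main(void)
+  {
+      void *slots[NSLOTS] = { (void *)1, NULL, (void *)1 };
+
+      /* slots 0..2 are marked; slot 1 was deleted concurrently */
+      printf("%d marked entries\n", count_marked(slots, 0x7));
+      return 0;
+  }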
+
+Reported-by: Jan Kara <jack@suse.cz>
+CC: stable@vger.kernel.org
+Fixes: ef8e5717db01 ("page cache: Convert delete_batch to XArray")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/xarray.h | 6 +
+ lib/xarray.c | 2
+ tools/testing/radix-tree/Makefile | 4 -
+ tools/testing/radix-tree/iteration_check_2.c | 87 +++++++++++++++++++++++++++
+ tools/testing/radix-tree/main.c | 1
+ tools/testing/radix-tree/test.h | 1
+ 6 files changed, 98 insertions(+), 3 deletions(-)
+
+--- a/include/linux/xarray.h
++++ b/include/linux/xarray.h
+@@ -1648,6 +1648,7 @@ static inline void *xas_next_marked(stru
+ xa_mark_t mark)
+ {
+ struct xa_node *node = xas->xa_node;
++ void *entry;
+ unsigned int offset;
+
+ if (unlikely(xas_not_node(node) || node->shift))
+@@ -1659,7 +1660,10 @@ static inline void *xas_next_marked(stru
+ return NULL;
+ if (offset == XA_CHUNK_SIZE)
+ return xas_find_marked(xas, max, mark);
+- return xa_entry(xas->xa, node, offset);
++ entry = xa_entry(xas->xa, node, offset);
++ if (!entry)
++ return xas_find_marked(xas, max, mark);
++ return entry;
+ }
+
+ /*
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *x
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
++ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
++ continue;
+ if (!xa_is_node(entry))
+ return entry;
+ xas->xa_node = xa_to_node(entry);
+--- a/tools/testing/radix-tree/Makefile
++++ b/tools/testing/radix-tree/Makefile
+@@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu
+ TARGETS = main idr-test multiorder xarray
+ CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
+ OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
+- regression4.o \
+- tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
++ regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
++ iteration_check_2.o benchmark.o
+
+ ifndef SHIFT
+ SHIFT=3
+--- /dev/null
++++ b/tools/testing/radix-tree/iteration_check_2.c
+@@ -0,0 +1,87 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * iteration_check_2.c: Check that deleting a tagged entry doesn't cause
++ * an RCU walker to finish early.
++ * Copyright (c) 2020 Oracle
++ * Author: Matthew Wilcox <willy@infradead.org>
++ */
++#include <pthread.h>
++#include "test.h"
++
++static volatile bool test_complete;
++
++static void *iterator(void *arg)
++{
++ XA_STATE(xas, arg, 0);
++ void *entry;
++
++ rcu_register_thread();
++
++ while (!test_complete) {
++ xas_set(&xas, 0);
++ rcu_read_lock();
++ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
++ ;
++ rcu_read_unlock();
++ assert(xas.xa_index >= 100);
++ }
++
++ rcu_unregister_thread();
++ return NULL;
++}
++
++static void *throbber(void *arg)
++{
++ struct xarray *xa = arg;
++
++ rcu_register_thread();
++
++ while (!test_complete) {
++ int i;
++
++ for (i = 0; i < 100; i++) {
++ xa_store(xa, i, xa_mk_value(i), GFP_KERNEL);
++ xa_set_mark(xa, i, XA_MARK_0);
++ }
++ for (i = 0; i < 100; i++)
++ xa_erase(xa, i);
++ }
++
++ rcu_unregister_thread();
++ return NULL;
++}
++
++void iteration_test2(unsigned test_duration)
++{
++ pthread_t threads[2];
++ DEFINE_XARRAY(array);
++ int i;
++
++ printv(1, "Running iteration test 2 for %d seconds\n", test_duration);
++
++ test_complete = false;
++
++ xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL);
++ xa_set_mark(&array, 100, XA_MARK_0);
++
++ if (pthread_create(&threads[0], NULL, iterator, &array)) {
++ perror("create iterator thread");
++ exit(1);
++ }
++ if (pthread_create(&threads[1], NULL, throbber, &array)) {
++ perror("create throbber thread");
++ exit(1);
++ }
++
++ sleep(test_duration);
++ test_complete = true;
++
++ for (i = 0; i < 2; i++) {
++ if (pthread_join(threads[i], NULL)) {
++ perror("pthread_join");
++ exit(1);
++ }
++ }
++
++ xa_destroy(&array);
++}
+--- a/tools/testing/radix-tree/main.c
++++ b/tools/testing/radix-tree/main.c
+@@ -311,6 +311,7 @@ int main(int argc, char **argv)
+ regression4_test();
+ iteration_test(0, 10 + 90 * long_run);
+ iteration_test(7, 10 + 90 * long_run);
++ iteration_test2(10 + 90 * long_run);
+ single_thread_tests(long_run);
+
+ /* Free any remaining preallocated nodes */
+--- a/tools/testing/radix-tree/test.h
++++ b/tools/testing/radix-tree/test.h
+@@ -34,6 +34,7 @@ void xarray_tests(void);
+ void tag_check(void);
+ void multiorder_checks(void);
+ void iteration_test(unsigned order, unsigned duration);
++void iteration_test2(unsigned duration);
+ void benchmark(void);
+ void idr_checks(void);
+ void ida_tests(void);
--- /dev/null
+From c36d451ad386b34f452fc3c8621ff14b9eaa31a6 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 31 Jan 2020 06:17:09 -0500
+Subject: XArray: Fix xas_pause for large multi-index entries
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit c36d451ad386b34f452fc3c8621ff14b9eaa31a6 upstream.
+
+Inspired by the recent Coverity report, I looked for other places where
+the offset wasn't being converted to an unsigned long before being
+shifted, and I found one in xas_pause() when the entry being paused is
+of order >32.
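+
+The same class of bug in miniature (a user-space sketch; the shift here
+is kept small enough to stay well-defined, whereas in xas_pause() it can
+even exceed the width of the 32-bit type):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned int narrow = 0x1000;    /* like the old 32-bit offset   */
+      unsigned long wide = 0x1000;     /* like the fixed unsigned long */
+      unsigned int shift = 24;
+
+      /* evaluated in 32 bits: high bits are lost before widening */
+      unsigned long bad = narrow << shift;
+      /* 64-bit arithmetic keeps the full index delta */
+      unsigned long good = wide << shift;
+
+      printf("bad=%#lx good=%#lx\n", bad, good);
+      return 0;
+  }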
+
+Fixes: b803b42823d0 ("xarray: Add XArray iterators")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_xarray.c | 37 +++++++++++++++++++++++++++++++++++++
+ lib/xarray.c | 2 +-
+ 2 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1156,6 +1156,42 @@ static noinline void check_find_entry(st
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
++static noinline void check_pause(struct xarray *xa)
++{
++ XA_STATE(xas, xa, 0);
++ void *entry;
++ unsigned int order;
++ unsigned long index = 1;
++ unsigned int count = 0;
++
++ for (order = 0; order < order_limit; order++) {
++ XA_BUG_ON(xa, xa_store_order(xa, index, order,
++ xa_mk_index(index), GFP_KERNEL));
++ index += 1UL << order;
++ }
++
++ rcu_read_lock();
++ xas_for_each(&xas, entry, ULONG_MAX) {
++ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
++ count++;
++ }
++ rcu_read_unlock();
++ XA_BUG_ON(xa, count != order_limit);
++
++ count = 0;
++ xas_set(&xas, 0);
++ rcu_read_lock();
++ xas_for_each(&xas, entry, ULONG_MAX) {
++ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
++ count++;
++ xas_pause(&xas);
++ }
++ rcu_read_unlock();
++ XA_BUG_ON(xa, count != order_limit);
++
++ xa_destroy(xa);
++}
++
+ static noinline void check_move_tiny(struct xarray *xa)
+ {
+ XA_STATE(xas, xa, 0);
+@@ -1664,6 +1700,7 @@ static int xarray_checks(void)
+ check_xa_alloc();
+ check_find(&array);
+ check_find_entry(&array);
++ check_pause(&array);
+ check_account(&array);
+ check_destroy(&array);
+ check_move(&array);
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
+
+ xas->xa_node = XAS_RESTART;
+ if (node) {
+- unsigned int offset = xas->xa_offset;
++ unsigned long offset = xas->xa_offset;
+ while (++offset < XA_CHUNK_SIZE) {
+ if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+ break;
--- /dev/null
+From 3a169c0be75b59dd85d159493634870cdec6d3c4 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 3 Apr 2020 11:00:34 +0200
+Subject: xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 3a169c0be75b59dd85d159493634870cdec6d3c4 upstream.
+
+Commit 1d5c76e664333 ("xen-blkfront: switch kcalloc to kvcalloc for
+large array allocation") didn't fix the issue it was meant to, as the
+flags for allocating the memory are GFP_NOIO, which will lead the
+memory allocation falling back to kmalloc().
+
+So instead of GFP_NOIO use GFP_KERNEL and do all the memory allocation
+in blkfront_setup_indirect() in a memalloc_noio_{save,restore} section.
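+
+The pattern in isolation (a sketch against the generic allocator APIs,
+not the blkfront code itself):
+
+  #include <linux/sched/mm.h>
+  #include <linux/mm.h>
+
+  /* Mark the whole allocation sequence NOIO through the task flag, then
+   * allocate with GFP_KERNEL so kvcalloc() may fall back to vmalloc(). */
+  static void *alloc_table(size_t n, size_t size)
+  {
+      unsigned int memflags = memalloc_noio_save();
+      void *p = kvcalloc(n, size, GFP_KERNEL);
+
+      memalloc_noio_restore(memflags);
+      return p;    /* release with kvfree() */
+  }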
+
+Fixes: 1d5c76e664333 ("xen-blkfront: switch kcalloc to kvcalloc for large array allocation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Roger Pau Monné <roger.pau@citrix.com>
+Link: https://lore.kernel.org/r/20200403090034.8753-1-jgross@suse.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/xen-blkfront.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -47,6 +47,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/list.h>
+ #include <linux/workqueue.h>
++#include <linux/sched/mm.h>
+
+ #include <xen/xen.h>
+ #include <xen/xenbus.h>
+@@ -2189,10 +2190,12 @@ static void blkfront_setup_discard(struc
+
+ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
+ {
+- unsigned int psegs, grants;
++ unsigned int psegs, grants, memflags;
+ int err, i;
+ struct blkfront_info *info = rinfo->dev_info;
+
++ memflags = memalloc_noio_save();
++
+ if (info->max_indirect_segments == 0) {
+ if (!HAS_EXTRA_REQ)
+ grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+@@ -2224,7 +2227,7 @@ static int blkfront_setup_indirect(struc
+
+ BUG_ON(!list_empty(&rinfo->indirect_pages));
+ for (i = 0; i < num; i++) {
+- struct page *indirect_page = alloc_page(GFP_NOIO);
++ struct page *indirect_page = alloc_page(GFP_KERNEL);
+ if (!indirect_page)
+ goto out_of_memory;
+ list_add(&indirect_page->lru, &rinfo->indirect_pages);
+@@ -2235,15 +2238,15 @@ static int blkfront_setup_indirect(struc
+ rinfo->shadow[i].grants_used =
+ kvcalloc(grants,
+ sizeof(rinfo->shadow[i].grants_used[0]),
+- GFP_NOIO);
++ GFP_KERNEL);
+ rinfo->shadow[i].sg = kvcalloc(psegs,
+ sizeof(rinfo->shadow[i].sg[0]),
+- GFP_NOIO);
++ GFP_KERNEL);
+ if (info->max_indirect_segments)
+ rinfo->shadow[i].indirect_grants =
+ kvcalloc(INDIRECT_GREFS(grants),
+ sizeof(rinfo->shadow[i].indirect_grants[0]),
+- GFP_NOIO);
++ GFP_KERNEL);
+ if ((rinfo->shadow[i].grants_used == NULL) ||
+ (rinfo->shadow[i].sg == NULL) ||
+ (info->max_indirect_segments &&
+@@ -2252,6 +2255,7 @@ static int blkfront_setup_indirect(struc
+ sg_init_table(rinfo->shadow[i].sg, psegs);
+ }
+
++ memalloc_noio_restore(memflags);
+
+ return 0;
+
+@@ -2271,6 +2275,9 @@ out_of_memory:
+ __free_page(indirect_page);
+ }
+ }
++
++ memalloc_noio_restore(memflags);
++
+ return -ENOMEM;
+ }
+