--- /dev/null
+From c09c542aafc4dcd9c1b54fd7904131aa40d28f24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Dec 2023 10:27:35 -0300
+Subject: ARM: dts: imx28-xea: Pass the 'model' property
+
+From: Fabio Estevam <festevam@denx.de>
+
+[ Upstream commit 63ef8fc9bcee6b73ca445a19a7ac6bd544723c9f ]
+
+Per root-node.yaml, 'model' is a required property.
+
+Pass it to fix the following dt-schema warning:
+
+imx28-xea.dtb: /: 'model' is a required property
+ from schema $id: http://devicetree.org/schemas/root-node.yaml#
+
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Fixes: 445ae16ac1c5 ("ARM: dts: imx28: Add DTS description of imx28 based XEA board")
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/imx28-xea.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm/boot/dts/imx28-xea.dts b/arch/arm/boot/dts/imx28-xea.dts
+index a400c108f66a2..6c5e6856648af 100644
+--- a/arch/arm/boot/dts/imx28-xea.dts
++++ b/arch/arm/boot/dts/imx28-xea.dts
+@@ -8,6 +8,7 @@
+ #include "imx28-lwe.dtsi"
+
+ / {
++ model = "Liebherr XEA board";
+ compatible = "lwn,imx28-xea", "fsl,imx28";
+ };
+
+--
+2.42.0
+
--- /dev/null
+From 96b95bc3c1bd27a482c8f22d3a1ffdbf175460f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Nov 2023 10:32:19 -0300
+Subject: ARM: dts: imx6ul-pico: Describe the Ethernet PHY clock
+
+From: Fabio Estevam <festevam@denx.de>
+
+[ Upstream commit d951f8f5f23a9417b7952f22b33784c73caa1ebb ]
+
+Since commit c7e73b5051d6 ("ARM: imx: mach-imx6ul: remove 14x14 EVK
+specific PHY fixup")thet Ethernet PHY is no longer configured via code
+in board file.
+
+This caused Ethernet to stop working.
+
+Fix this problem by adding the clocks and clock-names properties to the
+Ethernet PHY node so that the KSZ8081 chip can be clocked correctly.
+
+Fixes: c7e73b5051d6 ("ARM: imx: mach-imx6ul: remove 14x14 EVK specific PHY fixup")
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/imx6ul-pico.dtsi | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm/boot/dts/imx6ul-pico.dtsi b/arch/arm/boot/dts/imx6ul-pico.dtsi
+index 357ffb2f5ad61..dd6790852b0d6 100644
+--- a/arch/arm/boot/dts/imx6ul-pico.dtsi
++++ b/arch/arm/boot/dts/imx6ul-pico.dtsi
+@@ -121,6 +121,8 @@
+ max-speed = <100>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&clks IMX6UL_CLK_ENET_REF>;
++ clock-names = "rmii-ref";
+ };
+ };
+ };
+--
+2.42.0
+
--- /dev/null
+From e5f422ec295bdf1b961d0db219fd32fbc49835cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Nov 2023 17:05:01 +0100
+Subject: ARM: dts: imx7: Declare timers compatible with fsl,imx6dl-gpt
+
+From: Philipp Zabel <p.zabel@pengutronix.de>
+
+[ Upstream commit 397caf68e2d36532054cb14ae8995537f27f8b61 ]
+
+The timer nodes declare compatibility with "fsl,imx6sx-gpt", which
+itself is compatible with "fsl,imx6dl-gpt". Switch the fallback
+compatible from "fsl,imx6sx-gpt" to "fsl,imx6dl-gpt".
+
+Fixes: 949673450291 ("ARM: dts: add imx7d soc dtsi file")
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Roland Hieber <rhi@pengutronix.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/imx7s.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index c978aab1d0e3d..406e8870c680d 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -438,7 +438,7 @@
+ };
+
+ gpt1: timer@302d0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
+@@ -447,7 +447,7 @@
+ };
+
+ gpt2: timer@302e0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302e0000 0x10000>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
+@@ -457,7 +457,7 @@
+ };
+
+ gpt3: timer@302f0000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x302f0000 0x10000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
+@@ -467,7 +467,7 @@
+ };
+
+ gpt4: timer@30300000 {
+- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
++ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
+ reg = <0x30300000 0x10000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
+--
+2.42.0
+
--- /dev/null
+From 906197316fd8da84cbe3eabadcf735c8c4b0c66f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Nov 2023 14:46:36 +0800
+Subject: ARM: imx: Check return value of devm_kasprintf in imx_mmdc_perf_init
+
+From: Kunwu Chan <chentao@kylinos.cn>
+
+[ Upstream commit 1c2b1049af3f86545fcc5fae0fc725fb64b3a09e ]
+
+devm_kasprintf() returns a pointer to dynamically allocated memory
+which can be NULL upon failure. Ensure the allocation was successful
+by checking the pointer validity.
+
+Release the id allocated in 'mmdc_pmu_init' when 'devm_kasprintf'
+returns NULL.
+
+Suggested-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Fixes: e76bdfd7403a ("ARM: imx: Added perf functionality to mmdc driver")
+Signed-off-by: Kunwu Chan <chentao@kylinos.cn>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-imx/mmdc.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index b9efe9da06e0b..3d76e8c28c51d 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -502,6 +502,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "mmdc%d", ret);
++ if (!name) {
++ ret = -ENOMEM;
++ goto pmu_release_id;
++ }
+
+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+@@ -524,9 +528,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+
+ pmu_register_err:
+ pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+- ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ hrtimer_cancel(&pmu_mmdc->hrtimer);
++pmu_release_id:
++ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ pmu_free:
+ kfree(pmu_mmdc);
+ return ret;
+--
+2.42.0
+
--- /dev/null
+From 27e31f365bbd9fa1e21bb312bc4b83fbfc45bfe1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Nov 2023 02:14:36 +0000
+Subject: arm64: dts: imx8mp: imx8mq: Add parkmode-disable-ss-quirk on DWC3
+
+From: Nathan Rossi <nathan.rossi@digi.com>
+
+[ Upstream commit 209043cf092d7b0d4739921b3f11d6d0b451eabf ]
+
+The i.MX8MP and i.MX8MQ devices both use the same DWC3 controller and
+are both affected by a known issue with the controller due to specific
+behaviour when park mode is enabled in SuperSpeed host mode operation.
+
+Under heavy USB traffic from multiple endpoints the controller will
+sometimes incorrectly process transactions such that some transactions
+are lost, or the controller may hang when processing transactions. When
+the controller hangs it does not recover.
+
+This issue is documented partially within the linux-imx vendor kernel
+which references a Synopsys STAR number 9001415732 in commits [1] and
+additional details in [2]. Those commits provide some additional
+controller internal implementation specifics around the incorrect
+behaviour of the SuperSpeed host controller operation when park mode is
+enabled.
+
+The summary of this issue is that the host controller can incorrectly
+enter/exit park mode such that part of the controller is in a state
+which behaves as if in park mode even though it is not. In this state
+the controller incorrectly calculates the number of TRBs available which
+results in incorrect access of the internal caches causing the overwrite
+of pending requests in the cache which should have been processed but
+are ignored. This can cause the controller to drop the requests or hang
+waiting for the pending state of the dropped requests.
+
+The workaround for this issue is to disable park mode for SuperSpeed
+operation of the controller through the GUCTL1[17] bit. This is already
+available as a quirk for the DWC3 controller and can be enabled via the
+'snps,parkmode-disable-ss-quirk' device tree property.
+
+It is possible to replicate this failure on an i.MX8MP EVK with a USB
+Hub connecting 4 SuperSpeed USB flash drives. Performing continuous
+small read operations (dd if=/dev/sd... of=/dev/null bs=16) on the block
+devices will result in device errors initially and will eventually
+result in the controller hanging.
+
+ [13240.896936] xhci-hcd xhci-hcd.0.auto: WARN Event TRB for slot 4 ep 2 with no TDs queued?
+ [13240.990708] usb 2-1.3: reset SuperSpeed USB device number 5 using xhci-hcd
+ [13241.015582] sd 2:0:0:0: [sdc] tag#0 UNKNOWN(0x2003) Result: hostbyte=0x07 driverbyte=DRIVER_OK cmd_age=0s
+ [13241.025198] sd 2:0:0:0: [sdc] tag#0 CDB: opcode=0x28 28 00 00 00 03 e0 00 01 00 00
+ [13241.032949] I/O error, dev sdc, sector 992 op 0x0:(READ) flags 0x80700 phys_seg 25 prio class 2
+ [13272.150710] usb 2-1.2: reset SuperSpeed USB device number 4 using xhci-hcd
+ [13272.175469] sd 1:0:0:0: [sdb] tag#0 UNKNOWN(0x2003) Result: hostbyte=0x03 driverbyte=DRIVER_OK cmd_age=31s
+ [13272.185365] sd 1:0:0:0: [sdb] tag#0 CDB: opcode=0x28 28 00 00 00 03 e0 00 01 00 00
+ [13272.193385] I/O error, dev sdb, sector 992 op 0x0:(READ) flags 0x80700 phys_seg 18 prio class 2
+ [13434.846556] xhci-hcd xhci-hcd.0.auto: xHCI host not responding to stop endpoint command
+ [13434.854592] xhci-hcd xhci-hcd.0.auto: xHCI host controller not responding, assume dead
+ [13434.862553] xhci-hcd xhci-hcd.0.auto: HC died; cleaning up
+
+[1] https://github.com/nxp-imx/linux-imx/commit/97a5349d936b08cf301730b59e4e8855283f815c
+[2] https://github.com/nxp-imx/linux-imx/commit/b4b5cbc5a12d7c3b920d1d7cba0ada3379e4e42b
+
+Fixes: fb8587a2c165 ("arm64: dtsi: imx8mp: add usb nodes")
+Fixes: ad37549cb5dc ("arm64: dts: imx8mq: add USB nodes")
+Signed-off-by: Nathan Rossi <nathan.rossi@digi.com>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mp.dtsi | 2 ++
+ arch/arm64/boot/dts/freescale/imx8mq.dtsi | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index ab670b5d641b1..4e8cde8972e82 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -913,6 +913,7 @@
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+
+ };
+@@ -954,6 +955,7 @@
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
++ snps,parkmode-disable-ss-quirk;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 2181278f1374a..e41e1c553bd37 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -1382,6 +1382,7 @@
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg1>;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+@@ -1413,6 +1414,7 @@
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg2>;
++ snps,parkmode-disable-ss-quirk;
+ status = "disabled";
+ };
+
+--
+2.42.0
+
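As background for why a single DT property is enough here: the dwc3 core already
implements this quirk, so the DTS change above only has to opt in. Below is a
minimal, hedged sketch (not the verbatim upstream code) of how the core consumes
the property. The helper name dwc3_apply_parkmode_quirk is hypothetical, while
dwc->parkmode_disable_ss_quirk, DWC3_GUCTL1 and DWC3_GUCTL1_PARKMODE_DISABLE_SS
are existing dwc3 driver symbols, the last corresponding to the GUCTL1[17] bit
mentioned in the commit message.

    /* Hypothetical condensation of the dwc3 core quirk handling. */
    static void dwc3_apply_parkmode_quirk(struct dwc3 *dwc)
    {
            u32 reg;

            /* Flag is filled from the "snps,parkmode-disable-ss-quirk"
             * device-tree property during probe.
             */
            if (!dwc->parkmode_disable_ss_quirk)
                    return;

            reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
            reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; /* GUCTL1[17] */
            dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
    }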
--- /dev/null
+From 8493efa45552d4de0a5a088e4afc972e26877706 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Mar 2023 11:55:31 +0800
+Subject: arm64: dts: imx8mq: drop usb3-resume-missing-cas from usb
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit fcd3f50845be909c9e0f8ac402874a2fb4b09c6c ]
+
+The property is NXP downstream property that there is no user
+in upstream, drop it.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Link: https://lore.kernel.org/r/20230323035531.3808192-2-peng.fan@oss.nxp.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 209043cf092d ("arm64: dts: imx8mp: imx8mq: Add parkmode-disable-ss-quirk on DWC3")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mq.dtsi | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 2a698c5b87bcd..2181278f1374a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -1382,7 +1382,6 @@
+ phys = <&usb3_phy0>, <&usb3_phy0>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg1>;
+- usb3-resume-missing-cas;
+ status = "disabled";
+ };
+
+@@ -1414,7 +1413,6 @@
+ phys = <&usb3_phy1>, <&usb3_phy1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ power-domains = <&pgc_otg2>;
+- usb3-resume-missing-cas;
+ status = "disabled";
+ };
+
+--
+2.42.0
+
--- /dev/null
+From 78a6fe3ea6da72f7363ce2149e17998a2e22377c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Nov 2023 23:36:16 +0000
+Subject: arm64: dts: rockchip: Expand reg size of vdec node for RK3399
+
+From: Alex Bee <knaerzche@gmail.com>
+
+[ Upstream commit 35938c18291b5da7422b2fac6dac0af11aa8d0d7 ]
+
+Expand the reg size for the vdec node to include cache/performance
+registers the rkvdec driver writes to. Also add missing clocks to the
+related power-domain.
+
+Fixes: cbd7214402ec ("arm64: dts: rockchip: Define the rockchip Video Decoder node on rk3399")
+Signed-off-by: Alex Bee <knaerzche@gmail.com>
+Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
+Link: https://lore.kernel.org/r/20231105233630.3927502-10-jonas@kwiboo.se
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399.dtsi | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 4255e2d7a72fc..9e33f0e6ed504 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -993,7 +993,9 @@
+ power-domain@RK3399_PD_VDU {
+ reg = <RK3399_PD_VDU>;
+ clocks = <&cru ACLK_VDU>,
+- <&cru HCLK_VDU>;
++ <&cru HCLK_VDU>,
++ <&cru SCLK_VDU_CA>,
++ <&cru SCLK_VDU_CORE>;
+ pm_qos = <&qos_video_m1_r>,
+ <&qos_video_m1_w>;
+ #power-domain-cells = <0>;
+@@ -1260,7 +1262,7 @@
+
+ vdec: video-codec@ff660000 {
+ compatible = "rockchip,rk3399-vdec";
+- reg = <0x0 0xff660000 0x0 0x400>;
++ reg = <0x0 0xff660000 0x0 0x480>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+--
+2.42.0
+
--- /dev/null
+From 17c72a2346265503a775592617a1f4fc13c87bea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Dec 2023 15:41:56 +0800
+Subject: ASoC: wm_adsp: fix memleak in wm_adsp_buffer_populate
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 29046a78a3c0a1f8fa0427f164caa222f003cf5b ]
+
+When wm_adsp_buffer_read() fails, we should free buf->regions.
+Otherwise, the callers of wm_adsp_buffer_populate() will
+directly free buf on failure, leaking the memory pointed to by
+buf->regions.
+
+Fixes: a792af69b08f ("ASoC: wm_adsp: Refactor compress stream initialisation")
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Reviewed-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20231204074158.12026-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wm_adsp.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 08fc1a025b1a9..df86cf4f4caed 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -3766,12 +3766,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
+ ®ion->base_addr);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
+ &offset);
+ if (ret < 0)
+- return ret;
++ goto err;
+
+ region->cumulative_size = offset;
+
+@@ -3782,6 +3782,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
+ }
+
+ return 0;
++
++err:
++ kfree(buf->regions);
++ return ret;
+ }
+
+ static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
+--
+2.42.0
+
--- /dev/null
+From 9a817c85fb2c29fc463b5dde6c429d1ecfb17cf3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Nov 2023 19:27:47 +0100
+Subject: hwmon: (acpi_power_meter) Fix 4.29 MW bug
+
+From: Armin Wolf <W_Armin@gmx.de>
+
+[ Upstream commit 1fefca6c57fb928d2131ff365270cbf863d89c88 ]
+
+The ACPI specification says:
+
+"If an error occurs while obtaining the meter reading or if the value
+is not available then an Integer with all bits set is returned"
+
+Since the "integer" is 32 bits in case of the ACPI power meter,
+userspace will get a power reading of 2^32 * 1000 microwatts (~4.29 MW)
+in case of such an error. This was discovered due to a lm_sensors
+bugreport (https://github.com/lm-sensors/lm-sensors/issues/460).
+Fix this by returning -ENODATA instead.
+
+Tested-by: <urbinek@gmail.com>
+Fixes: de584afa5e18 ("hwmon driver for ACPI 4.0 power meters")
+Signed-off-by: Armin Wolf <W_Armin@gmx.de>
+Link: https://lore.kernel.org/r/20231124182747.13956-1-W_Armin@gmx.de
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/acpi_power_meter.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 014505b1faf74..1336f77106177 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -31,6 +31,7 @@
+ #define POWER_METER_CAN_NOTIFY (1 << 3)
+ #define POWER_METER_IS_BATTERY (1 << 8)
+ #define UNKNOWN_HYSTERESIS 0xFFFFFFFF
++#define UNKNOWN_POWER 0xFFFFFFFF
+
+ #define METER_NOTIFY_CONFIG 0x80
+ #define METER_NOTIFY_TRIP 0x81
+@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
+ update_meter(resource);
+ mutex_unlock(&resource->lock);
+
++ if (resource->power == UNKNOWN_POWER)
++ return -ENODATA;
++
+ return sprintf(buf, "%llu\n", resource->power * 1000);
+ }
+
+--
+2.42.0
+
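For reference, the arithmetic behind the ~4.29 MW figure quoted above, assuming
the usual hwmon sysfs convention of reporting power in microwatts:

    _PMM error value: 0xFFFFFFFF mW = (2^32 - 1) mW ≈ 4.295 * 10^6 W ≈ 4.29 MW
    sysfs value:      (2^32 - 1) * 1000 ≈ 4.295 * 10^12 uW (the same ~4.29 MW)

Returning -ENODATA instead lets userspace tools such as lm-sensors report a read
failure rather than a bogus megawatt-scale reading.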
--- /dev/null
+From 296c3be3edfc33d5623ff96ae69d991e0199f2b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 Dec 2023 16:24:05 +0100
+Subject: hwmon: (nzxt-kraken2) Fix error handling path in kraken2_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 35fe2ad259a3bfca15ab78c8ffb5278cb6149c89 ]
+
+There is no point in calling hid_hw_stop() if hid_hw_start() has failed.
+There is no point in calling hid_hw_close() if hid_hw_open() has failed.
+
+Update the error handling path accordingly.
+
+Fixes: 82e3430dfa8c ("hwmon: add driver for NZXT Kraken X42/X52/X62/X72")
+Reported-by: Aleksa Savic <savicaleksa83@gmail.com>
+Closes: https://lore.kernel.org/all/121470f0-6c1f-418a-844c-7ec2e8a54b8e@gmail.com/
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Jonas Malaco <jonas@protocubo.io>
+Link: https://lore.kernel.org/r/a768e69851a07a1f4e29f270f4e2559063f07343.1701617030.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/nzxt-kraken2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
+index 89f7ea4f42d47..badbcaf01f90b 100644
+--- a/drivers/hwmon/nzxt-kraken2.c
++++ b/drivers/hwmon/nzxt-kraken2.c
+@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+- goto fail_and_stop;
++ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+- goto fail_and_close;
++ goto fail_and_stop;
+ }
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
+--
+2.42.0
+
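The resulting unwind ordering is the usual "undo only what has already
succeeded" ladder. Below is a self-contained toy illustration of that principle;
the function names are stand-ins, not the HID API:

    #include <stdio.h>

    /* Toy stand-ins for the probe steps: each "acquire" either succeeds (0)
     * or fails (negative), and has a matching "release". */
    static int start(void)        { return 0; }
    static void stop(void)        { puts("stop"); }
    static int open_dev(void)     { return 0; }
    static void close_dev(void)   { puts("close"); }
    static int register_dev(void) { return -1; } /* simulate a late failure */

    static int probe(void)
    {
            int ret;

            ret = start();
            if (ret)
                    return ret;            /* nothing acquired yet: plain return */

            ret = open_dev();
            if (ret)
                    goto fail_and_stop;    /* undo start() only */

            ret = register_dev();
            if (ret)
                    goto fail_and_close;   /* undo open_dev(), then start() */

            return 0;

    fail_and_close:
            close_dev();
    fail_and_stop:
            stop();
            return ret;
    }

    int main(void)
    {
            return probe() ? 1 : 0;
    }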
--- /dev/null
+From 5e53e8f7a69b9a88a3b3236c8faec746a1cb4afc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Dec 2023 17:42:14 +0800
+Subject: md: don't leave 'MD_RECOVERY_FROZEN' in error path of
+ md_set_readonly()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit c9f7cb5b2bc968adcdc686c197ed108f47fd8eb0 ]
+
+If md_set_readonly() failed, the array could still be read-write;
+however, 'MD_RECOVERY_FROZEN' could still be set, which leaves the array
+in an abnormal state where sync or recovery cannot continue.
+Hence make sure the flag is cleared after md_set_readonly() returns.
+
+Fixes: 88724bfa68be ("md: wait for pending superblock updates before switching to read-only")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Acked-by: Xiao Ni <xni@redhat.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20231205094215.1824240-3-yukuai1@huaweicloud.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2d04073174782..aae9ec78c0e8c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6311,6 +6311,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ int err = 0;
+ int did_freeze = 0;
+
++ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
++ return -EBUSY;
++
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+@@ -6323,8 +6326,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+
+- if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+- return -EBUSY;
+ mddev_unlock(mddev);
+ wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
+ &mddev->recovery));
+@@ -6337,29 +6338,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ pr_warn("md: %s still in use.\n",mdname(mddev));
+- if (did_freeze) {
+- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- md_wakeup_thread(mddev->thread);
+- }
+ err = -EBUSY;
+ goto out;
+ }
++
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+- err = -ENXIO;
+- if (mddev->ro == MD_RDONLY)
++ if (mddev->ro == MD_RDONLY) {
++ err = -ENXIO;
+ goto out;
++ }
++
+ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
++ }
++
++out:
++ if ((mddev->pers && !err) || did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
+- err = 0;
+ }
+-out:
++
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+ }
+--
+2.42.0
+
--- /dev/null
+From 98398d661c1a2663e6da04b6777de2ff13aefb83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Sep 2022 10:39:38 +0800
+Subject: md: introduce md_ro_state
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit f97a5528b21eb175d90dce2df9960c8d08e1be82 ]
+
+Introduce md_ro_state for mddev->ro, so it is easier to understand.
+
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Stable-dep-of: c9f7cb5b2bc9 ("md: don't leave 'MD_RECOVERY_FROZEN' in error path of md_set_readonly()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 152 ++++++++++++++++++++++++++----------------------
+ 1 file changed, 82 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a2d9856365958..2d04073174782 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -92,6 +92,18 @@ static int remove_and_add_spares(struct mddev *mddev,
+ struct md_rdev *this);
+ static void mddev_detach(struct mddev *mddev);
+
++enum md_ro_state {
++ MD_RDWR,
++ MD_RDONLY,
++ MD_AUTO_READ,
++ MD_MAX_STATE
++};
++
++static bool md_is_rdwr(struct mddev *mddev)
++{
++ return (mddev->ro == MD_RDWR);
++}
++
+ /*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+@@ -461,7 +473,7 @@ static blk_qc_t md_submit_bio(struct bio *bio)
+ if (!bio)
+ return BLK_QC_T_NONE;
+
+- if (mddev->ro == 1 && unlikely(rw == WRITE)) {
++ if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
+ if (bio_sectors(bio) != 0)
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+@@ -2680,7 +2692,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
+ int any_badblocks_changed = 0;
+ int ret = -1;
+
+- if (mddev->ro) {
++ if (!md_is_rdwr(mddev)) {
+ if (force_change)
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ return;
+@@ -3953,7 +3965,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
+ goto out_unlock;
+ }
+ rv = -EROFS;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ goto out_unlock;
+
+ /* request to change the personality. Need to ensure:
+@@ -4159,7 +4171,7 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers) {
+ if (mddev->pers->check_reshape == NULL)
+ err = -EBUSY;
+- else if (mddev->ro)
++ else if (!md_is_rdwr(mddev))
+ err = -EROFS;
+ else {
+ mddev->new_layout = n;
+@@ -4268,7 +4280,7 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers) {
+ if (mddev->pers->check_reshape == NULL)
+ err = -EBUSY;
+- else if (mddev->ro)
++ else if (!md_is_rdwr(mddev))
+ err = -EROFS;
+ else {
+ mddev->new_chunk_sectors = n >> 9;
+@@ -4391,13 +4403,13 @@ array_state_show(struct mddev *mddev, char *page)
+
+ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
+ switch(mddev->ro) {
+- case 1:
++ case MD_RDONLY:
+ st = readonly;
+ break;
+- case 2:
++ case MD_AUTO_READ:
+ st = read_auto;
+ break;
+- case 0:
++ case MD_RDWR:
+ spin_lock(&mddev->lock);
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ st = write_pending;
+@@ -4433,7 +4445,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ int err = 0;
+ enum array_state st = match_word(buf, array_states);
+
+- if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
++ if (mddev->pers && (st == active || st == clean) &&
++ mddev->ro != MD_RDONLY) {
+ /* don't take reconfig_mutex when toggling between
+ * clean and active
+ */
+@@ -4477,23 +4490,23 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers)
+ err = md_set_readonly(mddev, NULL);
+ else {
+- mddev->ro = 1;
++ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
+ err = do_md_run(mddev);
+ }
+ break;
+ case read_auto:
+ if (mddev->pers) {
+- if (mddev->ro == 0)
++ if (md_is_rdwr(mddev))
+ err = md_set_readonly(mddev, NULL);
+- else if (mddev->ro == 1)
++ else if (mddev->ro == MD_RDONLY)
+ err = restart_array(mddev);
+ if (err == 0) {
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ } else {
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ err = do_md_run(mddev);
+ }
+ break;
+@@ -4518,7 +4531,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ wake_up(&mddev->sb_wait);
+ err = 0;
+ } else {
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_disk_ro(mddev->gendisk, 0);
+ err = do_md_run(mddev);
+ }
+@@ -4819,7 +4832,7 @@ action_show(struct mddev *mddev, char *page)
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
+ type = "frozen";
+ else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
++ (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
+ type = "reshape";
+ else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+@@ -4892,11 +4905,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ }
+- if (mddev->ro == 2) {
++ if (mddev->ro == MD_AUTO_READ) {
+ /* A write to sync_action is enough to justify
+ * canceling read-auto mode
+ */
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+@@ -5124,8 +5137,7 @@ max_sync_store(struct mddev *mddev, const char *buf, size_t len)
+ goto out_unlock;
+
+ err = -EBUSY;
+- if (max < mddev->resync_max &&
+- mddev->ro == 0 &&
++ if (max < mddev->resync_max && md_is_rdwr(mddev) &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ goto out_unlock;
+
+@@ -5841,8 +5853,8 @@ int md_run(struct mddev *mddev)
+ continue;
+ sync_blockdev(rdev->bdev);
+ invalidate_bdev(rdev->bdev);
+- if (mddev->ro != 1 && rdev_read_only(rdev)) {
+- mddev->ro = 1;
++ if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
++ mddev->ro = MD_RDONLY;
+ if (mddev->gendisk)
+ set_disk_ro(mddev->gendisk, 1);
+ }
+@@ -5945,8 +5957,8 @@ int md_run(struct mddev *mddev)
+
+ mddev->ok_start_degraded = start_dirty_degraded;
+
+- if (start_readonly && mddev->ro == 0)
+- mddev->ro = 2; /* read-only, but switch on first write */
++ if (start_readonly && md_is_rdwr(mddev))
++ mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
+
+ err = pers->run(mddev);
+ if (err)
+@@ -6021,8 +6033,8 @@ int md_run(struct mddev *mddev)
+ mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
+- } else if (mddev->ro == 2) /* auto-readonly not meaningful */
+- mddev->ro = 0;
++ } else if (mddev->ro == MD_AUTO_READ)
++ mddev->ro = MD_RDWR;
+
+ atomic_set(&mddev->max_corr_read_errors,
+ MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
+@@ -6040,7 +6052,7 @@ int md_run(struct mddev *mddev)
+ if (rdev->raid_disk >= 0)
+ sysfs_link_rdev(mddev, rdev); /* failure here is OK */
+
+- if (mddev->degraded && !mddev->ro)
++ if (mddev->degraded && md_is_rdwr(mddev))
+ /* This ensures that recovering status is reported immediately
+ * via sysfs - until a lack of spares is confirmed.
+ */
+@@ -6130,7 +6142,7 @@ static int restart_array(struct mddev *mddev)
+ return -ENXIO;
+ if (!mddev->pers)
+ return -EINVAL;
+- if (!mddev->ro)
++ if (md_is_rdwr(mddev))
+ return -EBUSY;
+
+ rcu_read_lock();
+@@ -6149,7 +6161,7 @@ static int restart_array(struct mddev *mddev)
+ return -EROFS;
+
+ mddev->safemode = 0;
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_disk_ro(disk, 0);
+ pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
+ /* Kick recovery or resync if necessary */
+@@ -6176,7 +6188,7 @@ static void md_clean(struct mddev *mddev)
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->sb_flags = 0;
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+@@ -6227,7 +6239,7 @@ static void __md_stop_writes(struct mddev *mddev)
+ }
+ md_bitmap_flush(mddev);
+
+- if (mddev->ro == 0 &&
++ if (md_is_rdwr(mddev) &&
+ ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
+ mddev->sb_flags)) {
+ /* mark array as shutdown cleanly */
+@@ -6337,9 +6349,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ __md_stop_writes(mddev);
+
+ err = -ENXIO;
+- if (mddev->ro==1)
++ if (mddev->ro == MD_RDONLY)
+ goto out;
+- mddev->ro = 1;
++ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+@@ -6396,7 +6408,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ return -EBUSY;
+ }
+ if (mddev->pers) {
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ set_disk_ro(disk, 0);
+
+ __md_stop_writes(mddev);
+@@ -6413,8 +6425,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
+ mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
+
+- if (mddev->ro)
+- mddev->ro = 0;
++ if (!md_is_rdwr(mddev))
++ mddev->ro = MD_RDWR;
+ } else
+ mutex_unlock(&mddev->open_mutex);
+ /*
+@@ -7226,7 +7238,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->sync_thread)
+ return -EBUSY;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return -EROFS;
+
+ rdev_for_each(rdev, mddev) {
+@@ -7256,7 +7268,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
+ /* change the number of raid disks */
+ if (mddev->pers->check_reshape == NULL)
+ return -EINVAL;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return -EROFS;
+ if (raid_disks <= 0 ||
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
+@@ -7680,26 +7692,25 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ * The remaining ioctls are changing the state of the
+ * superblock, so we do not allow them on read-only arrays.
+ */
+- if (mddev->ro && mddev->pers) {
+- if (mddev->ro == 2) {
+- mddev->ro = 0;
+- sysfs_notify_dirent_safe(mddev->sysfs_state);
+- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- /* mddev_unlock will wake thread */
+- /* If a device failed while we were read-only, we
+- * need to make sure the metadata is updated now.
+- */
+- if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
+- mddev_unlock(mddev);
+- wait_event(mddev->sb_wait,
+- !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+- mddev_lock_nointr(mddev);
+- }
+- } else {
++ if (!md_is_rdwr(mddev) && mddev->pers) {
++ if (mddev->ro != MD_AUTO_READ) {
+ err = -EROFS;
+ goto unlock;
+ }
++ mddev->ro = MD_RDWR;
++ sysfs_notify_dirent_safe(mddev->sysfs_state);
++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++ /* mddev_unlock will wake thread */
++ /* If a device failed while we were read-only, we
++ * need to make sure the metadata is updated now.
++ */
++ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
++ mddev_unlock(mddev);
++ wait_event(mddev->sb_wait,
++ !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
++ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
++ mddev_lock_nointr(mddev);
++ }
+ }
+
+ switch (cmd) {
+@@ -7785,11 +7796,11 @@ static int md_set_read_only(struct block_device *bdev, bool ro)
+ * Transitioning to read-auto need only happen for arrays that call
+ * md_write_start and which are not ready for writes yet.
+ */
+- if (!ro && mddev->ro == 1 && mddev->pers) {
++ if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
+ err = restart_array(mddev);
+ if (err)
+ goto out_unlock;
+- mddev->ro = 2;
++ mddev->ro = MD_AUTO_READ;
+ }
+
+ out_unlock:
+@@ -8247,9 +8258,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ seq_printf(seq, "%s : %sactive", mdname(mddev),
+ mddev->pers ? "" : "in");
+ if (mddev->pers) {
+- if (mddev->ro==1)
++ if (mddev->ro == MD_RDONLY)
+ seq_printf(seq, " (read-only)");
+- if (mddev->ro==2)
++ if (mddev->ro == MD_AUTO_READ)
+ seq_printf(seq, " (auto-read-only)");
+ seq_printf(seq, " %s", mddev->pers->name);
+ }
+@@ -8509,10 +8520,10 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+ if (bio_data_dir(bi) != WRITE)
+ return true;
+
+- BUG_ON(mddev->ro == 1);
+- if (mddev->ro == 2) {
++ BUG_ON(mddev->ro == MD_RDONLY);
++ if (mddev->ro == MD_AUTO_READ) {
+ /* need to switch to read/write */
+- mddev->ro = 0;
++ mddev->ro = MD_RDWR;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ md_wakeup_thread(mddev->sync_thread);
+@@ -8563,7 +8574,7 @@ void md_write_inc(struct mddev *mddev, struct bio *bi)
+ {
+ if (bio_data_dir(bi) != WRITE)
+ return;
+- WARN_ON_ONCE(mddev->in_sync || mddev->ro);
++ WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
+ percpu_ref_get(&mddev->writes_pending);
+ }
+ EXPORT_SYMBOL(md_write_inc);
+@@ -8668,7 +8679,7 @@ void md_allow_write(struct mddev *mddev)
+ {
+ if (!mddev->pers)
+ return;
+- if (mddev->ro)
++ if (!md_is_rdwr(mddev))
+ return;
+ if (!mddev->pers->sync_request)
+ return;
+@@ -8717,7 +8728,7 @@ void md_do_sync(struct md_thread *thread)
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ return;
+- if (mddev->ro) {/* never try to sync a read-only array */
++ if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return;
+ }
+@@ -9185,9 +9196,9 @@ static int remove_and_add_spares(struct mddev *mddev,
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!test_bit(Journal, &rdev->flags)) {
+- if (mddev->ro &&
+- ! (rdev->saved_raid_disk >= 0 &&
+- !test_bit(Bitmap_sync, &rdev->flags)))
++ if (!md_is_rdwr(mddev) &&
++ !(rdev->saved_raid_disk >= 0 &&
++ !test_bit(Bitmap_sync, &rdev->flags)))
+ continue;
+
+ rdev->recovery_offset = 0;
+@@ -9285,7 +9296,8 @@ void md_check_recovery(struct mddev *mddev)
+ flush_signals(current);
+ }
+
+- if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
++ if (!md_is_rdwr(mddev) &&
++ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
+ if ( ! (
+ (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
+@@ -9304,7 +9316,7 @@ void md_check_recovery(struct mddev *mddev)
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
+- if (mddev->ro) {
++ if (!md_is_rdwr(mddev)) {
+ struct md_rdev *rdev;
+ if (!mddev->external && mddev->in_sync)
+ /* 'Blocked' flag not needed as failed devices
+--
+2.42.0
+
--- /dev/null
+From 41d9d986b860f50d13d59d9ee3b43f057c4d3e8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Nov 2023 00:29:47 -0800
+Subject: RDMA/bnxt_re: Correct module description string
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit 422b19f7f006e813ee0865aadce6a62b3c263c42 ]
+
+The word "Driver" is repeated twice in the "modinfo bnxt_re"
+output description. Fix it.
+
+Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://lore.kernel.org/r/1700555387-6277-1-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 7b85eef113fc0..c7ea2eedd60c6 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -70,7 +70,7 @@ static char version[] =
+ BNXT_RE_DESC "\n";
+
+ MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
+-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
++MODULE_DESCRIPTION(BNXT_RE_DESC);
+ MODULE_LICENSE("Dual BSD/GPL");
+
+ /* globals */
+--
+2.42.0
+
--- /dev/null
+From 5fd7e44d3eb0fed80c60bca6906018b95775ce3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Nov 2023 14:21:41 -0600
+Subject: RDMA/core: Fix umem iterator when PAGE_SIZE is greater then HCA pgsz
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+[ Upstream commit 4fbc3a52cd4d14de3793f4b2c721d7306ea84cf9 ]
+
+64k pages introduce the situation in this diagram when the HCA 4k page
+size is being used:
+
+ +-------------------------------------------+ <--- 64k aligned VA
+ | |
+ | HCA 4k page |
+ | |
+ +-------------------------------------------+
+ | o |
+ | |
+ | o |
+ | |
+ | o |
+ +-------------------------------------------+
+ | |
+ | HCA 4k page |
+ | |
+ +-------------------------------------------+ <--- Live HCA page
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| <--- offset
+ | | <--- VA
+ | MR data |
+ +-------------------------------------------+
+ | |
+ | HCA 4k page |
+ | |
+ +-------------------------------------------+
+ | o |
+ | |
+ | o |
+ | |
+ | o |
+ +-------------------------------------------+
+ | |
+ | HCA 4k page |
+ | |
+ +-------------------------------------------+
+
+The VA addresses coming from rdma-core in this diagram can be
+arbitrary, but for 64k pages, the VA may be offset by some number of HCA
+4k pages and followed by some number of HCA 4k pages.
+
+The current iterator doesn't account for either the preceding 4k pages or
+the following 4k pages.
+
+Fix the issue by extending the ib_block_iter to contain the number of DMA
+pages like comment [1] says and by using __sg_advance to start the
+iterator at the first live HCA page.
+
+The changes are contained in a parallel set of iterator start and next
+functions that are umem aware and specific to umem since there is one user
+of the rdma_for_each_block() without umem.
+
+These two fixes prevent the extra pages before and after the user MR
+data.
+
+Fix the preceding pages by using the __sg_advance field to start at the
+first 4k page containing MR data.
+
+Fix the following pages by saving the number of pgsz blocks in the
+iterator state and downcounting on each next.
+
+This fix allows for the elimination of the small page crutch noted in the
+Fixes.
+
+Fixes: 10c75ccb54e4 ("RDMA/umem: Prevent small pages from being returned by ib_umem_find_best_pgsz()")
+Link: https://lore.kernel.org/r/20231129202143.1434-2-shiraz.saleem@intel.com
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/umem.c | 6 ------
+ include/rdma/ib_umem.h | 9 ++++++++-
+ include/rdma/ib_verbs.h | 1 +
+ 3 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 957634eceba8f..8ce569bf7525e 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ return page_size;
+ }
+
+- /* rdma_for_each_block() has a bug if the page size is smaller than the
+- * page size used to build the umem. For now prevent smaller page sizes
+- * from being returned.
+- */
+- pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+-
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 5ae9dff74dac8..2381e482fab34 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ {
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
++ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
++ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
++}
++
++static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
++{
++ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+ }
+
+ /**
+@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+ */
+ #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
+ for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
+- __rdma_block_iter_next(biter);)
++ __rdma_umem_block_iter_next(biter);)
+
+ #ifdef CONFIG_INFINIBAND_USER_MEM
+
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 4ba642fc8a19a..fa13bf15feb3e 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2817,6 +2817,7 @@ struct ib_block_iter {
+ /* internal states */
+ struct scatterlist *__sg; /* sg holding the current aligned block */
+ dma_addr_t __dma_addr; /* unaligned DMA address of this block */
++ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
+ unsigned int __sg_nents; /* number of SG entries */
+ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
+ unsigned int __pg_bit; /* alignment of current block */
+--
+2.42.0
+
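For context, a hedged sketch of how a driver typically consumes the fixed
iterator; fill_pbl and the pbl array are hypothetical names, while
rdma_umem_for_each_dma_block() and rdma_block_iter_dma_address() are the
existing interfaces touched by this patch:

    /* Hypothetical helper: record the DMA address of every HCA-page-sized
     * block of a umem, e.g. to build an MR page list. With the fix above the
     * loop starts at the first block that actually holds MR data and visits
     * exactly ib_umem_num_dma_blocks(umem, pgsz) blocks, even when PAGE_SIZE
     * (e.g. 64k) is larger than the HCA page size (e.g. 4k). */
    static void fill_pbl(struct ib_umem *umem, u64 *pbl, unsigned long pgsz)
    {
            struct ib_block_iter biter;
            unsigned int i = 0;

            rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                    pbl[i++] = rdma_block_iter_dma_address(&biter);
    }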
--- /dev/null
+From 28288ac881f03809f6b0f0e9d701a8b8bafbefbc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Oct 2023 17:32:42 +0800
+Subject: RDMA/hns: Fix unnecessary err return when using invalid congest
+ control algorithm
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit efb9cbf66440482ceaa90493d648226ab7ec2ebf ]
+
+Add a default congestion control algorithm so that the driver won't return
+an error when the configured algorithm is invalid.
+
+Fixes: f91696f2f053 ("RDMA/hns: Support congestion control type selection according to the FW")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://lore.kernel.org/r/20231028093242.670325-1-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index f034021f3adc1..6d8f3aa9d6aa9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4586,10 +4586,15 @@ static int check_cong_type(struct ib_qp *ibqp,
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
+ default:
+- ibdev_err(&hr_dev->ib_dev,
+- "error type(%u) for congestion selection.\n",
+- hr_dev->caps.cong_type);
+- return -EINVAL;
++ ibdev_warn(&hr_dev->ib_dev,
++ "invalid type(%u) for congestion selection.\n",
++ hr_dev->caps.cong_type);
++ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
++ cong_alg->alg_sel = CONG_DCQCN;
++ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
++ cong_alg->dip_vld = DIP_INVALID;
++ cong_alg->wnd_mode_sel = WND_LIMIT;
++ break;
+ }
+
+ return 0;
+--
+2.42.0
+
--- /dev/null
+From 6994391a3cd39712c4b5be7f9ab958ea6b3af5ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Nov 2023 11:02:46 -0600
+Subject: RDMA/irdma: Add wait for suspend on SQD
+
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+
+[ Upstream commit bd6da690c27d75cae432c09162d054b34fa2156f ]
+
+Currently, there is no wait for the QP suspend to complete on a modify
+to SQD state. Add a wait, after the modify to SQD state, for the Suspend
+Complete AE. While we are at it, update the suspend timeout value in
+irdma_prep_tc_change to use IRDMA_EVENT_TIMEOUT_MS too.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Link: https://lore.kernel.org/r/20231114170246.238-3-shiraz.saleem@intel.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/hw.c | 6 +++++-
+ drivers/infiniband/hw/irdma/main.c | 2 +-
+ drivers/infiniband/hw/irdma/main.h | 2 +-
+ drivers/infiniband/hw/irdma/verbs.c | 21 +++++++++++++++++++++
+ drivers/infiniband/hw/irdma/verbs.h | 1 +
+ 5 files changed, 29 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 70dffa9a9f674..2357feddfbb9d 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -322,7 +322,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+- atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
++ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
++ wake_up(&iwqp->iwdev->suspend_wq);
++ }
++ if (iwqp->suspend_pending) {
++ iwqp->suspend_pending = false;
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
+index c556a36e76703..232db2c8b6ae1 100644
+--- a/drivers/infiniband/hw/irdma/main.c
++++ b/drivers/infiniband/hw/irdma/main.c
+@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+- IRDMA_EVENT_TIMEOUT);
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+ }
+
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index bd13cc38e5ae1..0e998e0801982 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -79,7 +79,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
+
+ #define MAX_DPC_ITERATIONS 128
+
+-#define IRDMA_EVENT_TIMEOUT 50000
++#define IRDMA_EVENT_TIMEOUT_MS 5000
+ #define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+ #define IRDMA_RST_TIMEOUT_HZ 4
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 1d57194476a7b..1af1df8a734b8 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -1082,6 +1082,21 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ return 0;
+ }
+
++static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
++{
++ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
++ !iwqp->suspend_pending,
++ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
++ iwqp->suspend_pending = false;
++ ibdev_warn(&iwqp->iwdev->ibdev,
++ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
++ iwqp->ibqp.qp_num, iwqp->last_aeq);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
+ /**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: qp's pointer for modify
+@@ -1330,6 +1345,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
++ iwqp->suspend_pending = true;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+@@ -1370,6 +1386,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
++ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
++ ret = irdma_wait_for_suspend(iwqp);
++ if (ret)
++ return ret;
++ }
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 5af3c8e9b3941..a934c985dbb4d 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -188,6 +188,7 @@ struct irdma_qp {
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
++ u8 suspend_pending : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+--
+2.42.0
+
--- /dev/null
+From 573c6d7ea297b7b73657b90fd296711342b16533 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Nov 2023 00:14:15 -0800
+Subject: RDMA/irdma: Avoid free the non-cqp_request scratch
+
+From: Shifeng Li <lishifeng@sangfor.com.cn>
+
+[ Upstream commit e3e82fcb79eeb3f1a88a89f676831773caff514a ]
+
+When creating ceq_0 during probing irdma, cqp.sc_cqp will be sent as a
+cqp_request to cqp->sc_cqp.sq_ring. If the request is pending when
+removing the irdma driver or unplugging its aux device, cqp.sc_cqp will be
+dereferenced as the wrong struct in irdma_free_pending_cqp_request().
+
+ PID: 3669 TASK: ffff88aef892c000 CPU: 28 COMMAND: "kworker/28:0"
+ #0 [fffffe0000549e38] crash_nmi_callback at ffffffff810e3a34
+ #1 [fffffe0000549e40] nmi_handle at ffffffff810788b2
+ #2 [fffffe0000549ea0] default_do_nmi at ffffffff8107938f
+ #3 [fffffe0000549eb8] do_nmi at ffffffff81079582
+ #4 [fffffe0000549ef0] end_repeat_nmi at ffffffff82e016b4
+ [exception RIP: native_queued_spin_lock_slowpath+1291]
+ RIP: ffffffff8127e72b RSP: ffff88aa841ef778 RFLAGS: 00000046
+ RAX: 0000000000000000 RBX: ffff88b01f849700 RCX: ffffffff8127e47e
+ RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffff83857ec0
+ RBP: ffff88afe3e4efc8 R8: ffffed15fc7c9dfa R9: ffffed15fc7c9dfa
+ R10: 0000000000000001 R11: ffffed15fc7c9df9 R12: 0000000000740000
+ R13: ffff88b01f849708 R14: 0000000000000003 R15: ffffed1603f092e1
+ ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0000
+ -- <NMI exception stack> --
+ #5 [ffff88aa841ef778] native_queued_spin_lock_slowpath at ffffffff8127e72b
+ #6 [ffff88aa841ef7b0] _raw_spin_lock_irqsave at ffffffff82c22aa4
+ #7 [ffff88aa841ef7c8] __wake_up_common_lock at ffffffff81257363
+ #8 [ffff88aa841ef888] irdma_free_pending_cqp_request at ffffffffa0ba12cc [irdma]
+ #9 [ffff88aa841ef958] irdma_cleanup_pending_cqp_op at ffffffffa0ba1469 [irdma]
+ #10 [ffff88aa841ef9c0] irdma_ctrl_deinit_hw at ffffffffa0b2989f [irdma]
+ #11 [ffff88aa841efa28] irdma_remove at ffffffffa0b252df [irdma]
+ #12 [ffff88aa841efae8] auxiliary_bus_remove at ffffffff8219afdb
+ #13 [ffff88aa841efb00] device_release_driver_internal at ffffffff821882e6
+ #14 [ffff88aa841efb38] bus_remove_device at ffffffff82184278
+ #15 [ffff88aa841efb88] device_del at ffffffff82179d23
+ #16 [ffff88aa841efc48] ice_unplug_aux_dev at ffffffffa0eb1c14 [ice]
+ #17 [ffff88aa841efc68] ice_service_task at ffffffffa0d88201 [ice]
+ #18 [ffff88aa841efde8] process_one_work at ffffffff811c589a
+ #19 [ffff88aa841efe60] worker_thread at ffffffff811c71ff
+ #20 [ffff88aa841eff10] kthread at ffffffff811d87a0
+ #21 [ffff88aa841eff50] ret_from_fork at ffffffff82e0022f
+
+Fixes: 44d9e52977a1 ("RDMA/irdma: Implement device initialization definitions")
+Link: https://lore.kernel.org/r/20231130081415.891006-1-lishifeng@sangfor.com.cn
+Suggested-by: "Ismail, Mustafa" <mustafa.ismail@intel.com>
+Signed-off-by: Shifeng Li <lishifeng@sangfor.com.cn>
+Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/hw.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 2357feddfbb9d..0359f415280c8 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -1169,7 +1169,6 @@ static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
+ enum irdma_status_code status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+- u64 scratch;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+@@ -1190,14 +1189,13 @@ static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf,
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi = vsi;
+- scratch = (uintptr_t)&rf->cqp.sc_cqp;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+- status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
++ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ }
+
+ if (status) {
+--
+2.42.0
+
--- /dev/null
+From 929ef026f6409f99cac8e77ac0c08022764bcaed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Nov 2023 11:02:45 -0600
+Subject: RDMA/irdma: Do not modify to SQD on error
+
+From: Mustafa Ismail <mustafa.ismail@intel.com>
+
+[ Upstream commit ba12ab66aa83a2340a51ad6e74b284269745138c ]
+
+Remove the modify to SQD before going to ERROR state. It is not needed.
+
+Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs")
+Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Link: https://lore.kernel.org/r/20231114170246.238-2-shiraz.saleem@intel.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/verbs.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 8ccbe761b8607..1d57194476a7b 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -1334,13 +1334,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
+- spin_unlock_irqrestore(&iwqp->lock, flags);
+- info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+- irdma_hw_modify_qp(iwdev, iwqp, &info, true);
+- spin_lock_irqsave(&iwqp->lock, flags);
+- }
+-
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata) {
+--
+2.42.0
+
--- /dev/null
+From 118ce3749af6bef901724d9901c8672211979dec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:43 +0100
+Subject: RDMA/rtrs-clt: Fix the max_send_wr setting
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit 6d09f6f7d7584e099633282ea915988914f86529 ]
+
+For each write request we need four send WRs: Request, Response,
+Memory Registration, and Local Invalidate.
+
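+A minimal user-space sketch of the send WR budgeting this change
+implies; the queue depth and device limit below are made-up numbers,
+not values taken from rtrs:
+
+  /* wr_budget.c - illustrative only, not rtrs code */
+  #include <stdio.h>
+
+  #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+  int main(void)
+  {
+          int queue_depth = 128;  /* assumed client queue depth */
+          int wr_limit = 4096;    /* assumed device max_qp_wr limit */
+
+          /* Per write request: REQ + RSP + MR registration + local
+           * invalidate, plus one extra WR to drain the QP. */
+          int max_send_wr = MIN(wr_limit, queue_depth * 4 + 1);
+
+          /* The receive budget is unchanged by this patch. */
+          int max_recv_wr = MIN(wr_limit, queue_depth * 3 + 1);
+
+          printf("max_send_wr=%d max_recv_wr=%d\n",
+                 max_send_wr, max_recv_wr);
+          return 0;
+  }
+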
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Reviewed-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-7-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index fac5e122fd372..c0950587f377a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -1682,7 +1682,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ clt_path->s.dev_ref++;
+ max_send_wr = min_t(int, wr_limit,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+- clt_path->queue_depth * 3 + 1);
++ clt_path->queue_depth * 4 + 1);
+ max_recv_wr = min_t(int, wr_limit,
+ clt_path->queue_depth * 3 + 1);
+ max_send_sge = 2;
+--
+2.42.0
+
--- /dev/null
+From ea110df8a9a06e931ff19a823d3d6f7357a027e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:44 +0100
+Subject: RDMA/rtrs-clt: Remove the warnings for req in_use check
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit 0c8bb6eb70ca41031f663b4481aac9ac78b53bc6 ]
+
+As we chain the WRs during a write request (memory registration,
+RDMA write, local invalidate), if only the last WR fails to send due
+to a send queue overrun, the server can still send back the reply,
+while the client has already marked req->in_use false on the error
+path in rtrs_clt_req after erroring out from rtrs_post_rdma_write_sg.
+
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Reviewed-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-8-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index c0950587f377a..3f4ef6e4a89be 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -383,7 +383,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ struct rtrs_clt_path *clt_path;
+ int err;
+
+- if (WARN_ON(!req->in_use))
++ if (!req->in_use)
+ return;
+ if (WARN_ON(!req->con))
+ return;
+--
+2.42.0
+
--- /dev/null
+From 6649fbb16629cc922a4d68ef08c1bf5400c2825e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:39 +0100
+Subject: RDMA/rtrs-clt: Start hb after path_up
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit 3e44a61b5db873612e20e7b7922468d7d1ac2d22 ]
+
+If we start the heartbeat (hb) too early, it can confuse the server
+side into closing the session.
+
+Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Reviewed-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-3-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-clt.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index afe8670f9e555..fac5e122fd372 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2341,8 +2341,6 @@ static int init_conns(struct rtrs_clt_path *clt_path)
+ if (err)
+ goto destroy;
+
+- rtrs_start_hb(&clt_path->s);
+-
+ return 0;
+
+ destroy:
+@@ -2616,6 +2614,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
+ goto out;
+ }
+ rtrs_clt_path_up(clt_path);
++ rtrs_start_hb(&clt_path->s);
+ out:
+ mutex_unlock(&clt_path->init_mutex);
+
+--
+2.42.0
+
--- /dev/null
+From 81cedcfd580d0eeee6d7d753168836b9723dfebc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:40 +0100
+Subject: RDMA/rtrs-srv: Check return values while processing info request
+
+From: Md Haris Iqbal <haris.iqbal@ionos.com>
+
+[ Upstream commit ed1e52aefa16f15dc2f04054a3baf11726a7460e ]
+
+While processing an info request, the srv_path may go to the CLOSING
+state because of one of the error events from RDMA. That state change
+should be picked up when process_info_req tries to change the state,
+by checking the return value. If the state change call in
+process_info_req fails, we fail the processing.
+
+We should also check the return value for rtrs_srv_path_up, since it
+sends a link event to the client above, and the client can fail for any
+reason.
+
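+A minimal user-space sketch of the error-propagation pattern used
+here; the names (link_up_notify, path_up) are made up and this is not
+rtrs code:
+
+  /* propagate_sketch.c - illustrative only */
+  #include <stdio.h>
+
+  /* Before the fix the notification returned void, so a failing
+   * upper layer could never be noticed.  Returning an error code
+   * lets the caller refuse to mark the path established. */
+  static int link_up_notify(int fail)
+  {
+          return fail ? -1 : 0;
+  }
+
+  static int path_up(int fail)
+  {
+          int err = link_up_notify(fail);
+
+          if (!err)
+                  printf("path established\n");
+          return err;
+  }
+
+  int main(void)
+  {
+          return path_up(0) ? 1 : 0;
+  }
+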
+Fixes: 9cb837480424 ("RDMA/rtrs: server: main functionality")
+Signed-off-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-4-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-srv.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 9f7f694036f72..43de2895f1b1d 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -722,20 +722,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+ WARN_ON(wc->opcode != IB_WC_SEND);
+ }
+
+-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
++static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+ {
+ struct rtrs_srv *srv = srv_path->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+- int up;
++ int up, ret = 0;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+- ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
++ ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+- srv_path->established = true;
++ if (!ret)
++ srv_path->established = true;
++
++ return ret;
+ }
+
+ static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
+@@ -864,7 +867,12 @@ static int process_info_req(struct rtrs_srv_con *con,
+ goto iu_free;
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+- rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
++ if (!err) {
++ rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
++ goto iu_free;
++ }
++
+ rtrs_srv_start_hb(srv_path);
+
+ /*
+@@ -873,7 +881,11 @@ static int process_info_req(struct rtrs_srv_con *con,
+ * all connections are successfully established. Thus, simply notify
+ * listener with a proper event if we are the first path.
+ */
+- rtrs_srv_path_up(srv_path);
++ err = rtrs_srv_path_up(srv_path);
++ if (err) {
++ rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
++ goto iu_free;
++ }
+
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
+--
+2.42.0
+
--- /dev/null
+From 13d0387950eee658ff23699c5955d13d23f8c64d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:42 +0100
+Subject: RDMA/rtrs-srv: Destroy path files after making sure no IOs in-flight
+
+From: Md Haris Iqbal <haris.iqbal@ionos.com>
+
+[ Upstream commit c4d32e77fc1006f99eeb78417efc3d81a384072a ]
+
+Destroying path files may lead to the freeing of rdma_stats. This creates
+the following race.
+
+An IO is in flight, or has just passed the session state check in
+process_read/process_write. The close_work gets triggered, and
+rtrs_srv_close_work() destroys the path files, which frees the
+rdma_stats. After that, process_read/process_write resumes and tries
+to update the stats through rtrs_srv_update_rdma_stats().
+
+This commit solves the problem by moving the destruction of the path
+files to a later point, after all in-flight IO has completed. That is
+ensured by the QP drain and by waiting for all in-flights via ops_id.
+
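+A user-space sketch of the ordering the fix enforces (free shared
+stats only after all in-flight work has drained); all names are made
+up and this is not rtrs code (compile with -lpthread):
+
+  /* teardown_order.c - illustrative only */
+  #include <pthread.h>
+  #include <stdlib.h>
+
+  static struct { long bytes; } *stats;
+  static int inflight;
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
+
+  static void *io_worker(void *arg)
+  {
+          (void)arg;
+
+          pthread_mutex_lock(&lock);
+          inflight++;
+          pthread_mutex_unlock(&lock);
+
+          stats->bytes += 4096;   /* stats used while IO is in flight */
+
+          pthread_mutex_lock(&lock);
+          if (--inflight == 0)
+                  pthread_cond_signal(&idle);
+          pthread_mutex_unlock(&lock);
+          return NULL;
+  }
+
+  static void close_path(void)
+  {
+          /* The fix: wait for every in-flight IO to complete ... */
+          pthread_mutex_lock(&lock);
+          while (inflight)
+                  pthread_cond_wait(&idle, &lock);
+          pthread_mutex_unlock(&lock);
+
+          /* ... and only then free what the IO path dereferences. */
+          free(stats);
+          stats = NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t t;
+
+          stats = calloc(1, sizeof(*stats));
+          if (!stats || pthread_create(&t, NULL, io_worker, NULL))
+                  return 1;
+          pthread_join(t, NULL);
+          close_path();
+          return 0;
+  }
+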
+Fixes: 9cb837480424 ("RDMA/rtrs: server: main functionality")
+Signed-off-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Santosh Kumar Pradhan <santosh.pradhan@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-6-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-srv.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 3a61615a8ea6a..27bf2b2da9fd6 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -1541,7 +1541,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
+
+ srv_path = container_of(work, typeof(*srv_path), close_work);
+
+- rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
+
+ for (i = 0; i < srv_path->s.con_num; i++) {
+@@ -1561,6 +1560,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
+ /* Wait for all completion */
+ wait_for_completion(&srv_path->complete_done);
+
++ rtrs_srv_destroy_path_files(srv_path);
++
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_path_down(srv_path);
+
+--
+2.42.0
+
--- /dev/null
+From 95bdc438506f144ab2412c722229416fea02c638 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:38 +0100
+Subject: RDMA/rtrs-srv: Do not unconditionally enable irq
+
+From: Jack Wang <jinpu.wang@ionos.com>
+
+[ Upstream commit 3ee7ecd712048ade6482bea4b2f3dcaf039c0348 ]
+
+When IO is completed, rtrs can be called in softirq context;
+unconditionally re-enabling interrupts there could cause a panic.
+
+To be on the safe side, use spin_lock_irqsave() and
+spin_unlock_irqrestore() instead.
+
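+A minimal kernel-style sketch of the locking pattern this switches to;
+the names below are made up and this is only a fragment meant to show
+the idiom, not the rtrs code:
+
+  #include <linux/spinlock.h>
+  #include <linux/types.h>
+
+  static DEFINE_SPINLOCK(demo_lock);
+  static int demo_state;
+
+  /* May be reached from process context or from softirq (completion)
+   * context, so save and restore the irq state instead of
+   * unconditionally re-enabling interrupts on unlock. */
+  static bool demo_change_state(int new_state)
+  {
+          unsigned long flags;
+          bool changed = false;
+
+          spin_lock_irqsave(&demo_lock, flags);
+          if (demo_state != new_state) {
+                  demo_state = new_state;
+                  changed = true;
+          }
+          spin_unlock_irqrestore(&demo_lock, flags);
+
+          return changed;
+  }
+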
+Fixes: 9cb837480424 ("RDMA/rtrs: server: main functionality")
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Florian-Ewald Mueller <florian-ewald.mueller@ionos.com>
+Signed-off-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-2-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-srv.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 733116554e0bc..9f7f694036f72 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -72,8 +72,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ {
+ enum rtrs_srv_state old_state;
+ bool changed = false;
++ unsigned long flags;
+
+- spin_lock_irq(&srv_path->state_lock);
++ spin_lock_irqsave(&srv_path->state_lock, flags);
+ old_state = srv_path->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+@@ -94,7 +95,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
+ }
+ if (changed)
+ srv_path->state = new_state;
+- spin_unlock_irq(&srv_path->state_lock);
++ spin_unlock_irqrestore(&srv_path->state_lock, flags);
+
+ return changed;
+ }
+--
+2.42.0
+
--- /dev/null
+From 1aecd2d1c30347823a7d172c809a2b3297a9cc2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Nov 2023 16:41:41 +0100
+Subject: RDMA/rtrs-srv: Free srv_mr iu only when always_invalidate is true
+
+From: Md Haris Iqbal <haris.iqbal@ionos.com>
+
+[ Upstream commit 3a71cd6ca0ce33d1af019ecf1d7167406fa54400 ]
+
+Since srv_mr->iu is allocated and used only when always_invalidate is
+true, free it only when always_invalidate is true.
+
+Fixes: 9cb837480424 ("RDMA/rtrs: server: main functionality")
+Signed-off-by: Md Haris Iqbal <haris.iqbal@ionos.com>
+Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
+Signed-off-by: Grzegorz Prajsner <grzegorz.prajsner@ionos.com>
+Link: https://lore.kernel.org/r/20231120154146.920486-5-haris.iqbal@ionos.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/ulp/rtrs/rtrs-srv.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 43de2895f1b1d..3a61615a8ea6a 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -556,7 +556,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &srv_path->mrs[i];
+- rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
++ if (always_invalidate)
++ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
++
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+--
+2.42.0
+
--- /dev/null
+From a48ec7a80637047a389d2d407ecdddf4e65c0501 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Nov 2023 10:02:23 +0100
+Subject: riscv: fix misaligned access handling of C.SWSP and C.SDSP
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Clément Léger <cleger@rivosinc.com>
+
+[ Upstream commit 22e0eb04837a63af111fae35a92f7577676b9bc8 ]
+
+This is a backport of a fix that was done in OpenSBI: ec0559eb315b
+("lib: sbi_misaligned_ldst: Fix handling of C.SWSP and C.SDSP").
+
+Unlike C.LWSP/C.LDSP, these encodings can be used with the zero
+register, so checking that the rs2 field is non-zero is unnecessary.
+
+Additionally, the previous check was incorrect since it was checking
+the immediate field of the instruction instead of the rs2 field.
+
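+A small user-space sketch of the field extraction involved; the bit
+positions follow the RISC-V compressed CSS store format
+(inst[15:13] funct3, inst[12:7] imm, inst[6:2] rs2, inst[1:0] op), and
+the 0xC02A encoding of "c.swsp a0, 0(sp)" is hand-assembled here, so
+treat it as illustrative rather than authoritative:
+
+  /* css_fields.c - illustrative only, not the kernel code */
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define CSS_RS2(insn)  (((insn) >> 2) & 0x1f)  /* real rs2 field   */
+  #define RD_FIELD(insn) (((insn) >> 7) & 0x1f)  /* old, wrong check */
+
+  int main(void)
+  {
+          uint16_t insn = 0xC02A; /* c.swsp a0, 0(sp) (assumed) */
+
+          /* Correct: rs2 lives in inst[6:2] (10 == a0 here). */
+          printf("rs2 field  = %d\n", CSS_RS2(insn));
+
+          /* The removed check read inst[11:7], which is part of the
+           * immediate for CSS stores.  It is 0 here, so the old code
+           * wrongly refused to emulate this valid store. */
+          printf("inst[11:7] = %d\n", RD_FIELD(insn));
+          return 0;
+  }
+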
+Fixes: 956d705dd279 ("riscv: Unaligned load/store handling for M_MODE")
+Signed-off-by: Clément Léger <cleger@rivosinc.com>
+Link: https://lore.kernel.org/r/20231103090223.702340-1-cleger@rivosinc.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/traps_misaligned.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
+index 46c4dafe3ba0e..b246c3dc69930 100644
+--- a/arch/riscv/kernel/traps_misaligned.c
++++ b/arch/riscv/kernel/traps_misaligned.c
+@@ -344,16 +344,14 @@ int handle_misaligned_store(struct pt_regs *regs)
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
+ len = 8;
+ val.data_ulong = GET_RS2C(insn, regs);
+ #endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ val.data_ulong = GET_RS2S(insn, regs);
+- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+- ((insn >> SH_RD) & 0x1f)) {
++ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
+ len = 4;
+ val.data_ulong = GET_RS2C(insn, regs);
+ } else {
+--
+2.42.0
+
--- /dev/null
+From 7eed23bb2f4827397a7bf11c70d23085b9fe0549 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 16:19:41 +0800
+Subject: scsi: be2iscsi: Fix a memleak in beiscsi_init_wrb_handle()
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 235f2b548d7f4ac5931d834f05d3f7f5166a2e72 ]
+
+When an error occurs in the for loop of beiscsi_init_wrb_handle(), we
+should free phwi_ctxt->be_wrbq before returning an error code to
+prevent a potential memory leak.
+
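+A minimal user-space sketch of the unwind pattern the fix completes:
+when per-entry setup fails part-way, tear down the entries that were
+set up and also free the outer array itself. The names are made up and
+this is not the be2iscsi code:
+
+  /* unwind_sketch.c - illustrative only */
+  #include <stdlib.h>
+
+  #define NR_Q 8
+
+  struct ctx {
+          int **wrbq;     /* outer array, allocated first */
+  };
+
+  static int ctx_init(struct ctx *c)
+  {
+          int i, j;
+
+          c->wrbq = calloc(NR_Q, sizeof(*c->wrbq));
+          if (!c->wrbq)
+                  return -1;
+
+          for (i = 0; i < NR_Q; i++) {
+                  c->wrbq[i] = malloc(64);
+                  if (!c->wrbq[i])
+                          goto err_free;
+          }
+          return 0;
+
+  err_free:
+          for (j = 0; j < i; j++)
+                  free(c->wrbq[j]);
+          free(c->wrbq);  /* the fix: do not leak the outer array */
+          c->wrbq = NULL;
+          return -1;
+  }
+
+  int main(void)
+  {
+          struct ctx c;
+
+          if (ctx_init(&c))
+                  return 1;
+          /* success-path teardown is omitted from this sketch */
+          return 0;
+  }
+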
+Fixes: a7909b396ba7 ("[SCSI] be2iscsi: Fix dynamic CID allocation Mechanism in driver")
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Link: https://lore.kernel.org/r/20231123081941.24854-1-dinghao.liu@zju.edu.cn
+Reviewed-by: Mike Christie <michael.christie@oracle.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/be2iscsi/be_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 7974c1326d461..27e73cd54beaa 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -2691,6 +2691,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+ kfree(pwrb_context->pwrb_handle_base);
+ kfree(pwrb_context->pwrb_handle_basestd);
+ }
++ kfree(phwi_ctxt->be_wrbq);
+ return -ENOMEM;
+ }
+
+--
+2.42.0
+
psample-require-cap_net_admin-when-joining-packets-g.patch
net-add-missing-kdoc-for-struct-genl_multicast_group.patch
drop_monitor-require-cap_sys_admin-when-joining-even.patch
+tee-optee-fix-supplicant-based-device-enumeration.patch
+rdma-hns-fix-unnecessary-err-return-when-using-inval.patch
+rdma-irdma-do-not-modify-to-sqd-on-error.patch
+rdma-irdma-add-wait-for-suspend-on-sqd.patch
+arm64-dts-rockchip-expand-reg-size-of-vdec-node-for-.patch
+rdma-rtrs-srv-do-not-unconditionally-enable-irq.patch
+rdma-rtrs-clt-start-hb-after-path_up.patch
+rdma-rtrs-srv-check-return-values-while-processing-i.patch
+rdma-rtrs-srv-free-srv_mr-iu-only-when-always_invali.patch
+rdma-rtrs-srv-destroy-path-files-after-making-sure-n.patch
+rdma-rtrs-clt-fix-the-max_send_wr-setting.patch
+rdma-rtrs-clt-remove-the-warnings-for-req-in_use-che.patch
+rdma-bnxt_re-correct-module-description-string.patch
+hwmon-acpi_power_meter-fix-4.29-mw-bug.patch
+hwmon-nzxt-kraken2-fix-error-handling-path-in-kraken.patch
+asoc-wm_adsp-fix-memleak-in-wm_adsp_buffer_populate.patch
+rdma-core-fix-umem-iterator-when-page_size-is-greate.patch
+rdma-irdma-avoid-free-the-non-cqp_request-scratch.patch
+arm64-dts-imx8mq-drop-usb3-resume-missing-cas-from-u.patch
+arm64-dts-imx8mp-imx8mq-add-parkmode-disable-ss-quir.patch
+arm-dts-imx6ul-pico-describe-the-ethernet-phy-clock.patch
+tracing-fix-a-warning-when-allocating-buffered-event.patch
+scsi-be2iscsi-fix-a-memleak-in-beiscsi_init_wrb_hand.patch
+arm-imx-check-return-value-of-devm_kasprintf-in-imx_.patch
+arm-dts-imx7-declare-timers-compatible-with-fsl-imx6.patch
+arm-dts-imx28-xea-pass-the-model-property.patch
+riscv-fix-misaligned-access-handling-of-c.swsp-and-c.patch
+md-introduce-md_ro_state.patch
+md-don-t-leave-md_recovery_frozen-in-error-path-of-m.patch
--- /dev/null
+From 1b715687801c3bb668dd29a5dd08555f65ee07c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Nov 2023 13:00:55 +0530
+Subject: tee: optee: Fix supplicant based device enumeration
+
+From: Sumit Garg <sumit.garg@linaro.org>
+
+[ Upstream commit 7269cba53d906cf257c139d3b3a53ad272176bca ]
+
+Currently, supplicant-dependent optee device enumeration only registers
+devices when tee-supplicant is invoked for the first time, but it
+forgets to remove them when the tee-supplicant daemon stops running and
+closes its context gracefully. This leads to the following error for
+the fTPM driver during reboot/shutdown:
+
+[ 73.466791] tpm tpm0: ftpm_tee_tpm_op_send: SUBMIT_COMMAND invoke error: 0xffff3024
+
+Fix this by adding an attribute for supplicant-dependent devices so
+that the user-space service can detect and detach such devices before
+closing the supplicant:
+
+$ for dev in /sys/bus/tee/devices/*; do if [[ -f "$dev/need_supplicant" && -f "$dev/driver/unbind" ]]; \
+ then echo $(basename "$dev") > $dev/driver/unbind; fi done
+
+Reported-by: Jan Kiszka <jan.kiszka@siemens.com>
+Closes: https://github.com/OP-TEE/optee_os/issues/6094
+Fixes: 5f178bb71e3a ("optee: enable support for multi-stage bus enumeration")
+Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
+Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+Acked-by: Jerome Forissier <jerome.forissier@linaro.org>
+[jw: fixed up Date documentation]
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ABI/testing/sysfs-bus-optee-devices | 9 +++++++++
+ drivers/tee/optee/device.c | 17 +++++++++++++++--
+ 2 files changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
+index 0f58701367b66..af31e5a22d89f 100644
+--- a/Documentation/ABI/testing/sysfs-bus-optee-devices
++++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
+@@ -6,3 +6,12 @@ Description:
+ OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
+ matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
+ are free to create needed API under optee-ta-<uuid> directory.
++
++What: /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
++Date: November 2023
++KernelVersion: 6.7
++Contact: op-tee@lists.trustedfirmware.org
++Description:
++ Allows to distinguish whether an OP-TEE based TA/device requires user-space
++ tee-supplicant to function properly or not. This attribute will be present for
++ devices which depend on tee-supplicant to be running.
+diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
+index a74d82e230e36..8957065ea7307 100644
+--- a/drivers/tee/optee/device.c
++++ b/drivers/tee/optee/device.c
+@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
+ kfree(optee_device);
+ }
+
+-static int optee_register_device(const uuid_t *device_uuid)
++static ssize_t need_supplicant_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ return 0;
++}
++
++static DEVICE_ATTR_RO(need_supplicant);
++
++static int optee_register_device(const uuid_t *device_uuid, u32 func)
+ {
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+@@ -83,6 +92,10 @@ static int optee_register_device(const uuid_t *device_uuid)
+ put_device(&optee_device->dev);
+ }
+
++ if (func == PTA_CMD_GET_DEVICES_SUPP)
++ device_create_file(&optee_device->dev,
++ &dev_attr_need_supplicant);
++
+ return rc;
+ }
+
+@@ -143,7 +156,7 @@ static int __optee_enumerate_devices(u32 func)
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+- rc = optee_register_device(&device_uuid[idx]);
++ rc = optee_register_device(&device_uuid[idx], func);
+ if (rc)
+ goto out_shm;
+ }
+--
+2.42.0
+
--- /dev/null
+From 5ff45e88af820df2c5b17781f3e0ff923246d2fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Dec 2023 17:17:35 +0100
+Subject: tracing: Fix a warning when allocating buffered events fails
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+[ Upstream commit 34209fe83ef8404353f91ab4ea4035dbc9922d04 ]
+
+Function trace_buffered_event_disable() produces an unexpected warning
+when the previous call to trace_buffered_event_enable() fails to
+allocate pages for buffered events.
+
+The situation can occur as follows:
+
+* The counter trace_buffered_event_ref is at 0.
+
+* The soft mode gets enabled for some event and
+ trace_buffered_event_enable() is called. The function increments
+ trace_buffered_event_ref to 1 and starts allocating event pages.
+
+* The allocation fails for some page and trace_buffered_event_disable()
+ is called for cleanup.
+
+* Function trace_buffered_event_disable() decrements
+ trace_buffered_event_ref back to 0, recognizes that it was the last
+ use of buffered events and frees all allocated pages.
+
+* The control goes back to trace_buffered_event_enable() which returns.
+ The caller of trace_buffered_event_enable() has no information that
+ the function actually failed.
+
+* Some time later, the soft mode is disabled for the same event.
+ Function trace_buffered_event_disable() is called. It warns on
+ "WARN_ON_ONCE(!trace_buffered_event_ref)" and returns.
+
+Buffered events are just an optimization and can handle failures. Make
+trace_buffered_event_enable() exit on the first failure and leave any
+cleanup to when trace_buffered_event_disable() is called.
+
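+A minimal user-space sketch of the resulting pattern: enable() gives
+up at the first allocation failure but keeps its reference, and
+disable() copes with a partially allocated (or empty) buffer set.
+Names and sizes are made up; this is not the tracing code itself:
+
+  /* buffered_events_sketch.c - illustrative only */
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define NR_CPUS 4
+
+  static void *events[NR_CPUS];
+  static int events_ref;
+
+  static void buffered_events_enable(void)
+  {
+          int cpu;
+
+          if (++events_ref != 1)
+                  return;
+
+          for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                  events[cpu] = malloc(4096);
+                  /* Just an optimization: stop here on failure and
+                   * leave the cleanup to the disable path. */
+                  if (!events[cpu]) {
+                          fprintf(stderr, "no buffer for cpu %d\n", cpu);
+                          break;
+                  }
+          }
+  }
+
+  static void buffered_events_disable(void)
+  {
+          int cpu;
+
+          if (!events_ref || --events_ref)
+                  return;
+
+          for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                  free(events[cpu]);      /* free(NULL) is a no-op */
+                  events[cpu] = NULL;
+          }
+  }
+
+  int main(void)
+  {
+          buffered_events_enable();
+          buffered_events_disable();
+          return 0;
+  }
+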
+Link: https://lore.kernel.org/all/20231127151248.7232-2-petr.pavlu@suse.com/
+Link: https://lkml.kernel.org/r/20231205161736.19663-3-petr.pavlu@suse.com
+
+Fixes: 0fc1b09ff1ff ("tracing: Use temp buffer when filtering events")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c35c805e4ab15..171efe3f29db4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2692,8 +2692,11 @@ void trace_buffered_event_enable(void)
+ for_each_tracing_cpu(cpu) {
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+- if (!page)
+- goto failed;
++ /* This is just an optimization and can handle failures */
++ if (!page) {
++ pr_err("Failed to allocate event buffer\n");
++ break;
++ }
+
+ event = page_address(page);
+ memset(event, 0, sizeof(*event));
+@@ -2707,10 +2710,6 @@ void trace_buffered_event_enable(void)
+ WARN_ON_ONCE(1);
+ preempt_enable();
+ }
+-
+- return;
+- failed:
+- trace_buffered_event_disable();
+ }
+
+ static void enable_trace_buffered_event(void *data)
+--
+2.42.0
+