--- /dev/null
+From b76bbf835d8945080b22b52fc1e6f41cde06865d Mon Sep 17 00:00:00 2001
+From: Andrejs Cainikovs <andrejs.cainikovs@toradex.com>
+Date: Fri, 20 Oct 2023 17:30:22 +0200
+Subject: ARM: dts: imx6q-apalis: add can power-up delay on ixora board
+
+From: Andrejs Cainikovs <andrejs.cainikovs@toradex.com>
+
+commit b76bbf835d8945080b22b52fc1e6f41cde06865d upstream.
+
+Newer variants of Ixora boards require a power-up delay of up to 1 ms
+when powering up the CAN transceiver.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrejs Cainikovs <andrejs.cainikovs@toradex.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-ixora-v1.2.dts
+@@ -76,6 +76,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can1_power>;
+ regulator-name = "can1_supply";
++ startup-delay-us = <1000>;
+ };
+
+ reg_can2_supply: regulator-can2-supply {
+@@ -85,6 +86,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enable_can2_power>;
+ regulator-name = "can2_supply";
++ startup-delay-us = <1000>;
+ };
+ };
+
--- /dev/null
+From cc25bd06c16aa582596a058d375b2e3133f79b93 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Wed, 13 Dec 2023 18:31:29 +0100
+Subject: ARM: dts: qcom: sdx55: fix pdc '#interrupt-cells'
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit cc25bd06c16aa582596a058d375b2e3133f79b93 upstream.
+
+The Qualcomm PDC interrupt controller binding expects two cells in
+interrupt specifiers.
+
+Fixes: 9d038b2e62de ("ARM: dts: qcom: Add SDX55 platform and MTP board support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20231213173131.29436-2-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/qcom/qcom-sdx55.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+@@ -619,7 +619,7 @@
+ compatible = "qcom,sdx55-pdc", "qcom,pdc";
+ reg = <0x0b210000 0x30000>;
+ qcom,pdc-ranges = <0 179 52>;
+- #interrupt-cells = <3>;
++ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
--- /dev/null
+From d0ec3c4c11c3b30e1f2d344973b2a7bf0f986734 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:21 +0100
+Subject: ARM: dts: qcom: sdx55: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit d0ec3c4c11c3b30e1f2d344973b2a7bf0f986734 upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: fea4b41022f3 ("ARM: dts: qcom: sdx55: Add USB3 and PHY support")
+Cc: stable@vger.kernel.org # 5.12
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-2-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/qcom/qcom-sdx55.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+@@ -594,8 +594,8 @@
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 158 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 157 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From 84228d5e29dbc7a6be51e221000e1d122125826c Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Wed, 6 Dec 2023 23:15:54 +0100
+Subject: ARM: dts: samsung: exynos4210-i9100: Unconditionally enable LDO12
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit 84228d5e29dbc7a6be51e221000e1d122125826c upstream.
+
+The kernel hangs for a good 12 seconds without any info being printed to
+dmesg, very early in the boot process, if this regulator is not enabled.
+
+Force-enable it to work around this issue, until we know more about the
+underlying problem.
+
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Fixes: 8620cc2f99b7 ("ARM: dts: exynos: Add devicetree file for the Galaxy S2")
+Cc: stable@vger.kernel.org # v5.8+
+Link: https://lore.kernel.org/r/20231206221556.15348-2-paul@crapouillou.net
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/samsung/exynos4210-i9100.dts | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/samsung/exynos4210-i9100.dts
+@@ -527,6 +527,14 @@
+ regulator-name = "VT_CAM_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++
++ /*
++ * Force-enable this regulator; otherwise the
++ * kernel hangs very early in the boot process
++ * for about 12 seconds, without apparent
++ * reason.
++ */
++ regulator-always-on;
+ };
+
+ vcclcd_reg: LDO13 {
--- /dev/null
+From cc1ec484f2d0f464ad11b56fe3de2589c23f73ec Mon Sep 17 00:00:00 2001
+From: Stephan Gerhold <stephan@gerhold.net>
+Date: Mon, 4 Dec 2023 10:46:11 +0100
+Subject: arm64: dts: qcom: Add missing vio-supply for AW2013
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+commit cc1ec484f2d0f464ad11b56fe3de2589c23f73ec upstream.
+
+Add the missing vio-supply to all usages of the AW2013 LED controller
+to ensure that the regulator needed for pull-up of the interrupt and
+I2C lines is really turned on. While this seems to have worked fine so
+far, some of these regulators are not guaranteed to be always-on. For
+example, pm8916_l6 is typically turned off together with the display
+if there aren't any other devices (e.g. sensors) keeping it always-on.
+
+Cc: stable@vger.kernel.org # 6.6
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Link: https://lore.kernel.org/r/20231204-qcom-aw2013-vio-v1-1-5d264bb5c0b2@gerhold.net
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts | 1 +
+ arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts | 1 +
+ arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts | 1 +
+ arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts | 1 +
+ arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts | 1 +
+ 5 files changed, 5 insertions(+)
+
+--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+@@ -88,6 +88,7 @@
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l17>;
++ vio-supply = <&pm8916_l6>;
+
+ led@0 {
+ reg = <0>;
+--- a/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-wingtech-wt88047.dts
+@@ -118,6 +118,7 @@
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l16>;
++ vio-supply = <&pm8916_l5>;
+
+ led@0 {
+ reg = <0>;
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-mido.dts
+@@ -111,6 +111,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-tissot.dts
+@@ -104,6 +104,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+--- a/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
++++ b/arch/arm64/boot/dts/qcom/msm8953-xiaomi-vince.dts
+@@ -113,6 +113,7 @@
+ reg = <0x45>;
+
+ vcc-supply = <&pm8953_l10>;
++ vio-supply = <&pm8953_l5>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
--- /dev/null
+From 7c45b6ddbcff01f9934d11802010cfeb0879e693 Mon Sep 17 00:00:00 2001
+From: Stephan Gerhold <stephan@gerhold.net>
+Date: Mon, 4 Dec 2023 11:21:20 +0100
+Subject: arm64: dts: qcom: msm8916: Make blsp_dma controlled-remotely
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+commit 7c45b6ddbcff01f9934d11802010cfeb0879e693 upstream.
+
+The blsp_dma controller is shared between the different subsystems,
+which is why it is already initialized by the firmware. We should not
+reinitialize it from Linux, to avoid causing other potential users of
+the DMA engine to misbehave.
+
+In mainline this can be described using the "qcom,controlled-remotely"
+property. In the downstream/vendor kernel from Qualcomm there is an
+opposite "qcom,managed-locally" property. This property is *not* set
+for the qcom,sps-dma@7884000 [1] so adding "qcom,controlled-remotely"
+upstream matches the behavior of the downstream/vendor kernel.
+
+Adding this seems to fix some weird issues with UART where both
+input/output becomes garbled with certain obscure firmware versions on
+some devices.
+
+[1]: https://git.codelinaro.org/clo/la/kernel/msm-3.10/-/blob/LA.BR.1.2.9.1-02310-8x16.0/arch/arm/boot/dts/qcom/msm8916.dtsi#L1466-1472
+
+Cc: stable@vger.kernel.org # 6.5
+Fixes: a0e5fb103150 ("arm64: dts: qcom: Add msm8916 BLSP device nodes")
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Link: https://lore.kernel.org/r/20231204-msm8916-blsp-dma-remote-v1-1-3e49c8838c8d@gerhold.net
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/msm8916.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -2085,6 +2085,7 @@
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
--- /dev/null
+From 4bbda9421f316efdaef5dbf642e24925ef7de130 Mon Sep 17 00:00:00 2001
+From: Stephan Gerhold <stephan@gerhold.net>
+Date: Mon, 4 Dec 2023 11:21:21 +0100
+Subject: arm64: dts: qcom: msm8939: Make blsp_dma controlled-remotely
+
+From: Stephan Gerhold <stephan@gerhold.net>
+
+commit 4bbda9421f316efdaef5dbf642e24925ef7de130 upstream.
+
+The blsp_dma controller is shared between the different subsystems,
+which is why it is already initialized by the firmware. We should not
+reinitialize it from Linux, to avoid causing other potential users of
+the DMA engine to misbehave.
+
+In mainline this can be described using the "qcom,controlled-remotely"
+property. In the downstream/vendor kernel from Qualcomm there is an
+opposite "qcom,managed-locally" property. This property is *not* set
+for the qcom,sps-dma@7884000 [1] so adding "qcom,controlled-remotely"
+upstream matches the behavior of the downstream/vendor kernel.
+
+Adding this seems to fix some weird issues with UART where both
+input/output becomes garbled with certain obscure firmware versions on
+some devices.
+
+[1]: https://git.codelinaro.org/clo/la/kernel/msm-3.10/-/blob/LA.BR.1.2.9.1-02310-8x16.0/arch/arm/boot/dts/qcom/msm8939-common.dtsi#L866-872
+
+Cc: stable@vger.kernel.org # 6.5
+Fixes: 61550c6c156c ("arm64: dts: qcom: Add msm8939 SoC")
+Signed-off-by: Stephan Gerhold <stephan@gerhold.net>
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Link: https://lore.kernel.org/r/20231204-msm8916-blsp-dma-remote-v1-2-3e49c8838c8d@gerhold.net
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/msm8939.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/qcom/msm8939.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8939.dtsi
+@@ -1661,6 +1661,7 @@
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
++ qcom,controlled-remotely;
+ };
+
+ blsp_uart1: serial@78af000 {
--- /dev/null
+From 9b956999bf725fd62613f719c3178fdbee6e5f47 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:23 +0100
+Subject: arm64: dts: qcom: sc7180: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 9b956999bf725fd62613f719c3178fdbee6e5f47 upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: 0b766e7fe5a2 ("arm64: dts: qcom: sc7180: Add USB related nodes")
+Cc: stable@vger.kernel.org # 5.10
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-4-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sc7180.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -2978,8 +2978,8 @@
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 8 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 9 IRQ_TYPE_LEVEL_HIGH>;
++ <&pdc 8 IRQ_TYPE_EDGE_BOTH>,
++ <&pdc 9 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From c34199d967a946e55381404fa949382691737521 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:24 +0100
+Subject: arm64: dts: qcom: sc7280: fix usb_1 wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit c34199d967a946e55381404fa949382691737521 upstream.
+
+A recent cleanup reordering the usb_1 wakeup interrupts inadvertently
+switched the DP and SuperSpeed interrupt trigger types.
+
+Fixes: 4a7ffc10d195 ("arm64: dts: qcom: align DWC3 USB interrupts with DT schema")
+Cc: stable@vger.kernel.org # 5.19
+Cc: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-5-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sc7280.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -3668,9 +3668,9 @@
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+- <&pdc 14 IRQ_TYPE_LEVEL_HIGH>,
++ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+- <&pdc 17 IRQ_TYPE_EDGE_BOTH>;
++ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq",
+ "dp_hs_phy_irq",
+ "dm_hs_phy_irq",
--- /dev/null
+From 0dc0f6da3d43da8d2297105663e51ecb01b6f790 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:26 +0100
+Subject: arm64: dts: qcom: sc8180x: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 0dc0f6da3d43da8d2297105663e51ecb01b6f790 upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: b080f53a8f44 ("arm64: dts: qcom: sc8180x: Add remoteprocs, wifi and usb nodes")
+Cc: stable@vger.kernel.org # 6.5
+Cc: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-7-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sc8180x.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -2562,8 +2562,8 @@
+ reg = <0 0x0a6f8800 0 0x400>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq",
+ "ss_phy_irq",
+ "dm_hs_phy_irq",
+@@ -2636,8 +2636,8 @@
+ power-domains = <&gcc USB30_SEC_GDSC>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 490 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 491 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From 663affdb12b3e26c77d103327cf27de720c8117e Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 16 Oct 2023 10:06:58 +0200
+Subject: arm64: dts: qcom: sc8280xp-crd: fix eDP phy compatible
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 663affdb12b3e26c77d103327cf27de720c8117e upstream.
+
+The sc8280xp Display Port PHYs can be used in either DP or eDP mode and
+this is configured using the devicetree compatible string which defaults
+to DP mode in the SoC dtsi.
+
+Override the default compatible string for the CRD eDP PHY node so that
+the eDP settings are used.
+
+Fixes: 4a883a8d80b5 ("arm64: dts: qcom: sc8280xp-crd: Enable EDP")
+Cc: stable@vger.kernel.org # 6.3
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231016080658.6667-1-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sc8280xp-crd.dts | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+index e4861c61a65b..ffc4406422ae 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+@@ -458,6 +458,8 @@ mdss0_dp3_out: endpoint {
+ };
+
+ &mdss0_dp3_phy {
++ compatible = "qcom,sc8280xp-edp-phy";
++
+ vdda-phy-supply = <&vreg_l6b>;
+ vdda-pll-supply = <&vreg_l3b>;
+
+--
+2.43.0
+
--- /dev/null
+From de3b3de30999106549da4df88a7963d0ac02b91e Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:27 +0100
+Subject: arm64: dts: qcom: sdm670: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit de3b3de30999106549da4df88a7963d0ac02b91e upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: 07c8ded6e373 ("arm64: dts: qcom: add sdm670 and pixel 3a device trees")
+Cc: stable@vger.kernel.org # 6.2
+Cc: Richard Acayan <mailingradian@gmail.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Acked-by: Richard Acayan <mailingradian@gmail.com>
+Link: https://lore.kernel.org/r/20231120164331.8116-8-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sdm670.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
+@@ -1297,8 +1297,8 @@
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From 84ad9ac8d9ca29033d589e79a991866b38e23b85 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:28 +0100
+Subject: arm64: dts: qcom: sdm845: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 84ad9ac8d9ca29033d589e79a991866b38e23b85 upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: ca4db2b538a1 ("arm64: dts: qcom: sdm845: Add USB-related nodes")
+Cc: stable@vger.kernel.org # 4.20
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20231120164331.8116-9-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sdm845.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4086,8 +4086,8 @@
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -4137,8 +4137,8 @@
+
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 490 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 491 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From 54524b6987d1fffe64cbf3dded1b2fa6b903edf9 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 20 Nov 2023 17:43:30 +0100
+Subject: arm64: dts: qcom: sm8150: fix USB wakeup interrupt types
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 54524b6987d1fffe64cbf3dded1b2fa6b903edf9 upstream.
+
+The DP/DM wakeup interrupts are edge triggered and which edge to trigger
+on depends on use-case and whether a Low speed or Full/High speed device
+is connected.
+
+Fixes: 0c9dde0d2015 ("arm64: dts: qcom: sm8150: Add secondary USB and PHY nodes")
+Fixes: b33d2868e8d3 ("arm64: dts: qcom: sm8150: Add USB and PHY device nodes")
+Cc: stable@vger.kernel.org # 5.10
+Cc: Jonathan Marek <jonathan@marek.ca>
+Cc: Jack Pham <quic_jackp@quicinc.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Jack Pham <quic_jackp@quicinc.com>
+Link: https://lore.kernel.org/r/20231120164331.8116-11-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/sm8150.dtsi | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -3594,8 +3594,8 @@
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+@@ -3647,8 +3647,8 @@
+
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 490 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 490 IRQ_TYPE_EDGE_BOTH>,
++ <GIC_SPI 491 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
--- /dev/null
+From fc5a80a432607d05e85bba37971712405f75c546 Mon Sep 17 00:00:00 2001
+From: Tianling Shen <cnsztl@gmail.com>
+Date: Sat, 16 Dec 2023 12:07:23 +0800
+Subject: arm64: dts: rockchip: configure eth pad driver strength for orangepi r1 plus lts
+
+From: Tianling Shen <cnsztl@gmail.com>
+
+commit fc5a80a432607d05e85bba37971712405f75c546 upstream.
+
+The default pad drive strength is not enough to provide a stable
+connection under the 3.3 V LDO voltage.
+
+Fixes: 387b3bbac5ea ("arm64: dts: rockchip: Add Xunlong OrangePi R1 Plus LTS")
+Cc: stable@vger.kernel.org # 6.6+
+Signed-off-by: Tianling Shen <cnsztl@gmail.com>
+Link: https://lore.kernel.org/r/20231216040723.17864-1-cnsztl@gmail.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+index 5d7d567283e5..4237f2ee8fee 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -26,9 +26,11 @@ yt8531c: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+
++ motorcomm,auto-sleep-disabled;
+ motorcomm,clk-out-frequency-hz = <125000000>;
+ motorcomm,keep-pll-enabled;
+- motorcomm,auto-sleep-disabled;
++ motorcomm,rx-clk-drv-microamp = <5020>;
++ motorcomm,rx-data-drv-microamp = <5020>;
+
+ pinctrl-0 = <ð_phy_reset_pin>;
+ pinctrl-names = "default";
+--
+2.43.0
+
--- /dev/null
+From 44de8996ed5a10f08f2fe947182da6535edcfae5 Mon Sep 17 00:00:00 2001
+From: Sam Edwards <cfsworks@gmail.com>
+Date: Fri, 15 Dec 2023 19:10:19 -0700
+Subject: arm64: dts: rockchip: Fix rk3588 USB power-domain clocks
+
+From: Sam Edwards <cfsworks@gmail.com>
+
+commit 44de8996ed5a10f08f2fe947182da6535edcfae5 upstream.
+
+The QoS blocks saved/restored when toggling the PD_USB power domain are
+clocked by ACLK_USB. Attempting to access these memory regions without
+that clock running will result in an indefinite CPU stall.
+
+The PD_USB node wasn't specifying this clock dependency, resulting in
+hangs when trying to toggle the power domain (either on or off), unless
+we get "lucky" and have ACLK_USB running for another reason at the time.
+This "luck" can result from the bootloader leaving USB powered/clocked,
+and if no built-in driver wants USB, Linux will disable the unused
+PD+CLK on boot when {pd,clk}_ignore_unused aren't given. This can also
+be unlucky because the two cleanup tasks run in parallel and race: if
+the CLK is disabled first, the PD deactivation stalls the boot. In any
+case, the PD cannot then be reenabled (if e.g. the driver loads later)
+once the clock has been stopped.
+
+Fix this by specifying a dependency on ACLK_USB, instead of only
+ACLK_USB_ROOT. The child-parent relationship means the former implies
+the latter anyway.
+
+Fixes: c9211fa2602b8 ("arm64: dts: rockchip: Add base DT for rk3588 SoC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sam Edwards <CFSworks@gmail.com>
+Link: https://lore.kernel.org/r/20231216021019.1543811-1-CFSworks@gmail.com
+[changed to only include the missing clock, not dropping the root-clocks]
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3588s.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+@@ -890,6 +890,7 @@
+ reg = <RK3588_PD_USB>;
+ clocks = <&cru PCLK_PHP_ROOT>,
+ <&cru ACLK_USB_ROOT>,
++ <&cru ACLK_USB>,
+ <&cru HCLK_USB_ROOT>,
+ <&cru HCLK_HOST0>,
+ <&cru HCLK_HOST_ARB0>,
--- /dev/null
+From 2da4f4a7b003441b80f0f12d8a216590f652a40f Mon Sep 17 00:00:00 2001
+From: Cixi Geng <cixi.geng1@unisoc.com>
+Date: Wed, 12 Jul 2023 00:23:46 +0800
+Subject: arm64: dts: sprd: fix the cpu node for UMS512
+
+From: Cixi Geng <cixi.geng1@unisoc.com>
+
+commit 2da4f4a7b003441b80f0f12d8a216590f652a40f upstream.
+
+The UMS512 SoC has 8 cores: 6 Cortex-A55 and 2 Cortex-A75. Modify the
+CPU nodes to reflect the correct information.
+
+Fixes: 2b4881839a39 ("arm64: dts: sprd: Add support for Unisoc's UMS512")
+Cc: stable@vger.kernel.org
+Signed-off-by: Cixi Geng <cixi.geng1@unisoc.com>
+Link: https://lore.kernel.org/r/20230711162346.5978-1-cixi.geng@linux.dev
+Signed-off-by: Chunyan Zhang <chunyan.zhang@unisoc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/sprd/ums512.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/sprd/ums512.dtsi b/arch/arm64/boot/dts/sprd/ums512.dtsi
+index 024be594c47d..97ac550af2f1 100644
+--- a/arch/arm64/boot/dts/sprd/ums512.dtsi
++++ b/arch/arm64/boot/dts/sprd/ums512.dtsi
+@@ -96,7 +96,7 @@ CPU5: cpu@500 {
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+@@ -104,7 +104,7 @@ CPU6: cpu@600 {
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+- compatible = "arm,cortex-a55";
++ compatible = "arm,cortex-a75";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD>;
+--
+2.43.0
+
--- /dev/null
+From 7b21ed7d119dc06b0ed2ba3e406a02cafe3a8d03 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Thu, 14 Dec 2023 11:18:50 -0500
+Subject: arm64: properly install vmlinuz.efi
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 7b21ed7d119dc06b0ed2ba3e406a02cafe3a8d03 upstream.
+
+If you select CONFIG_EFI_ZBOOT, we will generate vmlinuz.efi, and then
+when we go to install the kernel we'll install the vmlinux instead
+because install.sh only recognizes Image.gz as wanting the compressed
+install image. With CONFIG_EFI_ZBOOT we don't get the proper kernel
+installed, which means it doesn't boot, which makes for a very confused
+and subsequently angry kernel developer.
+
+Fix this by properly installing our compressed kernel if we've enabled
+CONFIG_EFI_ZBOOT.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Cc: <stable@vger.kernel.org> # 6.1.x
+Fixes: c37b830fef13 ("arm64: efi: enable generic EFI compressed boot")
+Reviewed-by: Simon Glass <sjg@chromium.org>
+Link: https://lore.kernel.org/r/6edb1402769c2c14c4fbef8f7eaedb3167558789.1702570674.git.josef@toxicpanda.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/install.sh | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/install.sh
++++ b/arch/arm64/boot/install.sh
+@@ -17,7 +17,8 @@
+ # $3 - kernel map file
+ # $4 - default install path (blank if root directory)
+
+-if [ "$(basename $2)" = "Image.gz" ]; then
++if [ "$(basename $2)" = "Image.gz" ] || [ "$(basename $2)" = "vmlinuz.efi" ]
++then
+ # Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
--- /dev/null
+From 7d4b5d7a37bdd63a5a3371b988744b060d5bb86f Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 27 Dec 2023 21:38:23 +0100
+Subject: async: Introduce async_schedule_dev_nocall()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 7d4b5d7a37bdd63a5a3371b988744b060d5bb86f upstream.
+
+In preparation for subsequent changes, introduce a specialized variant
+of async_schedule_dev() that will not invoke the argument function
+synchronously when it cannot be scheduled for asynchronous execution.
+
+The new function, async_schedule_dev_nocall(), will be used for fixing
+possible deadlocks in the system-wide power management core code.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com> for the series.
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/async.h | 2 ++
+ kernel/async.c | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+--- a/include/linux/async.h
++++ b/include/linux/async.h
+@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, st
+ return async_schedule_node(func, dev, dev_to_node(dev));
+ }
+
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
++
+ /**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -244,6 +244,35 @@ async_cookie_t async_schedule_node(async
+ EXPORT_SYMBOL_GPL(async_schedule_node);
+
+ /**
++ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
++ * @func: function to execute asynchronously
++ * @dev: device argument to be passed to function
++ *
++ * @dev is used as both the argument for the function and to provide NUMA
++ * context for where to run the function.
++ *
++ * If the asynchronous execution of @func is scheduled successfully, return
++ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
++ * that will run the function synchronously then.
++ */
++bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
++{
++ struct async_entry *entry;
++
++ entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
++
++ /* Give up if there is no memory or too much work. */
++ if (!entry || atomic_read(&entry_count) > MAX_WORK) {
++ kfree(entry);
++ return false;
++ }
++
++ __async_schedule_node_domain(func, dev, dev_to_node(dev),
++ &async_dfl_domain, entry);
++ return true;
++}
++
++/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
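
As a rough illustration of how the new helper is meant to be used (a
minimal sketch, not part of the patch; the example_* names are
hypothetical), a caller can fall back to invoking the function directly
when scheduling fails:

#include <linux/async.h>
#include <linux/device.h>

static void example_async_op(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        dev_dbg(dev, "async op running, cookie %llu\n",
                (unsigned long long)cookie);
}

static void example_trigger(struct device *dev)
{
        /*
         * Unlike async_schedule_dev(), async_schedule_dev_nocall() never
         * runs the function synchronously on failure, so the caller gets
         * to decide what to do instead.
         */
        if (!async_schedule_dev_nocall(example_async_op, dev))
                example_async_op(dev, 0);       /* direct, synchronous call */
}

Letting the caller choose the fallback is what the system-wide PM core
needs to avoid the deadlocks mentioned above.
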
--- /dev/null
+From 6aa09a5bccd8e224d917afdb4c278fc66aacde4d Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 27 Dec 2023 21:37:02 +0100
+Subject: async: Split async_schedule_node_domain()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 6aa09a5bccd8e224d917afdb4c278fc66aacde4d upstream.
+
+In preparation for subsequent changes, split async_schedule_node_domain()
+in two pieces so as to allow the bottom part of it to be called from a
+somewhat different code path.
+
+No functional impact.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/async.c | 56 ++++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 34 insertions(+), 22 deletions(-)
+
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct wo
+ wake_up(&async_done);
+ }
+
++static async_cookie_t __async_schedule_node_domain(async_func_t func,
++ void *data, int node,
++ struct async_domain *domain,
++ struct async_entry *entry)
++{
++ async_cookie_t newcookie;
++ unsigned long flags;
++
++ INIT_LIST_HEAD(&entry->domain_list);
++ INIT_LIST_HEAD(&entry->global_list);
++ INIT_WORK(&entry->work, async_run_entry_fn);
++ entry->func = func;
++ entry->data = data;
++ entry->domain = domain;
++
++ spin_lock_irqsave(&async_lock, flags);
++
++ /* allocate cookie and queue */
++ newcookie = entry->cookie = next_cookie++;
++
++ list_add_tail(&entry->domain_list, &domain->pending);
++ if (domain->registered)
++ list_add_tail(&entry->global_list, &async_global_pending);
++
++ atomic_inc(&entry_count);
++ spin_unlock_irqrestore(&async_lock, flags);
++
++ /* schedule for execution */
++ queue_work_node(node, system_unbound_wq, &entry->work);
++
++ return newcookie;
++}
++
+ /**
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domai
+ func(data, newcookie);
+ return newcookie;
+ }
+- INIT_LIST_HEAD(&entry->domain_list);
+- INIT_LIST_HEAD(&entry->global_list);
+- INIT_WORK(&entry->work, async_run_entry_fn);
+- entry->func = func;
+- entry->data = data;
+- entry->domain = domain;
+-
+- spin_lock_irqsave(&async_lock, flags);
+
+- /* allocate cookie and queue */
+- newcookie = entry->cookie = next_cookie++;
+-
+- list_add_tail(&entry->domain_list, &domain->pending);
+- if (domain->registered)
+- list_add_tail(&entry->global_list, &async_global_pending);
+-
+- atomic_inc(&entry_count);
+- spin_unlock_irqrestore(&async_lock, flags);
+-
+- /* schedule for execution */
+- queue_work_node(node, system_unbound_wq, &entry->work);
+-
+- return newcookie;
++ return __async_schedule_node_domain(func, data, node, domain, entry);
+ }
+ EXPORT_SYMBOL_GPL(async_schedule_node_domain);
+
--- /dev/null
+From 2b0122aaa800b021e36027d7f29e206f87c761d6 Mon Sep 17 00:00:00 2001
+From: David Disseldorp <ddiss@suse.de>
+Date: Fri, 8 Dec 2023 11:41:56 +1100
+Subject: btrfs: sysfs: validate scrub_speed_max value
+
+From: David Disseldorp <ddiss@suse.de>
+
+commit 2b0122aaa800b021e36027d7f29e206f87c761d6 upstream.
+
+The value set as scrub_speed_max accepts a size with suffixes
+(k/m/g/t/p/e) but we should still validate it for trailing characters,
+similar to what we do with chunk_size_store.
+
+CC: stable@vger.kernel.org # 5.15+
+Signed-off-by: David Disseldorp <ddiss@suse.de>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/sysfs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -1760,6 +1760,10 @@ static ssize_t btrfs_devinfo_scrub_speed
+ unsigned long long limit;
+
+ limit = memparse(buf, &endptr);
++ /* There could be trailing '\n', also catch any typos after the value. */
++ endptr = skip_spaces(endptr);
++ if (*endptr != 0)
++ return -EINVAL;
+ WRITE_ONCE(device->scrub_speed_max, limit);
+ return len;
+ }
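
For reference, a minimal standalone sketch of the validation pattern used
above (the parse_limit() helper is hypothetical, not btrfs code):
memparse() consumes the number plus any k/m/g/t/p/e suffix, and after
skipping whitespace nothing else may remain:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int parse_limit(const char *buf, unsigned long long *limit)
{
        char *endptr;

        *limit = memparse(buf, &endptr);
        /* Tolerate a trailing '\n' from sysfs, reject anything else. */
        endptr = skip_spaces(endptr);
        if (*endptr != '\0')
                return -EINVAL;         /* e.g. "100mX" or "100m foo" */
        return 0;
}
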
--- /dev/null
+From eff9704f5332a13b08fbdbe0f84059c9e7051d5f Mon Sep 17 00:00:00 2001
+From: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+Date: Tue, 31 Oct 2023 15:21:05 +0530
+Subject: bus: mhi: host: Add alignment check for event ring read pointer
+
+From: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+
+commit eff9704f5332a13b08fbdbe0f84059c9e7051d5f upstream.
+
+Though we do check the event ring read pointer with "is_valid_ring_ptr"
+to make sure it is in the buffer range, there is another risk: the
+pointer may not be aligned. Since event ring elements are expected to be
+128-bit (struct mhi_ring_element) aligned, an unaligned read pointer
+could lead to multiple issues such as DoS or ring buffer memory
+corruption.
+
+So add an alignment check for the event ring read pointer.
+
+Fixes: ec32332df764 ("bus: mhi: core: Sanity check values from remote device before use")
+cc: stable@vger.kernel.org
+Signed-off-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/20231031-alignment_check-v2-1-1441db7c5efd@quicinc.com
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/mhi/host/main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -268,7 +268,8 @@ static void mhi_del_ring_element(struct
+
+ static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+ {
+- return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
++ !(addr & (sizeof(struct mhi_ring_element) - 1));
+ }
+
+ int mhi_destroy_device(struct device *dev, void *data)
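
A short sketch of the alignment test added above (the helper name is
hypothetical; the element size is assumed to be a power of two, and
struct mhi_ring_element is 128 bits, i.e. 16 bytes): a DMA address is
element-aligned exactly when its low bits below the element size are all
zero.

#include <linux/types.h>

/* True if @addr is aligned to @elem_size, which must be a power of two. */
static bool ptr_is_element_aligned(dma_addr_t addr, size_t elem_size)
{
        return (addr & (elem_size - 1)) == 0;
}

The kernel's IS_ALIGNED() macro expresses the same check; the open-coded
form is what is_valid_ring_ptr() now applies together with the existing
range check.
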
--- /dev/null
+From b89b6a863dd53bc70d8e52d50f9cfaef8ef5e9c9 Mon Sep 17 00:00:00 2001
+From: Bhaumik Bhatt <bbhatt@codeaurora.org>
+Date: Mon, 11 Dec 2023 14:42:51 +0800
+Subject: bus: mhi: host: Add spinlock to protect WP access when queueing TREs
+
+From: Bhaumik Bhatt <bbhatt@codeaurora.org>
+
+commit b89b6a863dd53bc70d8e52d50f9cfaef8ef5e9c9 upstream.
+
+Protect WP accesses such that multiple threads queueing buffers for
+incoming data do not race.
+
+Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, IRQs will be enabled once
+__local_bh_enable_ip is called as part of write_unlock_bh. Hence, let's
+take the irqsave lock after the TRE is generated, to avoid running
+write_unlock_bh while the irqsave lock is held.
+
+Cc: stable@vger.kernel.org
+Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
+Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
+Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/1702276972-41296-2-git-send-email-quic_qianyu@quicinc.com
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/mhi/host/main.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -1127,17 +1127,15 @@ static int mhi_queue(struct mhi_device *
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+- read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+-
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+- if (unlikely(ret)) {
+- ret = -EAGAIN;
+- goto exit_unlock;
+- }
++ if (unlikely(ret))
++ return -EAGAIN;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+- goto exit_unlock;
++ return ret;
++
++ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* Packet is queued, take a usage ref to exit M3 if necessary
+ * for host->device buffer, balanced put is done on buffer completion
+@@ -1157,7 +1155,6 @@ static int mhi_queue(struct mhi_device *
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+-exit_unlock:
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
+@@ -1209,6 +1206,9 @@ int mhi_gen_tre(struct mhi_controller *m
+ int eot, eob, chain, bei;
+ int ret;
+
++ /* Protect accesses for reading and incrementing WP */
++ write_lock_bh(&mhi_chan->lock);
++
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+@@ -1226,8 +1226,10 @@ int mhi_gen_tre(struct mhi_controller *m
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+- if (ret)
++ if (ret) {
++ write_unlock_bh(&mhi_chan->lock);
+ return ret;
++ }
+ }
+
+ eob = !!(flags & MHI_EOB);
+@@ -1244,6 +1246,8 @@ int mhi_gen_tre(struct mhi_controller *m
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
++ write_unlock_bh(&mhi_chan->lock);
++
+ return 0;
+ }
+
--- /dev/null
+From 01bd694ac2f682fb8017e16148b928482bc8fa4b Mon Sep 17 00:00:00 2001
+From: Qiang Yu <quic_qianyu@quicinc.com>
+Date: Mon, 11 Dec 2023 14:42:52 +0800
+Subject: bus: mhi: host: Drop chan lock before queuing buffers
+
+From: Qiang Yu <quic_qianyu@quicinc.com>
+
+commit 01bd694ac2f682fb8017e16148b928482bc8fa4b upstream.
+
+Ensure read and write locks for the channel are not taken in succession by
+dropping the read lock from parse_xfer_event() such that a callback given
+to the client can potentially queue buffers and acquire the write lock in
+that process. Any queueing of buffers should be done without the channel
+read lock held, as holding multiple locks like this can result in a soft
+lockup.
+
+Cc: <stable@vger.kernel.org> # 5.7
+Fixes: 1d3173a3bae7 ("bus: mhi: core: Add support for processing events from client device")
+Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Link: https://lore.kernel.org/r/1702276972-41296-3-git-send-email-quic_qianyu@quicinc.com
+[mani: added fixes tag and cc'ed stable]
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bus/mhi/host/main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -643,6 +643,8 @@ static int parse_xfer_event(struct mhi_c
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
++ read_unlock_bh(&mhi_chan->lock);
++
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+@@ -668,6 +670,8 @@ static int parse_xfer_event(struct mhi_c
+ kfree(buf_info->cb_buf);
+ }
+ }
++
++ read_lock_bh(&mhi_chan->lock);
+ }
+ break;
+ } /* CC_EOT */
--- /dev/null
+From 27016f75f5ed47e2d8e0ca75a8ff1f40bc1a5e27 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 7 Dec 2023 18:36:57 +0800
+Subject: crypto: api - Disallow identical driver names
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 27016f75f5ed47e2d8e0ca75a8ff1f40bc1a5e27 upstream.
+
+Disallow registration of two algorithms with identical driver names.
+
+Cc: <stable@vger.kernel.org>
+Reported-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algapi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -341,6 +341,7 @@ __crypto_register_alg(struct crypto_alg
+ }
+
+ if (!strcmp(q->cra_driver_name, alg->cra_name) ||
++ !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ !strcmp(q->cra_name, alg->cra_driver_name))
+ goto err;
+ }
--- /dev/null
+From ba3c5574203034781ac4231acf117da917efcd2a Mon Sep 17 00:00:00 2001
+From: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+Date: Thu, 14 Dec 2023 11:08:34 +0800
+Subject: crypto: lib/mpi - Fix unexpected pointer access in mpi_ec_init
+
+From: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+
+commit ba3c5574203034781ac4231acf117da917efcd2a upstream.
+
+When the mpi_ec_ctx structure is initialized, some fields are not
+cleared, causing a crash when those fields are referenced while the
+structure is released.
+memory for mpi_ec_ctx is allocated with the __GFP_ZERO flag.
+For example, this error will be triggered when calculating the
+Za value for SM2 separately.
+
+Fixes: d58bb7e55a8a ("lib/mpi: Introduce ec implementation to MPI library")
+Cc: stable@vger.kernel.org # v6.5
+Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/mpi/ec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
+index 40f5908e57a4..e16dca1e23d5 100644
+--- a/lib/crypto/mpi/ec.c
++++ b/lib/crypto/mpi/ec.c
+@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
++ ctx->d = NULL;
++ ctx->t.two_inv_p = NULL;
++
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+--
+2.43.0
+
--- /dev/null
+From d07f951903fa9922c375b8ab1ce81b18a0034e3b Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 28 Nov 2023 14:22:13 +0800
+Subject: crypto: s390/aes - Fix buffer overread in CTR mode
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit d07f951903fa9922c375b8ab1ce81b18a0034e3b upstream.
+
+When processing the last block, the s390 ctr code will always read
+a whole block, even if there isn't a whole block of data left. Fix
+this by using the actual length left and copying it into a buffer first
+for processing.
+
+Fixes: 0200f3ecc196 ("crypto: s390 - add System z hardware support for CTR mode")
+Cc: <stable@vger.kernel.org>
+Reported-by: Guangwu Zhang <guazhang@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Harald Freudenberger <freude@de.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/crypto/aes_s390.c | 4 +++-
+ arch/s390/crypto/paes_s390.c | 4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -597,7 +597,9 @@ static int ctr_aes_crypt(struct skcipher
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
+- cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
++ cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -693,9 +693,11 @@ static int ctr_paes_crypt(struct skciphe
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+ if (nbytes) {
++ memset(buf, 0, AES_BLOCK_SIZE);
++ memcpy(buf, walk.src.virt.addr, nbytes);
+ while (1) {
+ if (cpacf_kmctr(ctx->fc, ¶m, buf,
+- walk.src.virt.addr, AES_BLOCK_SIZE,
++ buf, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
+ break;
+ if (__paes_convert_key(ctx))
--- /dev/null
+From 3c12466b6b7bf1e56f9b32c366a3d83d87afb4de Mon Sep 17 00:00:00 2001
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+Date: Wed, 6 Dec 2023 12:55:34 +0800
+Subject: erofs: fix lz4 inplace decompression
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+commit 3c12466b6b7bf1e56f9b32c366a3d83d87afb4de upstream.
+
+Currently EROFS can map another compressed buffer for inplace
+decompression, that was used to handle the cases that some pages of
+compressed data are actually not in-place I/O.
+
+However, like most simple LZ77 algorithms, LZ4 expects the compressed
+data is arranged at the end of the decompressed buffer and it
+explicitly uses memmove() to handle overlapping:
+ __________________________________________________________
+ |_ direction of decompression --> ____ |_ compressed data _|
+
+Although EROFS arranges compressed data like this, it typically maps two
+individual virtual buffers so the relative order is uncertain.
+Previously, it was hardly observed since LZ4 only uses memmove() for
+short overlapped literals and x86/arm64 memmove implementations seem to
+completely cover it up and they don't have this issue. Juhyung reported
+that EROFS data corruption can be found on a new Intel x86 processor.
+After some analysis, it seems that recent x86 processors with the new
+FSRM feature expose this issue with "rep movsb".
+
+Let's strictly use the decompressed buffer for lz4 inplace
+decompression for now. Later, as a useful improvement, we could try
+to tie up these two buffers together in the correct order.
+
+Reported-and-tested-by: Juhyung Park <qkrwngud825@gmail.com>
+Closes: https://lore.kernel.org/r/CAD14+f2AVKf8Fa2OO1aAUdDNTDsVzzR6ctU_oJSmTyd6zSYR2Q@mail.gmail.com
+Fixes: 0ffd71bcc3a0 ("staging: erofs: introduce LZ4 decompression inplace")
+Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend")
+Cc: stable <stable@vger.kernel.org> # 5.4+
+Tested-by: Yifan Zhao <zhaoyifan@sjtu.edu.cn>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20231206045534.3920847-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/decompressor.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -122,11 +122,11 @@ static int z_erofs_lz4_prepare_dstpages(
+ }
+
+ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
+- void *inpage, unsigned int *inputmargin, int *maptype,
+- bool may_inplace)
++ void *inpage, void *out, unsigned int *inputmargin,
++ int *maptype, bool may_inplace)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+- unsigned int omargin, total, i, j;
++ unsigned int omargin, total, i;
+ struct page **in;
+ void *src, *tmp;
+
+@@ -136,12 +136,13 @@ static void *z_erofs_lz4_handle_overlap(
+ omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+ goto docopy;
+
+- for (i = 0; i < ctx->inpages; ++i) {
+- DBG_BUGON(rq->in[i] == NULL);
+- for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
+- if (rq->out[j] == rq->in[i])
+- goto docopy;
+- }
++ for (i = 0; i < ctx->inpages; ++i)
++ if (rq->out[ctx->outpages - ctx->inpages + i] !=
++ rq->in[i])
++ goto docopy;
++ kunmap_local(inpage);
++ *maptype = 3;
++ return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
+ }
+
+ if (ctx->inpages <= 1) {
+@@ -149,7 +150,6 @@ static void *z_erofs_lz4_handle_overlap(
+ return inpage;
+ }
+ kunmap_local(inpage);
+- might_sleep();
+ src = erofs_vm_map_ram(rq->in, ctx->inpages);
+ if (!src)
+ return ERR_PTR(-ENOMEM);
+@@ -205,12 +205,12 @@ int z_erofs_fixup_insize(struct z_erofs_
+ }
+
+ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
+- u8 *out)
++ u8 *dst)
+ {
+ struct z_erofs_decompress_req *rq = ctx->rq;
+ bool support_0padding = false, may_inplace = false;
+ unsigned int inputmargin;
+- u8 *headpage, *src;
++ u8 *out, *headpage, *src;
+ int ret, maptype;
+
+ DBG_BUGON(*rq->in == NULL);
+@@ -231,11 +231,12 @@ static int z_erofs_lz4_decompress_mem(st
+ }
+
+ inputmargin = rq->pageofs_in;
+- src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
++ src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
+ &maptype, may_inplace);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
++ out = dst + rq->pageofs_out;
+ /* legacy format could compress extra data in a pcluster. */
+ if (rq->partial_decoding || !support_0padding)
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+@@ -266,7 +267,7 @@ static int z_erofs_lz4_decompress_mem(st
+ vm_unmap_ram(src, ctx->inpages);
+ } else if (maptype == 2) {
+ erofs_put_pcpubuf(src);
+- } else {
++ } else if (maptype != 3) {
+ DBG_BUGON(1);
+ return -EFAULT;
+ }
+@@ -309,7 +310,7 @@ static int z_erofs_lz4_decompress(struct
+ }
+
+ dstmap_out:
+- ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
++ ret = z_erofs_lz4_decompress_mem(&ctx, dst);
+ if (!dst_maptype)
+ kunmap_local(dst);
+ else if (dst_maptype == 2)
--- /dev/null
+From 7c784d624819acbeefb0018bac89e632467cca5a Mon Sep 17 00:00:00 2001
+From: Suraj Jitindar Singh <surajjs@amazon.com>
+Date: Wed, 13 Dec 2023 16:16:35 +1100
+Subject: ext4: allow for the last group to be marked as trimmed
+
+From: Suraj Jitindar Singh <surajjs@amazon.com>
+
+commit 7c784d624819acbeefb0018bac89e632467cca5a upstream.
+
+The ext4 filesystem tracks the trim status of blocks at the group
+level. When an entire group has been trimmed then it is marked as
+such and subsequent trim invocations with the same minimum trim size
+will not be attempted on that group unless it is marked as able to be
+trimmed again such as when a block is freed.
+
+Currently the last group can't be marked as trimmed due to incorrect
+logic in ext4_last_grp_cluster(). ext4_last_grp_cluster() is supposed
+to return the zero based index of the last cluster in a group. This is
+then used by ext4_try_to_trim_range() to determine if the trim
+operation spans the entire group and as such if the trim status of the
+group should be recorded.
+
+ext4_last_grp_cluster() takes a 0 based group index, thus the valid
+values for grp are 0..(ext4_get_groups_count - 1). Any group index
+less than (ext4_get_groups_count - 1) is not the last group and must
+have EXT4_CLUSTERS_PER_GROUP(sb) clusters. For the last group we need
+to calculate the number of clusters based on the number of blocks in
+the group. Finally subtract 1 from the number of clusters as zero
+based indexing is expected. Rearrange the function slightly to make
+it clear what we are calculating and returning.
+
+Reproducer:
+// Create file system where the last group has fewer blocks than
+// blocks per group
+$ mkfs.ext4 -b 4096 -g 8192 /dev/nvme0n1 8191
+$ mount /dev/nvme0n1 /mnt
+
+Before Patch:
+$ fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+// Group not marked as trimmed so second invocation still discards blocks
+$ fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+
+After Patch:
+fstrim -v /mnt
+/mnt: 25.9 MiB (27156480 bytes) trimmed
+// Group marked as trimmed so second invocation DOESN'T discard any blocks
+$ fstrim -v /mnt
+/mnt: 0 B (0 bytes) trimmed
+
+Fixes: 45e4ab320c9b ("ext4: move setting of trimmed bit into ext4_try_to_trim_range()")
+Cc: <stable@vger.kernel.org> # 4.19+
+Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20231213051635.37731-1-surajjs@amazon.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -6887,11 +6887,16 @@ __acquires(bitlock)
+ static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+ ext4_group_t grp)
+ {
+- if (grp < ext4_get_groups_count(sb))
+- return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+- return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+- ext4_group_first_block_no(sb, grp) - 1) >>
+- EXT4_CLUSTER_BITS(sb);
++ unsigned long nr_clusters_in_group;
++
++ if (grp < (ext4_get_groups_count(sb) - 1))
++ nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
++ else
++ nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
++ ext4_group_first_block_no(sb, grp))
++ >> EXT4_CLUSTER_BITS(sb);
++
++ return nr_clusters_in_group - 1;
+ }
+
+ static bool ext4_trim_interrupted(void)
--- /dev/null
+From 78aafb3884f6bc6636efcc1760c891c8500b9922 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 2 Dec 2023 09:01:54 +0800
+Subject: hwrng: core - Fix page fault dead lock on mmap-ed hwrng
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 78aafb3884f6bc6636efcc1760c891c8500b9922 upstream.
+
+There is a dead-lock in the hwrng device read path. This triggers
+when the user reads from /dev/hwrng into memory also mmap-ed from
+/dev/hwrng. The resulting page fault triggers a recursive read
+which then dead-locks.
+
+Fix this by using a stack buffer when calling copy_to_user.
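+
+A rough, purely illustrative user-space sketch of the pattern from the
+report, assuming the device node can be mmap-ed as described there; on a
+fixed kernel the read simply completes instead of dead-locking:
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = open("/dev/hwrng", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+
+          /* Map the device, then read from the device into that very
+           * mapping: the page fault taken by copy_to_user() used to
+           * re-enter the read path and dead-lock on reading_mutex. */
+          void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+                         fd, 0);
+          if (p == MAP_FAILED)
+                  return 1;
+
+          ssize_t n = read(fd, p, 32);
+
+          printf("read %zd bytes\n", n);
+          return 0;
+  }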
+
+Reported-by: Edward Adam Davis <eadavis@qq.com>
+Reported-by: syzbot+c52ab18308964d248092@syzkaller.appspotmail.com
+Fixes: 9996508b3353 ("hwrng: core - Replace u32 in driver API with byte array")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/hw_random/core.c | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -23,10 +23,13 @@
+ #include <linux/sched.h>
+ #include <linux/sched/signal.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/uaccess.h>
+
+ #define RNG_MODULE_NAME "hw_random"
+
++#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
++
+ static struct hwrng *current_rng;
+ /* the current rng has been explicitly chosen by user via sysfs */
+ static int cur_rng_set_by_user;
+@@ -58,7 +61,7 @@ static inline int rng_get_data(struct hw
+
+ static size_t rng_buffer_size(void)
+ {
+- return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++ return RNG_BUFFER_SIZE;
+ }
+
+ static void add_early_randomness(struct hwrng *rng)
+@@ -209,6 +212,7 @@ static inline int rng_get_data(struct hw
+ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ size_t size, loff_t *offp)
+ {
++ u8 buffer[RNG_BUFFER_SIZE];
+ ssize_t ret = 0;
+ int err = 0;
+ int bytes_read, len;
+@@ -236,34 +240,37 @@ static ssize_t rng_dev_read(struct file
+ if (bytes_read < 0) {
+ err = bytes_read;
+ goto out_unlock_reading;
++ } else if (bytes_read == 0 &&
++ (filp->f_flags & O_NONBLOCK)) {
++ err = -EAGAIN;
++ goto out_unlock_reading;
+ }
++
+ data_avail = bytes_read;
+ }
+
+- if (!data_avail) {
+- if (filp->f_flags & O_NONBLOCK) {
+- err = -EAGAIN;
+- goto out_unlock_reading;
+- }
+- } else {
+- len = data_avail;
++ len = data_avail;
++ if (len) {
+ if (len > size)
+ len = size;
+
+ data_avail -= len;
+
+- if (copy_to_user(buf + ret, rng_buffer + data_avail,
+- len)) {
++ memcpy(buffer, rng_buffer + data_avail, len);
++ }
++ mutex_unlock(&reading_mutex);
++ put_rng(rng);
++
++ if (len) {
++ if (copy_to_user(buf + ret, buffer, len)) {
+ err = -EFAULT;
+- goto out_unlock_reading;
++ goto out;
+ }
+
+ size -= len;
+ ret += len;
+ }
+
+- mutex_unlock(&reading_mutex);
+- put_rng(rng);
+
+ if (need_resched())
+ schedule_timeout_interruptible(1);
+@@ -274,6 +281,7 @@ static ssize_t rng_dev_read(struct file
+ }
+ }
+ out:
++ memzero_explicit(buffer, sizeof(buffer));
+ return ret ? : err;
+
+ out_unlock_reading:
--- /dev/null
+From efa5fe19c0a9199f49e36e1f5242ed5c88da617d Mon Sep 17 00:00:00 2001
+From: Bingbu Cao <bingbu.cao@intel.com>
+Date: Wed, 22 Nov 2023 17:46:06 +0800
+Subject: media: imx355: Enable runtime PM before registering async sub-device
+
+From: Bingbu Cao <bingbu.cao@intel.com>
+
+commit efa5fe19c0a9199f49e36e1f5242ed5c88da617d upstream.
+
+As the sensor device may be accessed right after its async sub-device is
+registered (for example, ipu-bridge will try to power up the sensor via the
+sensor client device's runtime PM from the async notifier callback), that
+power-up will fail if runtime PM is not enabled yet.
+
+So runtime PM should be ready before its async sub-device is registered
+and accessible by others.
+
+Fixes: df0b5c4a7ddd ("media: add imx355 camera sensor driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/imx355.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/i2c/imx355.c
++++ b/drivers/media/i2c/imx355.c
+@@ -1788,10 +1788,6 @@ static int imx355_probe(struct i2c_clien
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&imx355->sd);
+- if (ret < 0)
+- goto error_media_entity;
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -1800,9 +1796,15 @@ static int imx355_probe(struct i2c_clien
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&imx355->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&imx355->sd.entity);
+
+ error_handler_free:
--- /dev/null
+From 47a78052db51b16e8045524fbf33373b58f1323b Mon Sep 17 00:00:00 2001
+From: Bingbu Cao <bingbu.cao@intel.com>
+Date: Wed, 22 Nov 2023 17:46:07 +0800
+Subject: media: ov01a10: Enable runtime PM before registering async sub-device
+
+From: Bingbu Cao <bingbu.cao@intel.com>
+
+commit 47a78052db51b16e8045524fbf33373b58f1323b upstream.
+
+As the sensor device may be accessed right after its async sub-device is
+registered (for example, ipu-bridge will try to power up the sensor via the
+sensor client device's runtime PM from the async notifier callback), that
+power-up will fail if runtime PM is not enabled yet.
+
+So runtime PM should be ready before its async sub-device is registered
+and accessible by others.
+
+It also sets the runtime PM status to active as the sensor was turned
+on by i2c-core.
+
+Fixes: 0827b58dabff ("media: i2c: add ov01a10 image sensor driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/ov01a10.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/media/i2c/ov01a10.c
++++ b/drivers/media/i2c/ov01a10.c
+@@ -907,6 +907,7 @@ static void ov01a10_remove(struct i2c_cl
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static int ov01a10_probe(struct i2c_client *client)
+@@ -953,17 +954,26 @@ static int ov01a10_probe(struct i2c_clie
+ goto err_media_entity_cleanup;
+ }
+
++ /*
++ * Device is already turned on by i2c-core with ACPI domain PM.
++ * Enable runtime PM and turn off the device.
++ */
++ pm_runtime_set_active(&client->dev);
++ pm_runtime_enable(dev);
++ pm_runtime_idle(dev);
++
+ ret = v4l2_async_register_subdev_sensor(&ov01a10->sd);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdev: %d\n", ret);
+- goto err_media_entity_cleanup;
++ goto err_pm_disable;
+ }
+
+- pm_runtime_enable(dev);
+- pm_runtime_idle(dev);
+-
+ return 0;
+
++err_pm_disable:
++ pm_runtime_disable(dev);
++ pm_runtime_set_suspended(&client->dev);
++
+ err_media_entity_cleanup:
+ media_entity_cleanup(&ov01a10->sd.entity);
+
--- /dev/null
+From 7b0454cfd8edb3509619407c3b9f78a6d0dee1a5 Mon Sep 17 00:00:00 2001
+From: Bingbu Cao <bingbu.cao@intel.com>
+Date: Wed, 22 Nov 2023 17:46:08 +0800
+Subject: media: ov13b10: Enable runtime PM before registering async sub-device
+
+From: Bingbu Cao <bingbu.cao@intel.com>
+
+commit 7b0454cfd8edb3509619407c3b9f78a6d0dee1a5 upstream.
+
+As the sensor device may be accessed right after its async sub-device is
+registered (for example, ipu-bridge will try to power up the sensor via the
+sensor client device's runtime PM from the async notifier callback), that
+power-up will fail if runtime PM is not enabled yet.
+
+So runtime PM should be ready before its async sub-device is registered
+and accessible by others.
+
+Fixes: 7ee850546822 ("media: Add sensor driver support for the ov13b10 camera.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/ov13b10.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/i2c/ov13b10.c
++++ b/drivers/media/i2c/ov13b10.c
+@@ -1536,24 +1536,27 @@ static int ov13b10_probe(struct i2c_clie
+ goto error_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+- if (ret < 0)
+- goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+-
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
++ if (ret < 0)
++ goto error_media_entity_runtime_pm;
++
+ return 0;
+
+-error_media_entity:
++error_media_entity_runtime_pm:
++ pm_runtime_disable(&client->dev);
++ if (full_power)
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov13b->sd.entity);
+
+ error_handler_free:
+@@ -1576,6 +1579,7 @@ static void ov13b10_remove(struct i2c_cl
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ }
+
+ static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
--- /dev/null
+From e242e9c144050ed120cf666642ba96b7c4462a4c Mon Sep 17 00:00:00 2001
+From: Bingbu Cao <bingbu.cao@intel.com>
+Date: Wed, 22 Nov 2023 17:46:09 +0800
+Subject: media: ov9734: Enable runtime PM before registering async sub-device
+
+From: Bingbu Cao <bingbu.cao@intel.com>
+
+commit e242e9c144050ed120cf666642ba96b7c4462a4c upstream.
+
+As the sensor device may be accessed right after its async sub-device is
+registered (for example, ipu-bridge will try to power up the sensor via the
+sensor client device's runtime PM from the async notifier callback), that
+power-up will fail if runtime PM is not enabled yet.
+
+So runtime PM should be ready before its async sub-device is registered
+and accessible by others.
+
+Fixes: d3f863a63fe4 ("media: i2c: Add ov9734 image sensor driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/ov9734.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/media/i2c/ov9734.c
++++ b/drivers/media/i2c/ov9734.c
+@@ -939,6 +939,7 @@ static void ov9734_remove(struct i2c_cli
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ mutex_destroy(&ov9734->mutex);
+ }
+
+@@ -984,13 +985,6 @@ static int ov9734_probe(struct i2c_clien
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+- ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
+- if (ret < 0) {
+- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+- ret);
+- goto probe_error_media_entity_cleanup;
+- }
+-
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+@@ -999,9 +993,18 @@ static int ov9734_probe(struct i2c_clien
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
++ ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
++ if (ret < 0) {
++ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
++ ret);
++ goto probe_error_media_entity_cleanup_pm;
++ }
++
+ return 0;
+
+-probe_error_media_entity_cleanup:
++probe_error_media_entity_cleanup_pm:
++ pm_runtime_disable(&client->dev);
++ pm_runtime_set_suspended(&client->dev);
+ media_entity_cleanup(&ov9734->sd.entity);
+
+ probe_error_v4l2_ctrl_handler_free:
--- /dev/null
+From e1a9ae45736989c972a8d1c151bc390678ae6205 Mon Sep 17 00:00:00 2001
+From: Serge Semin <fancer.lancer@gmail.com>
+Date: Sat, 2 Dec 2023 14:14:20 +0300
+Subject: mips: Fix max_mapnr being uninitialized on early stages
+
+From: Serge Semin <fancer.lancer@gmail.com>
+
+commit e1a9ae45736989c972a8d1c151bc390678ae6205 upstream.
+
+max_mapnr variable is utilized in the pfn_valid() method in order to
+determine the upper PFN space boundary. Having it uninitialized
+effectively makes any PFN passed to that method invalid. That in turn
+causes occasional malfunctions in the kernel mm-subsystem even after the
+max_mapnr variable is actually properly updated. For instance,
+pfn_valid() is called in the init_unavailable_range() method in the
+framework of the calls-chain on MIPS:
+setup_arch()
++-> paging_init()
+ +-> free_area_init()
+ +-> memmap_init()
+ +-> memmap_init_zone_range()
+ +-> init_unavailable_range()
+
+Since pfn_valid() always returns "false" value before max_mapnr is
+initialized in the mem_init() method, any flatmem page-holes will be left
+in the poisoned/uninitialized state including the IO-memory pages. Thus
+any further attempts to map/remap the IO-memory by using MMU may fail.
+In particular it happened in my case on attempt to map the SRAM region.
+The kernel bootup procedure just crashed on the unhandled unaligned access
+bug raised in the __update_cache() method:
+
+> Unhandled kernel unaligned access[#1]:
+> CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.7.0-rc1-XXX-dirty #2056
+> ...
+> Call Trace:
+> [<8011ef9c>] __update_cache+0x88/0x1bc
+> [<80385944>] ioremap_page_range+0x110/0x2a4
+> [<80126948>] ioremap_prot+0x17c/0x1f4
+> [<80711b80>] __devm_ioremap+0x8c/0x120
+> [<80711e0c>] __devm_ioremap_resource+0xf4/0x218
+> [<808bf244>] sram_probe+0x4f4/0x930
+> [<80889d20>] platform_probe+0x68/0xec
+> ...
+
+Let's fix the problem by initializing the max_mapnr variable as soon as
+the required data is available. In particular it can be done right in the
+paging_init() method before free_area_init() is called since all the PFN
+zone boundaries have already been calculated by that time.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Serge Semin <fancer.lancer@gmail.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/mm/init.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -422,7 +422,12 @@ void __init paging_init(void)
+ (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+ max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+ }
++
++ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
++#else
++ max_mapnr = max_low_pfn;
+ #endif
++ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ free_area_init(max_zone_pfns);
+ }
+@@ -458,13 +463,6 @@ void __init mem_init(void)
+ */
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
+
+-#ifdef CONFIG_HIGHMEM
+- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+-#else
+- max_mapnr = max_low_pfn;
+-#endif
+- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+-
+ maar_init();
+ memblock_free_all();
+ setup_zero_pages(); /* Setup zeroed pages. */
--- /dev/null
+From a7d84a2e7663bbe12394cc771107e04668ea313a Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Tue, 5 Dec 2023 08:59:36 +0100
+Subject: mtd: maps: vmu-flash: Fix the (mtd core) switch to ref counters
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit a7d84a2e7663bbe12394cc771107e04668ea313a upstream.
+
+While switching to ref counters for tracking mtd device use, the vmu-flash
+driver was forgotten. The reason for reading the ref counter seems
+debatable, but let's just fix the build for now.
+
+Fixes: 19bfa9ebebb5 ("mtd: use refcount to prevent corruption")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202312022315.79twVRZw-lkp@intel.com/
+Cc: stable@vger.kernel.org
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20231205075936.13831-1-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/maps/vmu-flash.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
+index a7ec947a3ebb..53019d313db7 100644
+--- a/drivers/mtd/maps/vmu-flash.c
++++ b/drivers/mtd/maps/vmu-flash.c
+@@ -719,7 +719,7 @@ static int vmu_can_unload(struct maple_device *mdev)
+ card = maple_get_drvdata(mdev);
+ for (x = 0; x < card->partitions; x++) {
+ mtd = &((card->mtd)[x]);
+- if (mtd->usecount > 0)
++ if (kref_read(&mtd->refcnt))
+ return 0;
+ }
+ return 1;
+--
+2.43.0
+
--- /dev/null
+From 828f6df1bcba7f64729166efc7086ea657070445 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Fri, 15 Dec 2023 13:32:08 +0100
+Subject: mtd: rawnand: Clarify conditions to enable continuous reads
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 828f6df1bcba7f64729166efc7086ea657070445 upstream.
+
+The current logic is probably fine but is a bit convoluted. Plus, we
+don't want partial pages to be part of the sequential operation just in
+case the core would optimize the page read with a subpage read (which
+would break the sequence). This may happen on the first and last page
+only, so if the start offset or the end offset is not aligned with a
+page boundary, better avoid them to prevent any risk.
+
+Cc: stable@vger.kernel.org
+Fixes: 003fe4b9545b ("mtd: rawnand: Support for sequential cache reads")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/linux-mtd/20231215123208.516590-5-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/nand_base.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -3461,21 +3461,29 @@ static void rawnand_enable_cont_reads(st
+ u32 readlen, int col)
+ {
+ struct mtd_info *mtd = nand_to_mtd(chip);
++ unsigned int end_page, end_col;
++
++ chip->cont_read.ongoing = false;
+
+ if (!chip->controller->supported_op.cont_read)
+ return;
+
+- if ((col && col + readlen < (3 * mtd->writesize)) ||
+- (!col && readlen < (2 * mtd->writesize))) {
+- chip->cont_read.ongoing = false;
++ end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
++ end_col = (col + readlen) % mtd->writesize;
++
++ if (col)
++ page++;
++
++ if (end_col && end_page)
++ end_page--;
++
++ if (page + 1 > end_page)
+ return;
+- }
+
+- chip->cont_read.ongoing = true;
+ chip->cont_read.first_page = page;
+- if (col)
+- chip->cont_read.first_page++;
+- chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
++ chip->cont_read.last_page = end_page;
++ chip->cont_read.ongoing = true;
++
+ rawnand_cap_cont_reads(chip);
+ }
+
--- /dev/null
+From 7c9414c870c027737d0f2ed7b0ed10f26edb1c61 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Fri, 15 Dec 2023 13:32:06 +0100
+Subject: mtd: rawnand: Fix core interference with sequential reads
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit 7c9414c870c027737d0f2ed7b0ed10f26edb1c61 upstream.
+
+A couple of reports pointed at some strange failures happening a bit
+randomly since the introduction of sequential page reads support. After
+investigation it turned out the most likely reason for these issues was
+the fact that sometimes a (longer) read might happen, starting at the
+same page that was read previously. This is optimized by the raw NAND
+core, by not sending the READ_PAGE command to the NAND device and just
+reading out the data from a local cache. When this page is also flagged as
+being the starting point for a sequential read, it means the next page will
+be accessed without the right instructions. The NAND chip will be confused
+and will not output correct data. To avoid this situation from happening
+again, we can handle this case with a bit of additional logic and postpone
+the initialization of the read sequence by one page.
+
+Reported-by: Alexander Shiyan <eagle.alexander923@gmail.com>
+Closes: https://lore.kernel.org/linux-mtd/CAP1tNvS=NVAm-vfvYWbc3k9Cx9YxMc2uZZkmXk8h1NhGX877Zg@mail.gmail.com/
+Reported-by: Måns Rullgård <mans@mansr.com>
+Closes: https://lore.kernel.org/linux-mtd/yw1xfs6j4k6q.fsf@mansr.com/
+Reported-by: Martin Hundebøll <martin@geanix.com>
+Closes: https://lore.kernel.org/linux-mtd/9d0c42fcde79bfedfe5b05d6a4e9fdef71d3dd52.camel@geanix.com/
+Fixes: 003fe4b9545b ("mtd: rawnand: Support for sequential cache reads")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/linux-mtd/20231215123208.516590-3-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/nand_base.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -3479,6 +3479,18 @@ static void rawnand_enable_cont_reads(st
+ rawnand_cap_cont_reads(chip);
+ }
+
++static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
++{
++ if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
++ return;
++
++ chip->cont_read.first_page++;
++ if (chip->cont_read.first_page == chip->cont_read.pause_page)
++ chip->cont_read.first_page++;
++ if (chip->cont_read.first_page >= chip->cont_read.last_page)
++ chip->cont_read.ongoing = false;
++}
++
+ /**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @chip: NAND chip object
+@@ -3653,6 +3665,8 @@ read_retry:
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
++
++ rawnand_cont_read_skip_first_page(chip, page);
+ }
+
+ readlen -= bytes;
--- /dev/null
+From bbcd80f53a5e8c27c2511f539fec8c373f500cf4 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Fri, 15 Dec 2023 13:32:05 +0100
+Subject: mtd: rawnand: Prevent crossing LUN boundaries during sequential reads
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit bbcd80f53a5e8c27c2511f539fec8c373f500cf4 upstream.
+
+The ONFI specification states that devices do not need to support
+sequential reads across LUN boundaries. In order to prevent such an event
+from happening and possibly failing, let's introduce the concept of
+"pause" in the sequential read to handle these cases. The first/last
+pages remain the same but any time we cross a LUN boundary we will end
+and restart (if relevant) the sequential read operation.
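+
+As a purely illustrative worked example, assuming a hypothetical chip with
+64 pages per eraseblock and 1024 eraseblocks per LUN, the pause page for a
+read spanning two LUNs would be derived like this:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int pages_per_lun = 64 * 1024; /* assumed geometry */
+          unsigned int first_page = 65000;        /* hypothetical read... */
+          unsigned int last_page = 66000;         /* ...crossing into LUN 1 */
+          unsigned int pause_page;
+
+          if (first_page / pages_per_lun != last_page / pages_per_lun)
+                  /* stop on the last page of the first LUN, restart after */
+                  pause_page = (first_page / pages_per_lun) * pages_per_lun +
+                               pages_per_lun - 1;
+          else
+                  pause_page = last_page;
+
+          printf("pause_page = %u\n", pause_page); /* prints 65535 */
+          return 0;
+  }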
+
+Cc: stable@vger.kernel.org
+Fixes: 003fe4b9545b ("mtd: rawnand: Support for sequential cache reads")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/linux-mtd/20231215123208.516590-2-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/nand_base.c | 43 +++++++++++++++++++++++++++++++++------
+ include/linux/mtd/rawnand.h | 2 +
+ 2 files changed, 39 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1208,6 +1208,23 @@ static int nand_lp_exec_read_page_op(str
+ return nand_exec_op(chip, &op);
+ }
+
++static void rawnand_cap_cont_reads(struct nand_chip *chip)
++{
++ struct nand_memory_organization *memorg;
++ unsigned int pages_per_lun, first_lun, last_lun;
++
++ memorg = nanddev_get_memorg(&chip->base);
++ pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
++ first_lun = chip->cont_read.first_page / pages_per_lun;
++ last_lun = chip->cont_read.last_page / pages_per_lun;
++
++ /* Prevent sequential cache reads across LUN boundaries */
++ if (first_lun != last_lun)
++ chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
++ else
++ chip->cont_read.pause_page = chip->cont_read.last_page;
++}
++
+ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool check_only)
+@@ -1226,7 +1243,7 @@ static int nand_lp_exec_cont_read_page_o
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_op_instr cont_instrs[] = {
+- NAND_OP_CMD(page == chip->cont_read.last_page ?
++ NAND_OP_CMD(page == chip->cont_read.pause_page ?
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+@@ -1263,16 +1280,29 @@ static int nand_lp_exec_cont_read_page_o
+ }
+
+ if (page == chip->cont_read.first_page)
+- return nand_exec_op(chip, &start_op);
++ ret = nand_exec_op(chip, &start_op);
+ else
+- return nand_exec_op(chip, &cont_op);
++ ret = nand_exec_op(chip, &cont_op);
++ if (ret)
++ return ret;
++
++ if (!chip->cont_read.ongoing)
++ return 0;
++
++ if (page == chip->cont_read.pause_page &&
++ page != chip->cont_read.last_page) {
++ chip->cont_read.first_page = chip->cont_read.pause_page + 1;
++ rawnand_cap_cont_reads(chip);
++ } else if (page == chip->cont_read.last_page) {
++ chip->cont_read.ongoing = false;
++ }
++
++ return 0;
+ }
+
+ static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+ {
+- return chip->cont_read.ongoing &&
+- page >= chip->cont_read.first_page &&
+- page <= chip->cont_read.last_page;
++ return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
+ }
+
+ /**
+@@ -3446,6 +3476,7 @@ static void rawnand_enable_cont_reads(st
+ if (col)
+ chip->cont_read.first_page++;
+ chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
++ rawnand_cap_cont_reads(chip);
+ }
+
+ /**
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -1265,6 +1265,7 @@ struct nand_secure_region {
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
++ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+@@ -1321,6 +1322,7 @@ struct nand_chip {
+ struct {
+ bool ongoing;
+ unsigned int first_page;
++ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
+
--- /dev/null
+From a62c4597953fe54c6af04166a5e2872efd0e1490 Mon Sep 17 00:00:00 2001
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+Date: Fri, 15 Dec 2023 13:32:07 +0100
+Subject: mtd: rawnand: Prevent sequential reads with on-die ECC engines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Raynal <miquel.raynal@bootlin.com>
+
+commit a62c4597953fe54c6af04166a5e2872efd0e1490 upstream.
+
+Some devices support sequential reads when using the on-die ECC engines,
+some others do not. It is a bit hard to know which ones will break other
+than experimentally, so in order to avoid such a difficult and painful
+task, let's just assume all devices should avoid using this
+optimization when configured with on-die ECC correction.
+
+Cc: stable@vger.kernel.org
+Fixes: 003fe4b9545b ("mtd: rawnand: Support for sequential cache reads")
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Tested-by: Martin Hundebøll <martin@geanix.com>
+Link: https://lore.kernel.org/linux-mtd/20231215123208.516590-4-miquel.raynal@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/nand/raw/nand_base.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -5171,6 +5171,14 @@ static void rawnand_late_check_supported
+ /* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.cont_read);
+
++ /*
++ * Too many devices do not support sequential cached reads with on-die
++ * ECC correction enabled, so in this case refuse to perform the
++ * automation.
++ */
++ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
++ return;
++
+ if (!nand_has_exec_op(chip))
+ return;
+
--- /dev/null
+From 78fbb92af27d0982634116c7a31065f24d092826 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 12 Jan 2024 13:26:57 +0000
+Subject: nbd: always initialize struct msghdr completely
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 78fbb92af27d0982634116c7a31065f24d092826 upstream.
+
+syzbot complains that msg->msg_get_inq value can be uninitialized [1]
+
+struct msghdr has gained many new fields recently; we should always make
+sure their values are zero by default.
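+
+A small stand-alone illustration of why the empty initializer is
+preferable; the structure and field names below are made up, while the
+kernel code applies the same empty-brace idiom (a GNU/C23 extension) to
+struct msghdr:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Hypothetical structure that later grew an extra member. */
+  struct reply_hdr {
+          void *name;
+          int namelen;
+          int get_inq; /* imagine this member was added after the caller */
+  };
+
+  int main(void)
+  {
+          struct reply_hdr a = {}; /* every member starts out as zero */
+          struct reply_hdr b;
+
+          memset(&b, 0xff, sizeof(b)); /* simulate stale stack contents */
+          b.name = NULL;               /* field-by-field init misses get_inq */
+          b.namelen = 0;
+
+          printf("a.get_inq=%d b.get_inq=%d\n", a.get_inq, b.get_inq);
+          return 0;
+  }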
+
+[1]
+ BUG: KMSAN: uninit-value in tcp_recvmsg+0x686/0xac0 net/ipv4/tcp.c:2571
+ tcp_recvmsg+0x686/0xac0 net/ipv4/tcp.c:2571
+ inet_recvmsg+0x131/0x580 net/ipv4/af_inet.c:879
+ sock_recvmsg_nosec net/socket.c:1044 [inline]
+ sock_recvmsg+0x12b/0x1e0 net/socket.c:1066
+ __sock_xmit+0x236/0x5c0 drivers/block/nbd.c:538
+ nbd_read_reply drivers/block/nbd.c:732 [inline]
+ recv_work+0x262/0x3100 drivers/block/nbd.c:863
+ process_one_work kernel/workqueue.c:2627 [inline]
+ process_scheduled_works+0x104e/0x1e70 kernel/workqueue.c:2700
+ worker_thread+0xf45/0x1490 kernel/workqueue.c:2781
+ kthread+0x3ed/0x540 kernel/kthread.c:388
+ ret_from_fork+0x66/0x80 arch/x86/kernel/process.c:147
+ ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:242
+
+Local variable msg created at:
+ __sock_xmit+0x4c/0x5c0 drivers/block/nbd.c:513
+ nbd_read_reply drivers/block/nbd.c:732 [inline]
+ recv_work+0x262/0x3100 drivers/block/nbd.c:863
+
+CPU: 1 PID: 7465 Comm: kworker/u5:1 Not tainted 6.7.0-rc7-syzkaller-00041-gf016f7547aee #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 11/17/2023
+Workqueue: nbd5-recv recv_work
+
+Fixes: f94fd25cb0aa ("tcp: pass back data left in socket after receive")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: stable@vger.kernel.org
+Cc: Josef Bacik <josef@toxicpanda.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: linux-block@vger.kernel.org
+Cc: nbd@other.debian.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240112132657.647112-1-edumazet@google.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/nbd.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -510,7 +510,7 @@ static int __sock_xmit(struct nbd_device
+ struct iov_iter *iter, int msg_flags, int *sent)
+ {
+ int result;
+- struct msghdr msg;
++ struct msghdr msg = {} ;
+ unsigned int noreclaim_flag;
+
+ if (unlikely(!sock)) {
+@@ -526,10 +526,6 @@ static int __sock_xmit(struct nbd_device
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+- msg.msg_name = NULL;
+- msg.msg_namelen = 0;
+- msg.msg_control = NULL;
+- msg.msg_controllen = 0;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
--- /dev/null
+From 7269c250db1b89cda72ca419b7bd5e37997309d6 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Fri, 5 Jan 2024 13:55:37 +0530
+Subject: OPP: Pass rounded rate to _set_opp()
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 7269c250db1b89cda72ca419b7bd5e37997309d6 upstream.
+
+The OPP core finds the eventual frequency to set with the help of
+clk_round_rate(), and this rounded rate was previously passed to
+_set_opp(), so that is what would get configured.
+
+The commit 1efae8d2e777 ("OPP: Make dev_pm_opp_set_opp() independent of
+frequency") mistakenly changed that. Fix it.
+
+Fixes: 1efae8d2e777 ("OPP: Make dev_pm_opp_set_opp() independent of frequency")
+Cc: v5.18+ <stable@vger.kernel.org> # v6.0+
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/opp/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1322,12 +1322,12 @@ int dev_pm_opp_set_rate(struct device *d
+ * value of the frequency. In such a case, do not abort but
+ * configure the hardware to the desired frequency forcefully.
+ */
+- forced = opp_table->rate_clk_single != target_freq;
++ forced = opp_table->rate_clk_single != freq;
+ }
+
+- ret = _set_opp(dev, opp_table, opp, &target_freq, forced);
++ ret = _set_opp(dev, opp_table, opp, &freq, forced);
+
+- if (target_freq)
++ if (freq)
+ dev_pm_opp_put(opp);
+
+ put_opp_table:
--- /dev/null
+From 735ae74f73e55c191d48689bd11ff4a06ea0508f Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Wed, 3 Jan 2024 21:02:16 +0100
+Subject: parisc/firmware: Fix F-extend for PDC addresses
+
+From: Helge Deller <deller@gmx.de>
+
+commit 735ae74f73e55c191d48689bd11ff4a06ea0508f upstream.
+
+When running with narrow firmware (64-bit kernel using a 32-bit
+firmware), extend PDC addresses into the 0xfffffff0.00000000
+region instead of the 0xf0f0f0f0.00000000 region.
+
+This fixes the power button on the C3700 machine in qemu (64-bit CPU
+with 32-bit firmware), and my assumption is that the previous code was
+really never used (because most 64-bit machines have a 64-bit firmware),
+or it just worked on very old machines because they may only decode
+40-bit of virtual addresses.
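+
+A user-space sketch of the corrected extension, assuming an LP64 host where
+unsigned long is 64 bits wide; the sample PDC address is made up:
+
+  #include <stdio.h>
+
+  /* Illustrative stand-in for f_extend() with narrow (32-bit) firmware on
+   * a 64-bit kernel; not the kernel function itself. */
+  static unsigned long f_extend(unsigned long address)
+  {
+          if ((address & 0xff000000) == 0xf0000000)
+                  return (0xfffffff0UL << 32) | (unsigned int)address;
+
+          if ((address & 0xf0000000) == 0xf0000000)
+                  return (0xffffffffUL << 32) | (unsigned int)address;
+
+          return address;
+  }
+
+  int main(void)
+  {
+          unsigned long pdc = 0xf0f40000UL; /* hypothetical PDC address */
+
+          /* Prints 0xfffffff0f0f40000; the old code gave 0xf0f0f0f0f0f40000. */
+          printf("extended: 0x%016lx\n", f_extend(pdc));
+          return 0;
+  }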
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/firmware.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/firmware.c
++++ b/arch/parisc/kernel/firmware.c
+@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned l
+ #ifdef CONFIG_64BIT
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+- return 0xf0f0f0f000000000UL | (u32)address;
++ return (0xfffffff0UL << 32) | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+- return 0xffffffff00000000UL | (u32)address;
++ return (0xffffffffUL << 32) | (u32)address;
+ }
+ #endif
+ return address;
--- /dev/null
+From 6472036581f947109b20664121db1d143e916f0b Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Wed, 3 Jan 2024 21:17:23 +0100
+Subject: parisc/power: Fix power soft-off button emulation on qemu
+
+From: Helge Deller <deller@gmx.de>
+
+commit 6472036581f947109b20664121db1d143e916f0b upstream.
+
+Make sure to start the kthread to check the power button on qemu as
+well if the power button address was provided.
+This fixes the qemu built-in system_powerdown runtime command.
+
+Fixes: d0c219472980 ("parisc/power: Add power soft-off when running on qemu")
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: stable@vger.kernel.org # v6.0+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/parisc/power.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/parisc/power.c
++++ b/drivers/parisc/power.c
+@@ -238,7 +238,7 @@ static int __init power_init(void)
+ if (running_on_qemu && soft_power_reg)
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
+ qemu_power_off, (void *)soft_power_reg);
+- else
++ if (!running_on_qemu || soft_power_reg)
+ power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
+ KTHREAD_NAME);
+ if (IS_ERR(power_task)) {
--- /dev/null
+From 08e23d05fa6dc4fc13da0ccf09defdd4bbc92ff4 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Tue, 24 Oct 2023 20:30:15 +0200
+Subject: PM / devfreq: Fix buffer overflow in trans_stat_show
+
+From: Christian Marangi <ansuelsmth@gmail.com>
+
+commit 08e23d05fa6dc4fc13da0ccf09defdd4bbc92ff4 upstream.
+
+Fix buffer overflow in trans_stat_show().
+
+Convert the simple sprintf() calls to the more secure scnprintf() with a
+size of PAGE_SIZE.
+
+Add a check for exceeding PAGE_SIZE and exit early from the loop. Also add,
+at the end, a warning that we exceeded PAGE_SIZE and that the stats are
+disabled.
+
+Return -EFBIG in the case where we don't have enough space to write the
+full transition table.
+
+Also document in the ABI that this function can return -EFBIG error.
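+
+The bounded-append pattern the fix relies on can be sketched in user space
+as follows; my_scnprintf() is a simplified stand-in for the kernel's
+scnprintf(), which reports the number of bytes actually written rather than
+the would-be length:
+
+  #include <stdarg.h>
+  #include <stdio.h>
+
+  #define PAGE_SIZE 4096
+
+  /* Never reports more than size - 1, so "len" cannot run past the buffer. */
+  static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
+  {
+          va_list args;
+          int i;
+
+          if (!size)
+                  return 0;
+          va_start(args, fmt);
+          i = vsnprintf(buf, size, fmt, args);
+          va_end(args);
+          return i >= (int)size ? (int)size - 1 : i;
+  }
+
+  int main(void)
+  {
+          static char buf[PAGE_SIZE];
+          long len = 0;
+
+          for (unsigned int i = 0; i < 100000; i++) {
+                  if (len >= PAGE_SIZE - 1)
+                          break; /* exit early instead of overflowing */
+                  len += my_scnprintf(buf + len, PAGE_SIZE - len, "%10u", i);
+          }
+
+          if (len >= PAGE_SIZE - 1)
+                  printf("table too big, would return -EFBIG\n");
+          else
+                  printf("wrote %ld bytes\n", len);
+          return 0;
+  }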
+
+Link: https://lore.kernel.org/all/20231024183016.14648-2-ansuelsmth@gmail.com/
+Cc: stable@vger.kernel.org
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218041
+Fixes: e552bbaf5b98 ("PM / devfreq: Add sysfs node for representing frequency transition information.")
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-class-devfreq | 3 +
+ drivers/devfreq/devfreq.c | 59 +++++++++++++++++---------
+ 2 files changed, 43 insertions(+), 19 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-class-devfreq
++++ b/Documentation/ABI/testing/sysfs-class-devfreq
+@@ -52,6 +52,9 @@ Description:
+
+ echo 0 > /sys/class/devfreq/.../trans_stat
+
++ If the transition table is bigger than PAGE_SIZE, reading
++ this will return an -EFBIG error.
++
+ What: /sys/class/devfreq/.../available_frequencies
+ Date: October 2012
+ Contact: Nishanth Menon <nm@ti.com>
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -1688,7 +1688,7 @@ static ssize_t trans_stat_show(struct de
+ struct device_attribute *attr, char *buf)
+ {
+ struct devfreq *df = to_devfreq(dev);
+- ssize_t len;
++ ssize_t len = 0;
+ int i, j;
+ unsigned int max_state;
+
+@@ -1697,7 +1697,7 @@ static ssize_t trans_stat_show(struct de
+ max_state = df->max_state;
+
+ if (max_state == 0)
+- return sprintf(buf, "Not Supported.\n");
++ return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+
+ mutex_lock(&df->lock);
+ if (!df->stop_polling &&
+@@ -1707,31 +1707,52 @@ static ssize_t trans_stat_show(struct de
+ }
+ mutex_unlock(&df->lock);
+
+- len = sprintf(buf, " From : To\n");
+- len += sprintf(buf + len, " :");
+- for (i = 0; i < max_state; i++)
+- len += sprintf(buf + len, "%10lu",
+- df->freq_table[i]);
++ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " :");
++ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
++ df->freq_table[i]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ return PAGE_SIZE - 1;
+
+- len += sprintf(buf + len, " time(ms)\n");
++ len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
+ if (df->freq_table[i] == df->previous_freq)
+- len += sprintf(buf + len, "*");
++ len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ else
+- len += sprintf(buf + len, " ");
+-
+- len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
+- for (j = 0; j < max_state; j++)
+- len += sprintf(buf + len, "%10u",
+- df->stats.trans_table[(i * max_state) + j]);
++ len += scnprintf(buf + len, PAGE_SIZE - len, " ");
++ if (len >= PAGE_SIZE - 1)
++ break;
++
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
++ df->freq_table[i]);
++ for (j = 0; j < max_state; j++) {
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
++ df->stats.trans_table[(i * max_state) + j]);
++ }
++ if (len >= PAGE_SIZE - 1)
++ break;
++ len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
++ jiffies64_to_msecs(df->stats.time_in_state[i]));
++ }
+
+- len += sprintf(buf + len, "%10llu\n", (u64)
+- jiffies64_to_msecs(df->stats.time_in_state[i]));
++ if (len < PAGE_SIZE - 1)
++ len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
++ df->stats.total_trans);
++
++ if (len >= PAGE_SIZE - 1) {
++ pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
++ return -EFBIG;
+ }
+
+- len += sprintf(buf + len, "Total transition : %u\n",
+- df->stats.total_trans);
+ return len;
+ }
+
--- /dev/null
+From 71cd7e80cfde548959952eac7063aeaea1f2e1c6 Mon Sep 17 00:00:00 2001
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+Date: Thu, 16 Nov 2023 08:56:09 +0800
+Subject: PM: hibernate: Enforce ordering during image compression/decompression
+
+From: Hongchen Zhang <zhanghongchen@loongson.cn>
+
+commit 71cd7e80cfde548959952eac7063aeaea1f2e1c6 upstream.
+
+An S4 (suspend to disk) test on the LoongArch 3A6000 platform sometimes
+fails with the following error message in the dmesg log:
+
+ Invalid LZO compressed length
+
+That happens because when compressing/decompressing the image, the
+synchronization between the control thread and the compress/decompress/crc
+thread is based on a relaxed ordering interface, which is unreliable, and the
+following situation may occur:
+
+CPU 0                                   CPU 1
+save_image_lzo                          lzo_compress_threadfn
+                                          atomic_set(&d->stop, 1);
+  atomic_read(&data[thr].stop)
+  data[thr].cmp = data[thr].cmp_len;
+                                          WRITE data[thr].cmp_len
+
+Then CPU0 gets a stale cmp_len and writes it to disk. During resume from S4,
+wrong cmp_len is loaded.
+
+To maintain data consistency between the two threads, use the acquire/release
+variants of atomic set and read operations.
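+
+The idea can be demonstrated with a small user-space analogue based on C11
+atomics (build with -pthread): the release store publishes cmp_len, and the
+acquire load on the other thread guarantees the published value is seen:
+
+  #include <pthread.h>
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  static size_t cmp_len;
+  static atomic_int stop;
+
+  static void *worker(void *arg)
+  {
+          (void)arg;
+          cmp_len = 4096;                                   /* plain write */
+          atomic_store_explicit(&stop, 1, memory_order_release); /* publish */
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t thr;
+
+          pthread_create(&thr, NULL, worker, NULL);
+
+          while (!atomic_load_explicit(&stop, memory_order_acquire))
+                  ; /* wait for the release store */
+
+          /* Safe: the acquire load pairs with the release store above. */
+          printf("cmp_len = %zu\n", cmp_len);
+
+          pthread_join(thr, NULL);
+          return 0;
+  }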
+
+Fixes: 081a9d043c98 ("PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
+Co-developed-by: Weihao Li <liweihao@loongson.cn>
+Signed-off-by: Weihao Li <liweihao@loongson.cn>
+[ rjw: Subject rewrite and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/swap.c | 38 +++++++++++++++++++-------------------
+ 1 file changed, 19 insertions(+), 19 deletions(-)
+
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -605,11 +605,11 @@ static int crc32_threadfn(void *data)
+ unsigned i;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -618,7 +618,7 @@ static int crc32_threadfn(void *data)
+ for (i = 0; i < d->run_threads; i++)
+ *d->crc32 = crc32_le(*d->crc32,
+ d->unc[i], *d->unc_len[i]);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -648,12 +648,12 @@ static int lzo_compress_threadfn(void *d
+ struct cmp_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -662,7 +662,7 @@ static int lzo_compress_threadfn(void *d
+ d->ret = lzo1x_1_compress(d->unc, d->unc_len,
+ d->cmp + LZO_HEADER, &d->cmp_len,
+ d->wrk);
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -797,7 +797,7 @@ static int save_image_lzo(struct swap_ma
+
+ data[thr].unc_len = off;
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -805,12 +805,12 @@ static int save_image_lzo(struct swap_ma
+ break;
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -849,7 +849,7 @@ static int save_image_lzo(struct swap_ma
+ }
+ }
+
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+
+@@ -1131,12 +1131,12 @@ static int lzo_decompress_threadfn(void
+ struct dec_data *d = data;
+
+ while (1) {
+- wait_event(d->go, atomic_read(&d->ready) ||
++ wait_event(d->go, atomic_read_acquire(&d->ready) ||
+ kthread_should_stop());
+ if (kthread_should_stop()) {
+ d->thr = NULL;
+ d->ret = -1;
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ break;
+ }
+@@ -1149,7 +1149,7 @@ static int lzo_decompress_threadfn(void
+ flush_icache_range((unsigned long)d->unc,
+ (unsigned long)d->unc + d->unc_len);
+
+- atomic_set(&d->stop, 1);
++ atomic_set_release(&d->stop, 1);
+ wake_up(&d->done);
+ }
+ return 0;
+@@ -1334,7 +1334,7 @@ static int load_image_lzo(struct swap_ma
+ }
+
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ crc->run_threads = 0;
+ }
+@@ -1370,7 +1370,7 @@ static int load_image_lzo(struct swap_ma
+ pg = 0;
+ }
+
+- atomic_set(&data[thr].ready, 1);
++ atomic_set_release(&data[thr].ready, 1);
+ wake_up(&data[thr].go);
+ }
+
+@@ -1389,7 +1389,7 @@ static int load_image_lzo(struct swap_ma
+
+ for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
+ wait_event(data[thr].done,
+- atomic_read(&data[thr].stop));
++ atomic_read_acquire(&data[thr].stop));
+ atomic_set(&data[thr].stop, 0);
+
+ ret = data[thr].ret;
+@@ -1420,7 +1420,7 @@ static int load_image_lzo(struct swap_ma
+ ret = snapshot_write_next(snapshot);
+ if (ret <= 0) {
+ crc->run_threads = thr + 1;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ goto out_finish;
+ }
+@@ -1428,13 +1428,13 @@ static int load_image_lzo(struct swap_ma
+ }
+
+ crc->run_threads = thr;
+- atomic_set(&crc->ready, 1);
++ atomic_set_release(&crc->ready, 1);
+ wake_up(&crc->go);
+ }
+
+ out_finish:
+ if (crc->run_threads) {
+- wait_event(crc->done, atomic_read(&crc->stop));
++ wait_event(crc->done, atomic_read_acquire(&crc->stop));
+ atomic_set(&crc->stop, 0);
+ }
+ stop = ktime_get();
--- /dev/null
+From 7839d0078e0d5e6cc2fa0b0dfbee71de74f1e557 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 27 Dec 2023 21:41:06 +0100
+Subject: PM: sleep: Fix possible deadlocks in core system-wide PM code
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 7839d0078e0d5e6cc2fa0b0dfbee71de74f1e557 upstream.
+
+It is reported that in low-memory situations the system-wide resume core
+code deadlocks, because async_schedule_dev() executes its argument
+function synchronously if it cannot allocate memory (and not only in
+that case) and that function attempts to acquire a mutex that is already
+held. Executing the argument function synchronously from within
+dpm_async_fn() may also be problematic for ordering reasons (it may
+cause a consumer device's resume callback to be invoked before a
+requisite supplier device's one, for example).
+
+Address this by changing the code in question to use
+async_schedule_dev_nocall() for scheduling the asynchronous
+execution of device suspend and resume functions and to directly
+run them synchronously if async_schedule_dev_nocall() returns false.
+
+Link: https://lore.kernel.org/linux-pm/ZYvjiqX6EsL15moe@perf/
+Reported-by: Youngmin Nam <youngmin.nam@samsung.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+: 6aa09a5bccd8 async: Split async_schedule_node_domain()
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+: 7d4b5d7a37bd async: Introduce async_schedule_dev_nocall()
+Cc: 5.7+ <stable@vger.kernel.org> # 5.7+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/power/main.c | 148 +++++++++++++++++++++-------------------------
+ 1 file changed, 68 insertions(+), 80 deletions(-)
+
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *d
+ }
+
+ /**
+- * device_resume_noirq - Execute a "noirq resume" callback for given device.
++ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *d
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -655,7 +655,13 @@ Skip:
+ Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_noirq++;
++ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
++ }
+ }
+
+ static bool is_async(struct device *dev)
+@@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *
+ {
+ reinit_completion(&dev->power.completion);
+
+- if (is_async(dev)) {
+- get_device(dev);
+- async_schedule_dev(func, dev);
++ if (!is_async(dev))
++ return false;
++
++ get_device(dev);
++
++ if (async_schedule_dev_nocall(func, dev))
+ return true;
+- }
++
++ put_device(dev);
+
+ return false;
+ }
+@@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *
+ static void async_resume_noirq(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_noirq(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_noirq(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_noirq(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_noirq))
++ return;
++
++ __device_resume_noirq(dev, pm_transition, false);
++}
++
+ static void dpm_noirq_resume_devices(pm_message_t state)
+ {
+ struct device *dev;
+@@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+- dpm_async_fn(dev, async_resume_noirq);
+-
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+@@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_noirq(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_noirq++;
+- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " noirq", error);
+- }
+- }
++ device_resume_noirq(dev);
+
+ put_device(dev);
+
+@@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state
+ }
+
+ /**
+- * device_resume_early - Execute an "early resume" callback for given device.
++ * __device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
++static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -811,21 +807,31 @@ Out:
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+- return error;
++
++ if (error) {
++ suspend_stats.failed_resume_early++;
++ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async early" : " early", error);
++ }
+ }
+
+ static void async_resume_early(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+-
+- error = device_resume_early(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
+
++ __device_resume_early(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume_early(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume_early))
++ return;
++
++ __device_resume_early(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+@@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+- /*
+- * Advanced the async threads upfront,
+- * in case the starting of async threads is
+- * delayed by non-async resuming devices.
+- */
+- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+- dpm_async_fn(dev, async_resume_early);
+-
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+@@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state
+
+ mutex_unlock(&dpm_list_mtx);
+
+- if (!is_async(dev)) {
+- int error;
+-
+- error = device_resume_early(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume_early++;
+- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, " early", error);
+- }
+- }
++ device_resume_early(dev);
+
+ put_device(dev);
+
+@@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state
+ EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+ /**
+- * device_resume - Execute "resume" callbacks for given device.
++ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+-static int device_resume(struct device *dev, pm_message_t state, bool async)
++static void __device_resume(struct device *dev, pm_message_t state, bool async)
+ {
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+@@ -975,20 +963,30 @@ static int device_resume(struct device *
+
+ TRACE_RESUME(error);
+
+- return error;
++ if (error) {
++ suspend_stats.failed_resume++;
++ dpm_save_failed_step(SUSPEND_RESUME);
++ dpm_save_failed_dev(dev_name(dev));
++ pm_dev_err(dev, state, async ? " async" : "", error);
++ }
+ }
+
+ static void async_resume(void *data, async_cookie_t cookie)
+ {
+ struct device *dev = data;
+- int error;
+
+- error = device_resume(dev, pm_transition, true);
+- if (error)
+- pm_dev_err(dev, pm_transition, " async", error);
++ __device_resume(dev, pm_transition, true);
+ put_device(dev);
+ }
+
++static void device_resume(struct device *dev)
++{
++ if (dpm_async_fn(dev, async_resume))
++ return;
++
++ __device_resume(dev, pm_transition, false);
++}
++
+ /**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state)
+ pm_transition = state;
+ async_error = 0;
+
+- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+- dpm_async_fn(dev, async_resume);
+-
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
++
+ get_device(dev);
+- if (!is_async(dev)) {
+- int error;
+
+- mutex_unlock(&dpm_list_mtx);
++ mutex_unlock(&dpm_list_mtx);
+
+- error = device_resume(dev, state, false);
+- if (error) {
+- suspend_stats.failed_resume++;
+- dpm_save_failed_step(SUSPEND_RESUME);
+- dpm_save_failed_dev(dev_name(dev));
+- pm_dev_err(dev, state, "", error);
+- }
++ device_resume(dev);
++
++ mutex_lock(&dpm_list_mtx);
+
+- mutex_lock(&dpm_list_mtx);
+- }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
--- /dev/null
+From 482b718a84f08b6fc84879c3e90cc57dba11c115 Mon Sep 17 00:00:00 2001
+From: Geoff Levand <geoff@infradead.org>
+Date: Sun, 24 Dec 2023 09:52:46 +0900
+Subject: powerpc/ps3_defconfig: Disable PPC64_BIG_ENDIAN_ELF_ABI_V2
+
+From: Geoff Levand <geoff@infradead.org>
+
+commit 482b718a84f08b6fc84879c3e90cc57dba11c115 upstream.
+
+Commit 8c5fa3b5c4df ("powerpc/64: Make ELFv2 the default for big-endian
+builds"), merged in Linux-6.5-rc1 changes the calling ABI in a way
+that is incompatible with the current code for the PS3's LV1 hypervisor
+calls.
+
+This change just adds the line '# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set'
+to the ps3_defconfig file so that PPC64_ELF_ABI_V1 is used.
+
+Fixes run time errors like these:
+
+ BUG: Kernel NULL pointer dereference at 0x00000000
+ Faulting instruction address: 0xc000000000047cf0
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ Call Trace:
+ [c0000000023039e0] [c00000000100ebfc] ps3_create_spu+0xc4/0x2b0 (unreliable)
+ [c000000002303ab0] [c00000000100d4c4] create_spu+0xcc/0x3c4
+ [c000000002303b40] [c00000000100eae4] ps3_enumerate_spus+0xa4/0xf8
+
+Fixes: 8c5fa3b5c4df ("powerpc/64: Make ELFv2 the default for big-endian builds")
+Cc: stable@vger.kernel.org # v6.5+
+Signed-off-by: Geoff Levand <geoff@infradead.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/df906ac1-5f17-44b9-b0bb-7cd292a0df65@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/configs/ps3_defconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
+index 2b175ddf82f0..aa8bb0208bcc 100644
+--- a/arch/powerpc/configs/ps3_defconfig
++++ b/arch/powerpc/configs/ps3_defconfig
+@@ -24,6 +24,7 @@ CONFIG_PS3_VRAM=m
+ CONFIG_PS3_LPM=m
+ # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+ CONFIG_KEXEC=y
++# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set
+ CONFIG_PPC_4K_PAGES=y
+ CONFIG_SCHED_SMT=y
+ CONFIG_PM=y
+--
+2.43.0
+
--- /dev/null
+From d5362c37e1f8a40096452fc201c30e705750e687 Mon Sep 17 00:00:00 2001
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+Date: Fri, 15 Dec 2023 10:00:49 +0800
+Subject: rpmsg: virtio: Free driver_override when rpmsg_remove()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+commit d5362c37e1f8a40096452fc201c30e705750e687 upstream.
+
+Free driver_override when rpmsg_remove(); otherwise,
+the following memory leak will occur:
+
+unreferenced object 0xffff0000d55d7080 (size 128):
+ comm "kworker/u8:2", pid 56, jiffies 4294893188 (age 214.272s)
+ hex dump (first 32 bytes):
+ 72 70 6d 73 67 5f 6e 73 00 00 00 00 00 00 00 00 rpmsg_ns........
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<000000009c94c9c1>] __kmem_cache_alloc_node+0x1f8/0x320
+ [<000000002300d89b>] __kmalloc_node_track_caller+0x44/0x70
+ [<00000000228a60c3>] kstrndup+0x4c/0x90
+ [<0000000077158695>] driver_set_override+0xd0/0x164
+ [<000000003e9c4ea5>] rpmsg_register_device_override+0x98/0x170
+ [<000000001c0c89a8>] rpmsg_ns_register_device+0x24/0x30
+ [<000000008bbf8fa2>] rpmsg_probe+0x2e0/0x3ec
+ [<00000000e65a68df>] virtio_dev_probe+0x1c0/0x280
+ [<00000000443331cc>] really_probe+0xbc/0x2dc
+ [<00000000391064b1>] __driver_probe_device+0x78/0xe0
+ [<00000000a41c9a5b>] driver_probe_device+0xd8/0x160
+ [<000000009c3bd5df>] __device_attach_driver+0xb8/0x140
+ [<0000000043cd7614>] bus_for_each_drv+0x7c/0xd4
+ [<000000003b929a36>] __device_attach+0x9c/0x19c
+ [<00000000a94e0ba8>] device_initial_probe+0x14/0x20
+ [<000000003c999637>] bus_probe_device+0xa0/0xac
+
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Fixes: b0b03b811963 ("rpmsg: Release rpmsg devices in backends")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20231215020049.78750-1-xiaolei.wang@windriver.com
+Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rpmsg/virtio_rpmsg_bus.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/rpmsg/virtio_rpmsg_bus.c
++++ b/drivers/rpmsg/virtio_rpmsg_bus.c
+@@ -378,6 +378,7 @@ static void virtio_rpmsg_release_device(
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
++ kfree(rpdev->driver_override);
+ kfree(vch);
+ }
+
--- /dev/null
+From 850fb7fa8c684a4c6bf0e4b6978f4ddcc5d43d11 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:31 -0500
+Subject: s390/vfio-ap: always filter entire AP matrix
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit 850fb7fa8c684a4c6bf0e4b6978f4ddcc5d43d11 upstream.
+
+The vfio_ap_mdev_filter_matrix function is called whenever a new adapter or
+domain is assigned to the mdev. The purpose of the function is to update
+the guest's AP configuration by filtering the matrix of adapters and
+domains assigned to the mdev. When an adapter or domain is assigned, only
+the APQNs associated with the APID of the new adapter or APQI of the new
+domain are inspected. If an APQN does not reference a queue device bound to
+the vfio_ap device driver, then it's APID will be filtered from the mdev's
+matrix when updating the guest's AP configuration.
+
+Inspecting only the APID of the new adapter or APQI of the new domain will
+result in passing AP queues through to a guest that are not bound to the
+vfio_ap device driver under certain circumstances. Consider the following:
+
+guest's AP configuration (all also assigned to the mdev's matrix):
+14.0004
+14.0005
+14.0006
+16.0004
+16.0005
+16.0006
+
+unassign domain 4
+unbind queue 16.0005
+assign domain 4
+
+When domain 4 is re-assigned, since only domain 4 will be inspected, the
+APQNs that will be examined will be:
+14.0004
+16.0004
+
+Since both of those APQNs reference queue devices that are bound to the
+vfio_ap device driver, nothing will get filtered from the mdev's matrix
+when updating the guest's AP configuration. Consequently, queue 16.0005
+will get passed through despite not being bound to the driver. This
+violates the Linux device model requirement that a guest shall only be
+given access to devices bound to the device driver facilitating their
+pass-through.
+
+To resolve this problem, every adapter and domain assigned to the mdev will
+be inspected when filtering the mdev's matrix.
+
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Acked-by: Halil Pasic <pasic@linux.ibm.com>
+Fixes: 48cae940c31d ("s390/vfio-ap: refresh guest's APCB by filtering AP resources assigned to mdev")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-2-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 57 +++++++++++---------------------------
+ 1 file changed, 17 insertions(+), 40 deletions(-)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -671,8 +671,7 @@ static bool vfio_ap_mdev_filter_cdoms(st
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+-static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
+- struct ap_matrix_mdev *matrix_mdev)
++static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev)
+ {
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+@@ -693,8 +692,8 @@ static bool vfio_ap_mdev_filter_matrix(u
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+- for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+- for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
++ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
++ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+@@ -959,7 +958,6 @@ static ssize_t assign_adapter_store(stru
+ {
+ int ret;
+ unsigned long apid;
+- DECLARE_BITMAP(apm_delta, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -988,11 +986,8 @@ static ssize_t assign_adapter_store(stru
+ }
+
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+- memset(apm_delta, 0, sizeof(apm_delta));
+- set_bit_inv(apid, apm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(apm_delta,
+- matrix_mdev->matrix.aqm, matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ ret = count;
+@@ -1168,7 +1163,6 @@ static ssize_t assign_domain_store(struc
+ {
+ int ret;
+ unsigned long apqi;
+- DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -1197,11 +1191,8 @@ static ssize_t assign_domain_store(struc
+ }
+
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+- memset(aqm_delta, 0, sizeof(aqm_delta));
+- set_bit_inv(apqi, aqm_delta);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
+- matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ ret = count;
+@@ -2092,9 +2083,7 @@ int vfio_ap_mdev_probe_queue(struct ap_d
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
+- matrix_mdev->matrix.aqm,
+- matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+ dev_set_drvdata(&apdev->device, q);
+@@ -2444,34 +2433,22 @@ void vfio_ap_on_cfg_changed(struct ap_co
+
+ static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+ {
+- bool do_hotplug = false;
+- int filter_domains = 0;
+- int filter_adapters = 0;
+- DECLARE_BITMAP(apm, AP_DEVICES);
+- DECLARE_BITMAP(aqm, AP_DOMAINS);
++ bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+- filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
+- matrix_mdev->apm_add, AP_DEVICES);
+- filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
+- matrix_mdev->aqm_add, AP_DOMAINS);
+-
+- if (filter_adapters && filter_domains)
+- do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
+- else if (filter_adapters)
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(apm,
+- matrix_mdev->shadow_apcb.aqm,
+- matrix_mdev);
+- else
+- do_hotplug |=
+- vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
+- aqm, matrix_mdev);
++ filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
++ matrix_mdev->apm_add, AP_DEVICES);
++ filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
++ matrix_mdev->aqm_add, AP_DOMAINS);
++ filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
++ matrix_mdev->adm_add, AP_DOMAINS);
++
++ if (filter_adapters || filter_domains)
++ do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev);
+
+- if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
+- AP_DOMAINS))
++ if (filter_cdoms)
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
--- /dev/null
+From b9bd10c43456d16abd97b717446f51afb3b88411 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:36 -0500
+Subject: s390/vfio-ap: do not reset queue removed from host config
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit b9bd10c43456d16abd97b717446f51afb3b88411 upstream.
+
+When a queue is unbound from the vfio_ap device driver, it is reset to
+ensure its crypto data is not leaked when it is bound to another device
+driver. If the queue is unbound because the adapter or domain was removed
+from the host's AP configuration, then attempting to reset it will fail,
+with response code 01 (APID not valid) returned from the reset command.
+Let's ensure that the queue is assigned to the host's
+configuration before resetting it.
+
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Reviewed-by: "Jason J. Herne" <jjherne@linux.ibm.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Fixes: eeb386aeb5b7 ("s390/vfio-ap: handle config changed and scan complete notification")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-7-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -2198,10 +2198,10 @@ void vfio_ap_mdev_remove_queue(struct ap
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
++ apid = AP_QID_CARD(q->apqn);
++ apqi = AP_QID_QUEUE(q->apqn);
+
+ if (matrix_mdev) {
+- apid = AP_QID_CARD(q->apqn);
+- apqi = AP_QID_QUEUE(q->apqn);
+ /* If the queue is assigned to the guest's AP configuration */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+@@ -2217,8 +2217,16 @@ void vfio_ap_mdev_remove_queue(struct ap
+ }
+ }
+
+- vfio_ap_mdev_reset_queue(q);
+- flush_work(&q->reset_work);
++ /*
++ * If the queue is not in the host's AP configuration, then resetting
++ * it will fail with response code 01, (APQN not valid); so, let's make
++ * sure it is in the host's config.
++ */
++ if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
++ test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
++ vfio_ap_mdev_reset_queue(q);
++ flush_work(&q->reset_work);
++ }
+
+ done:
+ if (matrix_mdev)
--- /dev/null
+From 774d10196e648e2c0b78da817f631edfb3dfa557 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:33 -0500
+Subject: s390/vfio-ap: let on_scan_complete() callback filter matrix and update guest's APCB
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit 774d10196e648e2c0b78da817f631edfb3dfa557 upstream.
+
+When adapters and/or domains are added to the host's AP configuration, this
+may result in multiple queue devices getting created and probed by the
+vfio_ap device driver. For each queue device probed, the matrix of adapters
+and domains assigned to a matrix mdev will be filtered to update the
+guest's APCB. If any adapters or domains get added to or removed from the
+APCB, the guest's AP configuration will be dynamically updated (i.e., hot
+plug/unplug). To dynamically update the guest's configuration, its VCPUs
+must be taken out of SIE for the period of time it takes to make the
+update. This is disruptive to the guest's operation and if there are many
+queues probed due to a change in the host's AP configuration, this could be
+troublesome. The problem is exacerbated by the fact that the
+'on_scan_complete' callback also filters the mdev's matrix and updates
+the guest's AP configuration.
+
+In order to reduce the potential amount of disruption to the guest that may
+result from a change to the host's AP configuration, let's bypass the
+filtering of the matrix and updating of the guest's AP configuration in the
+probe callback - if due to a host config change - and defer it until the
+'on_scan_complete' callback is invoked after the AP bus finishes its device
+scan operation. This way the filtering and updating will be performed only
+once regardless of the number of queues added.
+
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Fixes: 48cae940c31d ("s390/vfio-ap: refresh guest's APCB by filtering AP resources assigned to mdev")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-4-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -2084,9 +2084,22 @@ int vfio_ap_mdev_probe_queue(struct ap_d
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
++ /*
++ * If we're in the process of handling the adding of adapters or
++ * domains to the host's AP configuration, then let the
++ * vfio_ap device driver's on_scan_complete callback filter the
++ * matrix and update the guest's AP configuration after all of
++ * the new queue devices are probed.
++ */
++ if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
++ !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
++ goto done;
++
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
++
++done:
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
--- /dev/null
+From 16fb78cbf56e42b8efb2682a4444ab59e32e7959 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:32 -0500
+Subject: s390/vfio-ap: loop over the shadow APCB when filtering guest's AP configuration
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit 16fb78cbf56e42b8efb2682a4444ab59e32e7959 upstream.
+
+While filtering the mdev matrix, it doesn't make sense - and will have
+unexpected results - to filter an APID from the matrix if the APID or one
+of the associated APQIs is not in the host's AP configuration. There are
+two reasons for this:
+
+1. An adapter or domain that is not in the host's AP configuration can be
+ assigned to the matrix; this is known as over-provisioning. Queue
+ devices, however, are only created for adapters and domains in the
+ host's AP configuration, so there will be no queues associated with an
+ over-provisioned adapter or domain to filter.
+
+2. The adapter or domain may have been externally removed from the host's
+ configuration via an SE or HMC attached to a DPM enabled LPAR. In this
+ case, the vfio_ap device driver would have been notified by the AP bus
+ via the on_config_changed callback and the adapter or domain would
+ have already been filtered.
+
+Since the matrix_mdev->shadow_apcb.apm and matrix_mdev->shadow_apcb.aqm are
+copied from the mdev matrix sans the APIDs and APQIs not in the host's AP
+configuration, let's loop over those bitmaps instead of those assigned to
+the matrix.
+
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Fixes: 48cae940c31d ("s390/vfio-ap: refresh guest's APCB by filtering AP resources assigned to mdev")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-3-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -692,8 +692,9 @@ static bool vfio_ap_mdev_filter_matrix(s
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
++ for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
++ AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
--- /dev/null
+From f009cfa466558b7dfe97f167ba1875d6f9ea4c07 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:35 -0500
+Subject: s390/vfio-ap: reset queues associated with adapter for queue unbound from driver
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit f009cfa466558b7dfe97f167ba1875d6f9ea4c07 upstream.
+
+When a queue is unbound from the vfio_ap device driver, if that queue is
+assigned to a guest's AP configuration, its associated adapter is removed
+because queues are defined to a guest via a matrix of adapters and
+domains; so, it is not possible to remove a single queue.
+
+If an adapter is removed from the guest's AP configuration, all associated
+queues must be reset to prevent leaking crypto data should any of them be
+assigned to a different guest or device driver. The one caveat is that if
+the queue is being removed because the adapter or domain has been removed
+from the host's AP configuration, then an attempt to reset the queue will
+fail with response code 01, AP-queue number not valid; so resetting these
+queues should be skipped.
+
+Acked-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Fixes: 09d31ff78793 ("s390/vfio-ap: hot plug/unplug of AP devices when probed/removed")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-6-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 76 ++++++++++++++++++++------------------
+ 1 file changed, 41 insertions(+), 35 deletions(-)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -936,45 +936,45 @@ static void vfio_ap_mdev_link_adapter(st
+ AP_MKQID(apid, apqi));
+ }
+
++static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid,
++ struct list_head *qlist)
++{
++ struct vfio_ap_queue *q;
++ unsigned long apqi;
++
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
++ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
++ if (q)
++ list_add_tail(&q->reset_qnode, qlist);
++ }
++}
++
++static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long apid)
++{
++ struct list_head qlist;
++
++ INIT_LIST_HEAD(&qlist);
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
++ vfio_ap_mdev_reset_qlist(&qlist);
++}
++
+ static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm_reset)
+ {
+- struct vfio_ap_queue *q, *tmpq;
+ struct list_head qlist;
+- unsigned long apid, apqi;
+- int apqn, ret = 0;
++ unsigned long apid;
+
+ if (bitmap_empty(apm_reset, AP_DEVICES))
+ return 0;
+
+ INIT_LIST_HEAD(&qlist);
+
+- for_each_set_bit_inv(apid, apm_reset, AP_DEVICES) {
+- for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
+- AP_DOMAINS) {
+- /*
+- * If the domain is not in the host's AP configuration,
+- * then resetting it will fail with response code 01
+- * (APQN not valid).
+- */
+- if (!test_bit_inv(apqi,
+- (unsigned long *)matrix_dev->info.aqm))
+- continue;
+-
+- apqn = AP_MKQID(apid, apqi);
+- q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
++ for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
++ collect_queues_to_reset(matrix_mdev, apid, &qlist);
+
+- if (q)
+- list_add_tail(&q->reset_qnode, &qlist);
+- }
+- }
+-
+- ret = vfio_ap_mdev_reset_qlist(&qlist);
+-
+- list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode)
+- list_del(&q->reset_qnode);
+-
+- return ret;
++ return vfio_ap_mdev_reset_qlist(&qlist);
+ }
+
+ /**
+@@ -2200,24 +2200,30 @@ void vfio_ap_mdev_remove_queue(struct ap
+ matrix_mdev = q->matrix_mdev;
+
+ if (matrix_mdev) {
+- vfio_ap_unlink_queue_fr_mdev(q);
+-
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+-
+- /*
+- * If the queue is assigned to the guest's APCB, then remove
+- * the adapter's APID from the APCB and hot it into the guest.
+- */
++ /* If the queue is assigned to the guest's AP configuration */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
++ /*
++ * Since the queues are defined via a matrix of adapters
++ * and domains, it is not possible to hot unplug a
++ * single queue; so, let's unplug the adapter.
++ */
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apid(matrix_mdev, apid);
++ goto done;
+ }
+ }
+
+ vfio_ap_mdev_reset_queue(q);
+ flush_work(&q->reset_work);
++
++done:
++ if (matrix_mdev)
++ vfio_ap_unlink_queue_fr_mdev(q);
++
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
--- /dev/null
+From f848cba767e59f8d5c54984b1d45451aae040d50 Mon Sep 17 00:00:00 2001
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+Date: Mon, 15 Jan 2024 13:54:34 -0500
+Subject: s390/vfio-ap: reset queues filtered from the guest's AP config
+
+From: Tony Krowiak <akrowiak@linux.ibm.com>
+
+commit f848cba767e59f8d5c54984b1d45451aae040d50 upstream.
+
+When filtering the adapters from the configuration profile for a guest to
+create or update a guest's AP configuration, if the APID of an adapter and
+the APQI of a domain identify a queue device that is not bound to the
+vfio_ap device driver, the APID of the adapter will be filtered because an
+individual APQN cannot be filtered because the APQNs are assigned
+to an AP configuration as a matrix of APIDs and APQIs. Consequently, a
+guest will not have access to all of the queues associated with the
+filtered adapter. If the queues are subsequently made available again to
+the guest, they should re-appear in a reset state; so, let's make sure all
+queues associated with an adapter unplugged from the guest are reset.
+
+In order to identify the set of queues that need to be reset, let's allow a
+vfio_ap_queue object to be simultaneously stored in both a hashtable and a
+list: A hashtable used to store all of the queues assigned
+to a matrix mdev; and/or, a list used to store a subset of the queues that
+need to be reset. For example, when an adapter is hot unplugged from a
+guest, all guest queues associated with that adapter must be reset. Since
+that may be a subset of those assigned to the matrix mdev, they can be
+stored in a list that can be passed to the vfio_ap_mdev_reset_queues
+function.
+
+Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
+Acked-by: Halil Pasic <pasic@linux.ibm.com>
+Fixes: 48cae940c31d ("s390/vfio-ap: refresh guest's APCB by filtering AP resources assigned to mdev")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20240115185441.31526-5-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 171 +++++++++++++++++++++++++---------
+ drivers/s390/crypto/vfio_ap_private.h | 3
+ 2 files changed, 129 insertions(+), 45 deletions(-)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -32,7 +32,8 @@
+
+ #define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
+ static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+ static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
+@@ -662,16 +663,23 @@ static bool vfio_ap_mdev_filter_cdoms(st
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
++ * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
++ * guest's AP configuration that are still in the host's AP
++ * configuration.
+ *
+ * Note: If an APQN referencing a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so its APID will be
+- * filtered.
++ * filtered. Consequently, all queues associated with the adapter that
++ * are in the host's AP configuration must be reset. If queues are
++ * subsequently made available again to the guest, they should re-appear
++ * in a reset state
+ *
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
+ */
+-static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev)
++static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_filtered)
+ {
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+@@ -681,6 +689,7 @@ static bool vfio_ap_mdev_filter_matrix(s
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
++ bitmap_clear(apm_filtered, 0, AP_DEVICES);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+@@ -706,8 +715,16 @@ static bool vfio_ap_mdev_filter_matrix(s
+ apqn = AP_MKQID(apid, apqi);
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q || q->reset_status.response_code) {
+- clear_bit_inv(apid,
+- matrix_mdev->shadow_apcb.apm);
++ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
++
++ /*
++ * If the adapter was previously plugged into
++ * the guest, let's let the caller know that
++ * the APID was filtered.
++ */
++ if (test_bit_inv(apid, prev_shadow_apm))
++ set_bit_inv(apid, apm_filtered);
++
+ break;
+ }
+ }
+@@ -809,7 +826,7 @@ static void vfio_ap_mdev_remove(struct m
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+@@ -919,6 +936,47 @@ static void vfio_ap_mdev_link_adapter(st
+ AP_MKQID(apid, apqi));
+ }
+
++static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
++ unsigned long *apm_reset)
++{
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
++ unsigned long apid, apqi;
++ int apqn, ret = 0;
++
++ if (bitmap_empty(apm_reset, AP_DEVICES))
++ return 0;
++
++ INIT_LIST_HEAD(&qlist);
++
++ for_each_set_bit_inv(apid, apm_reset, AP_DEVICES) {
++ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
++ AP_DOMAINS) {
++ /*
++ * If the domain is not in the host's AP configuration,
++ * then resetting it will fail with response code 01
++ * (APQN not valid).
++ */
++ if (!test_bit_inv(apqi,
++ (unsigned long *)matrix_dev->info.aqm))
++ continue;
++
++ apqn = AP_MKQID(apid, apqi);
++ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
++
++ if (q)
++ list_add_tail(&q->reset_qnode, &qlist);
++ }
++ }
++
++ ret = vfio_ap_mdev_reset_qlist(&qlist);
++
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode)
++ list_del(&q->reset_qnode);
++
++ return ret;
++}
++
+ /**
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
+@@ -959,6 +1017,7 @@ static ssize_t assign_adapter_store(stru
+ {
+ int ret;
+ unsigned long apid;
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -988,8 +1047,10 @@ static ssize_t assign_adapter_store(stru
+
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1020,11 +1081,12 @@ static struct vfio_ap_queue
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+- * @qtable: table for storing queues associated with unassigned adapter.
++ * @qlist: list for storing queues associated with unassigned adapter that
++ * need to be reset.
+ */
+ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+@@ -1032,11 +1094,10 @@ static void vfio_ap_mdev_unlink_adapter(
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1044,26 +1105,23 @@ static void vfio_ap_mdev_unlink_adapter(
+ static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1164,6 +1222,7 @@ static ssize_t assign_domain_store(struc
+ {
+ int ret;
+ unsigned long apqi;
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&ap_perms_mutex);
+@@ -1193,8 +1252,10 @@ static ssize_t assign_domain_store(struc
+
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+
+ ret = count;
+ done:
+@@ -1207,7 +1268,7 @@ static DEVICE_ATTR_WO(assign_domain);
+
+ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+- struct ap_queue_table *qtable)
++ struct list_head *qlist)
+ {
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+@@ -1215,11 +1276,10 @@ static void vfio_ap_mdev_unlink_domain(s
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+- if (q && qtable) {
++ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+- hash_add(qtable->queues, &q->mdev_qnode,
+- q->apqn);
++ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+ }
+@@ -1227,26 +1287,23 @@ static void vfio_ap_mdev_unlink_domain(s
+ static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+ {
+- int loop_cursor;
+- struct vfio_ap_queue *q;
+- struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
++ struct vfio_ap_queue *q, *tmpq;
++ struct list_head qlist;
+
+- hash_init(qtable->queues);
+- vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
++ INIT_LIST_HEAD(&qlist);
++ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+- vfio_ap_mdev_reset_queues(qtable);
++ vfio_ap_mdev_reset_qlist(&qlist);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+- hash_del(&q->mdev_qnode);
++ list_del(&q->reset_qnode);
+ }
+-
+- kfree(qtable);
+ }
+
+ /**
+@@ -1601,7 +1658,7 @@ static void vfio_ap_mdev_unset_kvm(struc
+ get_update_locks_for_kvm(kvm);
+
+ kvm_arch_crypto_clear_masks(kvm);
+- vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ vfio_ap_mdev_reset_queues(matrix_mdev);
+ kvm_put_kvm(kvm);
+ matrix_mdev->kvm = NULL;
+
+@@ -1737,15 +1794,33 @@ static void vfio_ap_mdev_reset_queue(str
+ }
+ }
+
+-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
++static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
+ {
+ int ret = 0, loop_cursor;
+ struct vfio_ap_queue *q;
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
+ vfio_ap_mdev_reset_queue(q);
+
+- hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
++ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
++ flush_work(&q->reset_work);
++
++ if (q->reset_status.response_code)
++ ret = -EIO;
++ }
++
++ return ret;
++}
++
++static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
++{
++ int ret = 0;
++ struct vfio_ap_queue *q;
++
++ list_for_each_entry(q, qlist, reset_qnode)
++ vfio_ap_mdev_reset_queue(q);
++
++ list_for_each_entry(q, qlist, reset_qnode) {
+ flush_work(&q->reset_work);
+
+ if (q->reset_status.response_code)
+@@ -1931,7 +2006,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+- ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
++ ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ break;
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ ret = vfio_ap_get_irq_info(arg);
+@@ -2063,6 +2138,7 @@ int vfio_ap_mdev_probe_queue(struct ap_d
+ {
+ int ret;
+ struct vfio_ap_queue *q;
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2095,15 +2171,17 @@ int vfio_ap_mdev_probe_queue(struct ap_d
+ !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
+ goto done;
+
+- if (vfio_ap_mdev_filter_matrix(matrix_mdev))
++ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++ }
+ }
+
+ done:
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+- return 0;
++ return ret;
+
+ err_remove_group:
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+@@ -2447,6 +2525,7 @@ void vfio_ap_on_cfg_changed(struct ap_co
+
+ static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+ {
++ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+@@ -2460,7 +2539,7 @@ static void vfio_ap_mdev_hot_plug_cfg(st
+ matrix_mdev->adm_add, AP_DOMAINS);
+
+ if (filter_adapters || filter_domains)
+- do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev);
++ do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
+
+ if (filter_cdoms)
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+@@ -2468,6 +2547,8 @@ static void vfio_ap_mdev_hot_plug_cfg(st
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
++ reset_queues_for_apids(matrix_mdev, apm_filtered);
++
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+--- a/drivers/s390/crypto/vfio_ap_private.h
++++ b/drivers/s390/crypto/vfio_ap_private.h
+@@ -133,6 +133,8 @@ struct ap_matrix_mdev {
+ * @apqn: the APQN of the AP queue device
+ * @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
++ * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
++ * that need to be reset
+ * @reset_status: the status from the last reset of the queue
+ * @reset_work: work to wait for queue reset to complete
+ */
+@@ -143,6 +145,7 @@ struct vfio_ap_queue {
+ #define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
++ struct list_head reset_qnode;
+ struct ap_queue_status reset_status;
+ struct work_struct reset_work;
+ };
--- /dev/null
+From 7b2d039da622daa9ba259ac6f38701d542b237c3 Mon Sep 17 00:00:00 2001
+From: Anthony Krowiak <akrowiak@linux.ibm.com>
+Date: Thu, 9 Nov 2023 11:44:20 -0500
+Subject: s390/vfio-ap: unpin pages on gisc registration failure
+
+From: Anthony Krowiak <akrowiak@linux.ibm.com>
+
+commit 7b2d039da622daa9ba259ac6f38701d542b237c3 upstream.
+
+In the vfio_ap_irq_enable function, after the page containing the
+notification indicator byte (NIB) is pinned, the function attempts
+to register the guest ISC. If registration fails, the function sets the
+status response code and returns without unpinning the page containing
+the NIB. In order to avoid a memory leak, the NIB should be unpinned before
+returning from the vfio_ap_irq_enable function.
+
+Co-developed-by: Janosch Frank <frankja@linux.ibm.com>
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Signed-off-by: Anthony Krowiak <akrowiak@linux.ibm.com>
+Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
+Fixes: 783f0a3ccd79 ("s390/vfio-ap: add s390dbf logging to the vfio_ap_irq_enable function")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20231109164427.460493-2-akrowiak@linux.ibm.com
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/crypto/vfio_ap_ops.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -457,6 +457,7 @@ static struct ap_queue_status vfio_ap_ir
+ VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
+ __func__, nisc, isc, q->apqn);
+
++ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_GISA;
+ return status;
+ }
cifs-reconnect-worker-should-take-reference-on-serve.patch
cifs-handle-servers-that-still-advertise-multichanne.patch
cifs-update-iface_last_update-on-each-query-and-upda.patch
+powerpc-ps3_defconfig-disable-ppc64_big_endian_elf_abi_v2.patch
+ext4-allow-for-the-last-group-to-be-marked-as-trimmed.patch
+async-split-async_schedule_node_domain.patch
+async-introduce-async_schedule_dev_nocall.patch
+pm-sleep-fix-possible-deadlocks-in-core-system-wide-pm-code.patch
+arm64-properly-install-vmlinuz.efi.patch
+opp-pass-rounded-rate-to-_set_opp.patch
+btrfs-sysfs-validate-scrub_speed_max-value.patch
+crypto-lib-mpi-fix-unexpected-pointer-access-in-mpi_ec_init.patch
+erofs-fix-lz4-inplace-decompression.patch
+crypto-api-disallow-identical-driver-names.patch
+pm-hibernate-enforce-ordering-during-image-compression-decompression.patch
+hwrng-core-fix-page-fault-dead-lock-on-mmap-ed-hwrng.patch
+crypto-s390-aes-fix-buffer-overread-in-ctr-mode.patch
+s390-vfio-ap-unpin-pages-on-gisc-registration-failure.patch
+pm-devfreq-fix-buffer-overflow-in-trans_stat_show.patch
+mtd-maps-vmu-flash-fix-the-mtd-core-switch-to-ref-counters.patch
+mtd-rawnand-prevent-crossing-lun-boundaries-during-sequential-reads.patch
+mtd-rawnand-fix-core-interference-with-sequential-reads.patch
+mtd-rawnand-prevent-sequential-reads-with-on-die-ecc-engines.patch
+mtd-rawnand-clarify-conditions-to-enable-continuous-reads.patch
+soc-qcom-pmic_glink_altmode-fix-port-sanity-check.patch
+media-imx355-enable-runtime-pm-before-registering-async-sub-device.patch
+rpmsg-virtio-free-driver_override-when-rpmsg_remove.patch
+media-ov9734-enable-runtime-pm-before-registering-async-sub-device.patch
+media-ov13b10-enable-runtime-pm-before-registering-async-sub-device.patch
+media-ov01a10-enable-runtime-pm-before-registering-async-sub-device.patch
+soc-fsl-cpm1-tsa-fix-__iomem-addresses-declaration.patch
+soc-fsl-cpm1-qmc-fix-__iomem-addresses-declaration.patch
+soc-fsl-cpm1-qmc-fix-rx-channel-reset.patch
+s390-vfio-ap-always-filter-entire-ap-matrix.patch
+s390-vfio-ap-loop-over-the-shadow-apcb-when-filtering-guest-s-ap-configuration.patch
+s390-vfio-ap-let-on_scan_complete-callback-filter-matrix-and-update-guest-s-apcb.patch
+s390-vfio-ap-reset-queues-filtered-from-the-guest-s-ap-config.patch
+s390-vfio-ap-reset-queues-associated-with-adapter-for-queue-unbound-from-driver.patch
+s390-vfio-ap-do-not-reset-queue-removed-from-host-config.patch
+nbd-always-initialize-struct-msghdr-completely.patch
+mips-fix-max_mapnr-being-uninitialized-on-early-stages.patch
+bus-mhi-host-add-alignment-check-for-event-ring-read-pointer.patch
+bus-mhi-host-drop-chan-lock-before-queuing-buffers.patch
+bus-mhi-host-add-spinlock-to-protect-wp-access-when-queueing-tres.patch
+parisc-firmware-fix-f-extend-for-pdc-addresses.patch
+parisc-power-fix-power-soft-off-button-emulation-on-qemu.patch
+arm-dts-imx6q-apalis-add-can-power-up-delay-on-ixora-board.patch
+arm64-dts-qcom-sc8280xp-crd-fix-edp-phy-compatible.patch
+arm-dts-qcom-sdx55-fix-usb-wakeup-interrupt-types.patch
+arm-dts-samsung-exynos4210-i9100-unconditionally-enable-ldo12.patch
+arm-dts-qcom-sdx55-fix-pdc-interrupt-cells.patch
+arm64-dts-sprd-fix-the-cpu-node-for-ums512.patch
+arm64-dts-rockchip-configure-eth-pad-driver-strength-for-orangepi-r1-plus-lts.patch
+arm64-dts-rockchip-fix-rk3588-usb-power-domain-clocks.patch
+arm64-dts-qcom-msm8916-make-blsp_dma-controlled-remotely.patch
+arm64-dts-qcom-msm8939-make-blsp_dma-controlled-remotely.patch
+arm64-dts-qcom-sc7180-fix-usb-wakeup-interrupt-types.patch
+arm64-dts-qcom-sdm845-fix-usb-wakeup-interrupt-types.patch
+arm64-dts-qcom-sdm670-fix-usb-wakeup-interrupt-types.patch
+arm64-dts-qcom-sm8150-fix-usb-wakeup-interrupt-types.patch
+arm64-dts-qcom-sc8180x-fix-usb-wakeup-interrupt-types.patch
+arm64-dts-qcom-sc7280-fix-usb_1-wakeup-interrupt-types.patch
+arm64-dts-qcom-add-missing-vio-supply-for-aw2013.patch
--- /dev/null
+From a5ec3a21220da06bdda2e686012ca64fdb6c513d Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 5 Dec 2023 16:20:59 +0100
+Subject: soc: fsl: cpm1: qmc: Fix __iomem addresses declaration
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit a5ec3a21220da06bdda2e686012ca64fdb6c513d upstream.
+
+Running sparse (make C=1) on qmc.c raises a lot of warnings such as:
+ ...
+ warning: incorrect type in assignment (different address spaces)
+ expected struct cpm_buf_desc [usertype] *[noderef] __iomem bd
+ got struct cpm_buf_desc [noderef] [usertype] __iomem *txbd_free
+ ...
+
+Indeed, some variables were declared 'type *__iomem var' instead of
+'type __iomem *var'.
+
+Use the correct declaration to remove these warnings.
+
+Fixes: 3178d58e0b97 ("soc: fsl: cpm1: Add support for QMC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/r/20231205152116.122512-3-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/fsl/qe/qmc.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
+index 92ec76c03965..3f3de1351c96 100644
+--- a/drivers/soc/fsl/qe/qmc.c
++++ b/drivers/soc/fsl/qe/qmc.c
+@@ -175,7 +175,7 @@ struct qmc_chan {
+ struct list_head list;
+ unsigned int id;
+ struct qmc *qmc;
+- void *__iomem s_param;
++ void __iomem *s_param;
+ enum qmc_mode mode;
+ u64 tx_ts_mask;
+ u64 rx_ts_mask;
+@@ -203,9 +203,9 @@ struct qmc_chan {
+ struct qmc {
+ struct device *dev;
+ struct tsa_serial *tsa_serial;
+- void *__iomem scc_regs;
+- void *__iomem scc_pram;
+- void *__iomem dpram;
++ void __iomem *scc_regs;
++ void __iomem *scc_pram;
++ void __iomem *dpram;
+ u16 scc_pram_offset;
+ cbd_t __iomem *bd_table;
+ dma_addr_t bd_dma_addr;
+@@ -218,37 +218,37 @@ struct qmc {
+ struct qmc_chan *chans[64];
+ };
+
+-static inline void qmc_write16(void *__iomem addr, u16 val)
++static inline void qmc_write16(void __iomem *addr, u16 val)
+ {
+ iowrite16be(val, addr);
+ }
+
+-static inline u16 qmc_read16(void *__iomem addr)
++static inline u16 qmc_read16(void __iomem *addr)
+ {
+ return ioread16be(addr);
+ }
+
+-static inline void qmc_setbits16(void *__iomem addr, u16 set)
++static inline void qmc_setbits16(void __iomem *addr, u16 set)
+ {
+ qmc_write16(addr, qmc_read16(addr) | set);
+ }
+
+-static inline void qmc_clrbits16(void *__iomem addr, u16 clr)
++static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+ {
+ qmc_write16(addr, qmc_read16(addr) & ~clr);
+ }
+
+-static inline void qmc_write32(void *__iomem addr, u32 val)
++static inline void qmc_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline u32 qmc_read32(void *__iomem addr)
++static inline u32 qmc_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void qmc_setbits32(void *__iomem addr, u32 set)
++static inline void qmc_setbits32(void __iomem *addr, u32 set)
+ {
+ qmc_write32(addr, qmc_read32(addr) | set);
+ }
+@@ -318,7 +318,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -374,7 +374,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan)
+ void (*complete)(void *context);
+ unsigned long flags;
+ void *context;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ /*
+@@ -425,7 +425,7 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+@@ -488,7 +488,7 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
+ void (*complete)(void *context, size_t size);
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ void *context;
+ u16 datalen;
+ u16 ctrl;
+@@ -663,7 +663,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+@@ -694,7 +694,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan)
+ {
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+- cbd_t *__iomem bd;
++ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+--
+2.43.0
+
--- /dev/null
+From dfe66d012af2ddfa566cf9c860b8472b412fb7e4 Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 5 Dec 2023 16:21:00 +0100
+Subject: soc: fsl: cpm1: qmc: Fix rx channel reset
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit dfe66d012af2ddfa566cf9c860b8472b412fb7e4 upstream.
+
+The qmc_chan_reset_rx() sets the is_rx_stopped flag. This leads to an
+inconsistent state in the following sequence:
+ qmc_chan_stop()
+ qmc_chan_reset()
+Indeed, after the qmc_chan_reset() call, the channel must still be
+stopped. Only a qmc_chan_start() call can move the channel from stopped
+state to started state.
+
+Fix the issue by removing the is_rx_stopped flag setting from
+qmc_chan_reset().
+
+Fixes: 3178d58e0b97 ("soc: fsl: cpm1: Add support for QMC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/r/20231205152116.122512-4-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/fsl/qe/qmc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
+index 3f3de1351c96..2312152a44b3 100644
+--- a/drivers/soc/fsl/qe/qmc.c
++++ b/drivers/soc/fsl/qe/qmc.c
+@@ -685,7 +685,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan)
+ qmc_read16(chan->s_param + QMC_SPE_RBASE));
+
+ chan->rx_pending = 0;
+- chan->is_rx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ }
+--
+2.43.0
+
--- /dev/null
+From fc0c64154e5ddeb6f63c954735bd646ce5b8d9a4 Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 5 Dec 2023 16:20:58 +0100
+Subject: soc: fsl: cpm1: tsa: Fix __iomem addresses declaration
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit fc0c64154e5ddeb6f63c954735bd646ce5b8d9a4 upstream.
+
+Running sparse (make C=1) on tsa.c raises a lot of warnings such as:
+ --- 8< ---
+ warning: incorrect type in assignment (different address spaces)
+ expected void *[noderef] si_regs
+ got void [noderef] __iomem *
+ --- 8< ---
+
+Indeed, some variables were declared 'type *__iomem var' instead of
+'type __iomem *var'.
+
+Use the correct declaration to remove these warnings.
+
+Fixes: 1d4ba0b81c1c ("soc: fsl: cpm1: Add support for TSA")
+Cc: stable@vger.kernel.org
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202312051959.9YdRIYbg-lkp@intel.com/
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Link: https://lore.kernel.org/r/20231205152116.122512-2-herve.codina@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/fsl/qe/tsa.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/soc/fsl/qe/tsa.c
++++ b/drivers/soc/fsl/qe/tsa.c
+@@ -98,9 +98,9 @@
+ #define TSA_SIRP 0x10
+
+ struct tsa_entries_area {
+- void *__iomem entries_start;
+- void *__iomem entries_next;
+- void *__iomem last_entry;
++ void __iomem *entries_start;
++ void __iomem *entries_next;
++ void __iomem *last_entry;
+ };
+
+ struct tsa_tdm {
+@@ -117,8 +117,8 @@ struct tsa_tdm {
+
+ struct tsa {
+ struct device *dev;
+- void *__iomem si_regs;
+- void *__iomem si_ram;
++ void __iomem *si_regs;
++ void __iomem *si_ram;
+ resource_size_t si_ram_sz;
+ spinlock_t lock;
+ int tdms; /* TSA_TDMx ORed */
+@@ -135,27 +135,27 @@ static inline struct tsa *tsa_serial_get
+ return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
+ }
+
+-static inline void tsa_write32(void *__iomem addr, u32 val)
++static inline void tsa_write32(void __iomem *addr, u32 val)
+ {
+ iowrite32be(val, addr);
+ }
+
+-static inline void tsa_write8(void *__iomem addr, u32 val)
++static inline void tsa_write8(void __iomem *addr, u32 val)
+ {
+ iowrite8(val, addr);
+ }
+
+-static inline u32 tsa_read32(void *__iomem addr)
++static inline u32 tsa_read32(void __iomem *addr)
+ {
+ return ioread32be(addr);
+ }
+
+-static inline void tsa_clrbits32(void *__iomem addr, u32 clr)
++static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
+ {
+ tsa_write32(addr, tsa_read32(addr) & ~clr);
+ }
+
+-static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set)
++static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
+ {
+ tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
+ }
+@@ -313,7 +313,7 @@ static u32 tsa_serial_id2csel(struct tsa
+ static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+ {
+- void *__iomem addr;
++ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
--- /dev/null
+From c4fb7d2eac9ff9bfc35a2e4d40c7169a332416e0 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Thu, 9 Nov 2023 10:31:00 +0100
+Subject: soc: qcom: pmic_glink_altmode: fix port sanity check
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit c4fb7d2eac9ff9bfc35a2e4d40c7169a332416e0 upstream.
+
+The PMIC GLINK altmode driver currently supports at most two ports.
+
+Fix the incomplete port sanity check on notifications to avoid
+accessing and corrupting memory beyond the port array if we ever get a
+notification for an unsupported port.
+
+Fixes: 080b4e24852b ("soc: qcom: pmic_glink: Introduce altmode support")
+Cc: stable@vger.kernel.org # 6.3
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20231109093100.19971-1-johan+linaro@kernel.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/qcom/pmic_glink_altmode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/soc/qcom/pmic_glink_altmode.c
++++ b/drivers/soc/qcom/pmic_glink_altmode.c
+@@ -285,7 +285,7 @@ static void pmic_glink_altmode_sc8180xp_
+
+ svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+@@ -328,7 +328,7 @@ static void pmic_glink_altmode_sc8280xp_
+ hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
+ hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
+
+- if (!altmode->ports[port].altmode) {
++ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }