--- /dev/null
+From fe232bc08b39ac0d5b2d5fb2fba9a99b45058525 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 19:31:49 +0530
+Subject: ACPICA: Revert "ACPICA: avoid Info: mapping multiple BARs. Your
+ kernel is fine."
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit a83e1385b780d41307433ddbc86e3c528db031f0 ]
+
+Undo the modifications made in commit d410ee5109a1 ("ACPICA: avoid
+"Info: mapping multiple BARs. Your kernel is fine.""). The initial
+purpose of this commit was to stop memory mappings for operation
+regions from overlapping page boundaries, as it can trigger warnings
+if different page attributes are present.
+
+However, it was found that when this situation arises, mapping
+continues until the boundary's end, but there is still an attempt to
+read/write the entire length of the map, leading to a NULL pointer
+dereference. For example, if a four-byte mapping request is made but
+only one byte is mapped because it hits the current page boundary's
+end, a four-byte read/write attempt is still made, resulting in a NULL
+pointer dereference.
+
+Instead, map the entire length, as the ACPI specification does not
+mandate that it must be within the same page boundary. It is
+permissible for it to be mapped across different regions.
+
+Link: https://github.com/acpica/acpica/pull/954
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218849
+Fixes: d410ee5109a1 ("ACPICA: avoid "Info: mapping multiple BARs. Your kernel is fine."")
+Co-developed-by: Sanath S <Sanath.S@amd.com>
+Signed-off-by: Sanath S <Sanath.S@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/acpica/exregion.c | 23 ++---------------------
+ 1 file changed, 2 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
+index 8907b8bf42672..c49b9f8de723d 100644
+--- a/drivers/acpi/acpica/exregion.c
++++ b/drivers/acpi/acpica/exregion.c
+@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
+ struct acpi_mem_mapping *mm = mem_info->cur_mm;
+ u32 length;
+ acpi_size map_length;
+- acpi_size page_boundary_map_length;
+ #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+ u32 remainder;
+ #endif
+@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
+ map_length = (acpi_size)
+ ((mem_info->address + mem_info->length) - address);
+
+- /*
+- * If mapping the entire remaining portion of the region will cross
+- * a page boundary, just map up to the page boundary, do not cross.
+- * On some systems, crossing a page boundary while mapping regions
+- * can cause warnings if the pages have different attributes
+- * due to resource management.
+- *
+- * This has the added benefit of constraining a single mapping to
+- * one page, which is similar to the original code that used a 4k
+- * maximum window.
+- */
+- page_boundary_map_length = (acpi_size)
+- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
+- if (page_boundary_map_length == 0) {
+- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
+- }
+-
+- if (map_length > page_boundary_map_length) {
+- map_length = page_boundary_map_length;
+- }
++ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
++ map_length = ACPI_DEFAULT_PAGE_SIZE;
+
+ /* Create a new mapping starting at the address given */
+
+--
+2.43.0
+
--- /dev/null
+From 57ccaddd93adf662beaa3131627e10ef42f5a3c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 16:00:45 +0200
+Subject: arm64: dts: freescale: imx8mm-verdin: enable hysteresis on slow input
+ pin
+
+From: Max Krummenacher <max.krummenacher@toradex.com>
+
+[ Upstream commit 67cc6125fb39902169707cb6277f010e56d4a40a ]
+
+SODIMM 17 can be used as an edge triggered interrupt supplied from an
+off board source.
+
+Enable hysteresis on the pinmuxing to increase immunity against noise
+on the signal.
+
+Fixes: 60f01b5b5c7d ("arm64: dts: imx8mm-verdin: update iomux configuration")
+Signed-off-by: Max Krummenacher <max.krummenacher@toradex.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 6f0811587142d..14d20a33af8e1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -929,7 +929,7 @@
+ /* Verdin GPIO_9_DSI (pulled-up as active-low) */
+ pinctrl_gpio_9_dsi: gpio9dsigrp {
+ fsl,pins =
+- <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
++ <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
+ };
+
+ /* Verdin GPIO_10_DSI (pulled-up as active-low) */
+--
+2.43.0
+
--- /dev/null
+From 2f23580775ba6c9d4dcfc47e55be377075faca21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 14:38:28 -0700
+Subject: arm64: dts: freescale: imx8mp-venice-gw73xx-2x: fix BT shutdown GPIO
+
+From: Tim Harvey <tharvey@gateworks.com>
+
+[ Upstream commit e1b4622efbe7ad09c9a902365a993f68c270c453 ]
+
+Fix the invalid BT shutdown GPIO (gpio1_io3 not gpio4_io16)
+
+Fixes: 716ced308234 ("arm64: dts: freescale: Add imx8mp-venice-gw73xx-2x")
+Signed-off-by: Tim Harvey <tharvey@gateworks.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+index 68c62def4c06e..d27bfba1b4b8c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+@@ -161,7 +161,7 @@
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+- shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
++ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+--
+2.43.0
+
--- /dev/null
+From 2458f72546cb5468a6902e688a122f2007d7156a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Jun 2024 16:00:43 +0800
+Subject: arm64: dts: imx8mp: Fix TC9595 input clock on DH i.MX8M Plus DHCOM
+ SoM
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit c03984d43a9dd9282da54ccf275419f666029452 ]
+
+The IMX8MP_CLK_CLKOUT2 supplies the TC9595 bridge with 13 MHz reference
+clock. The IMX8MP_CLK_CLKOUT2 is supplied from IMX8MP_AUDIO_PLL2_OUT.
+The IMX8MP_CLK_CLKOUT2 operates only as a power-of-two divider, and the
+current 156 MHz is not power-of-two divisible to achieve 13 MHz.
+
+To achieve 13 MHz output from IMX8MP_CLK_CLKOUT2, set IMX8MP_AUDIO_PLL2_OUT
+to 208 MHz, because 208 MHz / 16 = 13 MHz.
+
+Fixes: 20d0b83e712b ("arm64: dts: imx8mp: Add TC9595 bridge on DH electronics i.MX8M Plus DHCOM")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+index eacf1da674778..eae39c1cb9856 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+@@ -251,7 +251,7 @@
+ <&clk IMX8MP_CLK_CLKOUT2>,
+ <&clk IMX8MP_AUDIO_PLL2_OUT>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+- assigned-clock-rates = <13000000>, <13000000>, <156000000>;
++ assigned-clock-rates = <13000000>, <13000000>, <208000000>;
+ reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+
+--
+2.43.0
+
--- /dev/null
+From 3f568ac4f25db902e8ec2c98e37fb5fd896026b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 25 Feb 2024 04:33:42 +0100
+Subject: arm64: dts: imx8mp: Fix TC9595 reset GPIO on DH i.MX8M Plus DHCOM SoM
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 418a7fc5397719c4b8f50eaeca6694879f89a6ec ]
+
+The TC9595 reset GPIO is SAI1_RXC / GPIO4_IO01, fix the DT accordingly.
+The SAI5_RXD0 / GPIO3_IO21 is thus far unused TC9595 interrupt line.
+
+Fixes: 20d0b83e712b ("arm64: dts: imx8mp: Add TC9595 bridge on DH electronics i.MX8M Plus DHCOM")
+Signed-off-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Stable-dep-of: c03984d43a9d ("arm64: dts: imx8mp: Fix TC9595 input clock on DH i.MX8M Plus DHCOM SoM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+index cb1953d14aa90..eacf1da674778 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+@@ -252,7 +252,7 @@
+ <&clk IMX8MP_AUDIO_PLL2_OUT>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
+ assigned-clock-rates = <13000000>, <13000000>, <156000000>;
+- reset-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
++ reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
+ status = "disabled";
+
+ ports {
+--
+2.43.0
+
--- /dev/null
+From dbb87cc7a1591e5eb507b4d489ae0fd8e87004d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 00:48:54 -0300
+Subject: arm64: dts: imx93-11x11-evk: Remove the 'no-sdio' property
+
+From: Fabio Estevam <festevam@gmail.com>
+
+[ Upstream commit a5d400b6439ac734a5c0dbb641e26a38736abc17 ]
+
+The usdhc2 port is connected to the microSD slot. The presence of the
+'no-sdio' property prevents Wifi SDIO cards, such as CMP9010-X-EVB [1]
+to be detected.
+
+Remove the 'no-sdio' property so that SDIO cards could also work.
+
+[1] https://www.nxp.com/products/wireless-connectivity/wi-fi-plus-bluetooth-plus-802-15-4/cmp9010-x-evb-iw416-usd-interface-evaluation-board:CMP9010-X-EVB
+
+Fixes: e37907bd8294 ("arm64: dts: freescale: add i.MX93 11x11 EVK basic support")
+Signed-off-by: Fabio Estevam <festevam@gmail.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+index cafd39130eb88..a06ca740f540c 100644
+--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+@@ -168,7 +168,6 @@
+ vmmc-supply = <®_usdhc2_vmmc>;
+ bus-width = <4>;
+ status = "okay";
+- no-sdio;
+ no-mmc;
+ };
+
+--
+2.43.0
+
--- /dev/null
+From a32e395b0e4fc5a5dc113353114a7ffcc04e9980 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 13:54:22 +0200
+Subject: dmaengine: fsl-edma: avoid linking both modules
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit fa555b5026d0bf1ba7c9e645ff75e2725a982631 ]
+
+Kbuild does not support having a source file compiled multiple times
+and linked into distinct modules, or built-in and modular at the
+same time. For fs-edma, there are two common components that are
+linked into the fsl-edma.ko for Arm and PowerPC, plus the mcf-edma.ko
+module on Coldfire. This violates the rule for compile-testing:
+
+scripts/Makefile.build:236: drivers/dma/Makefile: fsl-edma-common.o is added to multiple modules: fsl-edma mcf-edma
+scripts/Makefile.build:236: drivers/dma/Makefile: fsl-edma-trace.o is added to multiple modules: fsl-edma mcf-edma
+
+I tried splitting out the common parts into a separate module, but
+that adds back the complexity that a cleanup patch removed, and it
+gets harder with the addition of the tracepoints.
+
+As a minimal workaround, address it at the Kconfig level, by disallowing
+the broken configurations.
+
+Link: https://lore.kernel.org/lkml/20240110232255.1099757-1-arnd@kernel.org/
+Fixes: 66aac8ea0a6c ("dmaengine: fsl-edma: clean up EXPORT_SYMBOL_GPL in fsl-edma-common.c")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Peng Fan <peng.fan@nxp.com>
+Link: https://lore.kernel.org/r/20240528115440.2965975-1-arnd@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 7a618f629e86b..e36506471a4f6 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -380,7 +380,7 @@ config LPC18XX_DMAMUX
+
+ config MCF_EDMA
+ tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+- depends on M5441x || COMPILE_TEST
++ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+--
+2.43.0
+
--- /dev/null
+From 63312a0f9ec430075f20f938b1ae050b25b6fc28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jun 2024 09:24:44 +0800
+Subject: dmaengine: idxd: Fix possible Use-After-Free in irq_process_work_list
+
+From: Li RongQing <lirongqing@baidu.com>
+
+[ Upstream commit e3215deca4520773cd2b155bed164c12365149a7 ]
+
+Use list_for_each_entry_safe() to allow iterating through the list and
+deleting the entry in the iteration process. The descriptor is freed via
+idxd_desc_complete() and there's a slight chance may cause issue for
+the list iterator when the descriptor is reused by another thread
+without it being deleted from the list.
+
+Fixes: 16e19e11228b ("dmaengine: idxd: Fix list corruption in description completion")
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Fenghua Yu <fenghua.yu@intel.com>
+Link: https://lore.kernel.org/r/20240603012444.11902-1-lirongqing@baidu.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/idxd/irq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index b2ca9c1f194c9..7efc85b5bad9e 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
+
+ spin_unlock(&irq_entry->list_lock);
+
+- list_for_each_entry(desc, &flist, list) {
++ list_for_each_entry_safe(desc, n, &flist, list) {
+ /*
+ * Check against the original status as ABORT is software defined
+ * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
+ */
++ list_del(&desc->list);
++
+ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ continue;
+--
+2.43.0
+
--- /dev/null
+From a552cd6fce4553f872dd19fb93794a461cb8a5b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 09:09:24 +0300
+Subject: dmaengine: ioatdma: Fix error path in ioat3_dma_probe()
+
+From: Nikita Shubin <n.shubin@yadro.com>
+
+[ Upstream commit f0dc9fda2e0ee9e01496c2f5aca3a831131fad79 ]
+
+Make sure we are disabling interrupts and destroying DMA pool if
+pcie_capability_read/write_word() call failed.
+
+Fixes: 511deae0261c ("dmaengine: ioatdma: disable relaxed ordering for ioatdma")
+Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20240528-ioatdma-fixes-v2-2-a9f2fbe26ab1@yadro.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ioat/init.c | 33 +++++++++++++++------------------
+ 1 file changed, 15 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index e76e507ae898c..26964b7c8cf14 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -534,18 +534,6 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
+ return err;
+ }
+
+-static int ioat_register(struct ioatdma_device *ioat_dma)
+-{
+- int err = dma_async_device_register(&ioat_dma->dma_dev);
+-
+- if (err) {
+- ioat_disable_interrupts(ioat_dma);
+- dma_pool_destroy(ioat_dma->completion_pool);
+- }
+-
+- return err;
+-}
+-
+ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+ {
+ struct dma_device *dma = &ioat_dma->dma_dev;
+@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+ ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
+
+- err = ioat_register(ioat_dma);
++ err = dma_async_device_register(&ioat_dma->dma_dev);
+ if (err)
+- return err;
++ goto err_disable_interrupts;
+
+ ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+
+ /* disable relaxed ordering */
+ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
+- if (err)
+- return pcibios_err_to_errno(err);
++ if (err) {
++ err = pcibios_err_to_errno(err);
++ goto err_disable_interrupts;
++ }
+
+ /* clear relaxed ordering enable */
+ val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
+- if (err)
+- return pcibios_err_to_errno(err);
++ if (err) {
++ err = pcibios_err_to_errno(err);
++ goto err_disable_interrupts;
++ }
+
+ if (ioat_dma->cap & IOAT_CAP_DPS)
+ writeb(ioat_pending_level + 1,
+ ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
+
+ return 0;
++
++err_disable_interrupts:
++ ioat_disable_interrupts(ioat_dma);
++ dma_pool_destroy(ioat_dma->completion_pool);
++ return err;
+ }
+
+ static void ioat_shutdown(struct pci_dev *pdev)
+--
+2.43.0
+
--- /dev/null
+From 64c0561bbfae6a6b9124fac28b3a59c43eaba4fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 09:09:25 +0300
+Subject: dmaengine: ioatdma: Fix kmemleak in ioat_pci_probe()
+
+From: Nikita Shubin <n.shubin@yadro.com>
+
+[ Upstream commit 29b7cd255f3628e0d65be33a939d8b5bba10aa62 ]
+
+If probing fails we end up with leaking ioatdma_device and each
+allocated channel.
+
+Following kmemleak easy to reproduce by injecting an error in
+ioat_alloc_chan_resources() when doing ioat_dma_self_test().
+
+unreferenced object 0xffff888014ad5800 (size 1024): [..]
+ [<ffffffff827692ca>] kmemleak_alloc+0x4a/0x80
+ [<ffffffff81430600>] kmalloc_trace+0x270/0x2f0
+ [<ffffffffa000b7d1>] ioat_pci_probe+0xc1/0x1c0 [ioatdma]
+[..]
+
+repeated for each ioatdma channel:
+
+unreferenced object 0xffff8880148e5c00 (size 512): [..]
+ [<ffffffff827692ca>] kmemleak_alloc+0x4a/0x80
+ [<ffffffff81430600>] kmalloc_trace+0x270/0x2f0
+ [<ffffffffa0009641>] ioat_enumerate_channels+0x101/0x2d0 [ioatdma]
+ [<ffffffffa000b266>] ioat3_dma_probe+0x4d6/0x970 [ioatdma]
+ [<ffffffffa000b891>] ioat_pci_probe+0x181/0x1c0 [ioatdma]
+[..]
+
+Fixes: bf453a0a18b2 ("dmaengine: ioat: Support in-use unbind")
+Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20240528-ioatdma-fixes-v2-3-a9f2fbe26ab1@yadro.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ioat/init.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 26964b7c8cf14..cf688b0c8444c 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -1347,6 +1347,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *device;
++ unsigned int i;
+ u8 version;
+ int err;
+
+@@ -1384,6 +1385,9 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
+ if (err) {
++ for (i = 0; i < IOAT_MAX_CHANS; i++)
++ kfree(device->idx[i]);
++ kfree(device);
+ dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ return -ENODEV;
+ }
+--
+2.43.0
+
--- /dev/null
+From fa99336d8c1f1d017385c3935ca2c26e9f9d4f17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 09:09:23 +0300
+Subject: dmaengine: ioatdma: Fix leaking on version mismatch
+
+From: Nikita Shubin <n.shubin@yadro.com>
+
+[ Upstream commit 1b11b4ef6bd68591dcaf8423c7d05e794e6aec6f ]
+
+Fix leaking ioatdma_device if I/OAT version is less than IOAT_VER_3_0.
+
+Fixes: bf453a0a18b2 ("dmaengine: ioat: Support in-use unbind")
+Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20240528-ioatdma-fixes-v2-1-a9f2fbe26ab1@yadro.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ioat/init.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 9c364e92cb828..e76e507ae898c 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -1350,6 +1350,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ void __iomem * const *iomap;
+ struct device *dev = &pdev->dev;
+ struct ioatdma_device *device;
++ u8 version;
+ int err;
+
+ err = pcim_enable_device(pdev);
+@@ -1363,6 +1364,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (!iomap)
+ return -ENOMEM;
+
++ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
++ if (version < IOAT_VER_3_0)
++ return -ENODEV;
++
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ return err;
+@@ -1373,16 +1378,14 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, device);
+
+- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
++ device->version = version;
+ if (device->version >= IOAT_VER_3_4)
+ ioat_dca_enabled = 0;
+- if (device->version >= IOAT_VER_3_0) {
+- if (is_skx_ioat(pdev))
+- device->version = IOAT_VER_3_2;
+- err = ioat3_dma_probe(device, ioat_dca_enabled);
+- } else
+- return -ENODEV;
+
++ if (is_skx_ioat(pdev))
++ device->version = IOAT_VER_3_2;
++
++ err = ioat3_dma_probe(device, ioat_dca_enabled);
+ if (err) {
+ dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ return -ENODEV;
+--
+2.43.0
+
--- /dev/null
+From fd1cc0ce5cd12a5f7c18320fc90cd6da150d119b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 May 2024 13:52:31 +0300
+Subject: dmaengine: ioatdma: Fix missing kmem_cache_destroy()
+
+From: Nikita Shubin <n.shubin@yadro.com>
+
+[ Upstream commit 5422145d0b749ad554ada772133b9b20f9fb0ec8 ]
+
+Fix missing kmem_cache_destroy() for ioat_sed_cache in
+ioat_exit_module().
+
+Noticed via:
+
+```
+modprobe ioatdma
+rmmod ioatdma
+modprobe ioatdma
+debugfs: Directory 'ioat_sed_ent' with parent 'slab' already present!
+```
+
+Fixes: c0f28ce66ecf ("dmaengine: ioatdma: move all the init routines")
+Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
+Acked-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/20240514-ioatdma_fixes-v1-1-2776a0913254@yadro.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/ioat/init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index cf688b0c8444c..e8f45a7fded43 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -1449,6 +1449,7 @@ module_init(ioat_init_module);
+ static void __exit ioat_exit_module(void)
+ {
+ pci_unregister_driver(&ioat_pci_driver);
++ kmem_cache_destroy(ioat_sed_cache);
+ kmem_cache_destroy(ioat_cache);
+ }
+ module_exit(ioat_exit_module);
+--
+2.43.0
+
--- /dev/null
+From 49bf0830ed0528bee6b838f10b80fb1e4465036f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2024 10:55:28 +0100
+Subject: firmware: psci: Fix return value from psci_system_suspend()
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+[ Upstream commit e7c3696d4692e8046d25f6e63f983e934e12f2c5 ]
+
+Currently we return the value from invoke_psci_fn() directly as return
+value from psci_system_suspend(). It is wrong to send the PSCI interface
+return value directly. psci_to_linux_errno() provide the mapping from
+PSCI return value to the one that can be returned to the callers within
+the kernel.
+
+Use psci_to_linux_errno() to convert and return the correct value from
+psci_system_suspend().
+
+Fixes: faf7ec4a92c0 ("drivers: firmware: psci: add system suspend support")
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Link: https://lore.kernel.org/r/20240515095528.1949992-1-sudeep.holla@arm.com
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/psci/psci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index d9629ff878619..2328ca58bba61 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
+
+ static int psci_system_suspend(unsigned long unused)
+ {
++ int err;
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+
+- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
++ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ pa_cpu_resume, 0, 0);
++ return psci_to_linux_errno(err);
+ }
+
+ static int psci_system_suspend_enter(suspend_state_t state)
+--
+2.43.0
+
--- /dev/null
+From d01178b07921b41bfeb8d4527f188879f386b7ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Jun 2024 14:38:19 +0800
+Subject: io_uring/rsrc: fix incorrect assignment of iter->nr_segs in
+ io_import_fixed
+
+From: Chenliang Li <cliang01.li@samsung.com>
+
+[ Upstream commit a23800f08a60787dfbf2b87b2e6ed411cb629859 ]
+
+In io_import_fixed when advancing the iter within the first bvec, the
+iter->nr_segs is set to bvec->bv_len. nr_segs should be the number of
+bvecs, plus we don't need to adjust it here, so just remove it.
+
+Fixes: b000ae0ec2d7 ("io_uring/rsrc: optimise single entry advance")
+Signed-off-by: Chenliang Li <cliang01.li@samsung.com>
+Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/20240619063819.2445-1-cliang01.li@samsung.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/rsrc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 2e88b6658e4e0..0f9dcde72ebff 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1108,7 +1108,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
+ * branch doesn't expect non PAGE_SIZE'd chunks.
+ */
+ iter->bvec = bvec;
+- iter->nr_segs = bvec->bv_len;
+ iter->count -= offset;
+ iter->iov_offset = offset;
+ } else {
+--
+2.43.0
+
--- /dev/null
+From e9ee1a47b9290e949ebb77c91e363bbcc9ed48a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:21:52 -0700
+Subject: KVM: Add a dedicated mmu_notifier flag for reclaiming freed memory
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit cec29eef0a815386d520d61c2cbe16d537931639 ]
+
+Handle AMD SEV's kvm_arch_guest_memory_reclaimed() hook by having
+__kvm_handle_hva_range() return whether or not an overlapping memslot
+was found, i.e. mmu_lock was acquired. Using the .on_unlock() hook
+works, but kvm_arch_guest_memory_reclaimed() needs to run after dropping
+mmu_lock, which makes .on_lock() and .on_unlock() asymmetrical.
+
+Use a small struct to return the tuple of the notifier-specific return,
+plus whether or not overlap was found. Because the iteration helpers are
+__always_inlined, practically speaking, the struct will never actually be
+returned from a function call (not to mention the size of the struct will
+be two bytes in practice).
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Fuad Tabba <tabba@google.com>
+Tested-by: Fuad Tabba <tabba@google.com>
+Message-Id: <20231027182217.3615211-11-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c3f3edf73a8f ("KVM: Stop processing *all* memslots when "null" mmu_notifier handler is found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 37 insertions(+), 16 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 9cc57b23ec81f..b3b3edcc537de 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -561,6 +561,19 @@ struct kvm_mmu_notifier_range {
+ bool may_block;
+ };
+
++/*
++ * The inner-most helper returns a tuple containing the return value from the
++ * arch- and action-specific handler, plus a flag indicating whether or not at
++ * least one memslot was found, i.e. if the handler found guest memory.
++ *
++ * Note, most notifiers are averse to booleans, so even though KVM tracks the
++ * return from arch code as a bool, outer helpers will cast it to an int. :-(
++ */
++typedef struct kvm_mmu_notifier_return {
++ bool ret;
++ bool found_memslot;
++} kvm_mn_ret_t;
++
+ /*
+ * Use a dedicated stub instead of NULL to indicate that there is no callback
+ * function/handler. The compiler technically can't guarantee that a real
+@@ -582,22 +595,25 @@ static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+ node; \
+ node = interval_tree_iter_next(node, start, last)) \
+
+-static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+- const struct kvm_mmu_notifier_range *range)
++static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
++ const struct kvm_mmu_notifier_range *range)
+ {
+- bool ret = false, locked = false;
++ struct kvm_mmu_notifier_return r = {
++ .ret = false,
++ .found_memslot = false,
++ };
+ struct kvm_gfn_range gfn_range;
+ struct kvm_memory_slot *slot;
+ struct kvm_memslots *slots;
+ int i, idx;
+
+ if (WARN_ON_ONCE(range->end <= range->start))
+- return 0;
++ return r;
+
+ /* A null handler is allowed if and only if on_lock() is provided. */
+ if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
+ IS_KVM_NULL_FN(range->handler)))
+- return 0;
++ return r;
+
+ idx = srcu_read_lock(&kvm->srcu);
+
+@@ -631,8 +647,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+ gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
+ gfn_range.slot = slot;
+
+- if (!locked) {
+- locked = true;
++ if (!r.found_memslot) {
++ r.found_memslot = true;
+ KVM_MMU_LOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_lock))
+ range->on_lock(kvm);
+@@ -640,14 +656,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+ if (IS_KVM_NULL_FN(range->handler))
+ break;
+ }
+- ret |= range->handler(kvm, &gfn_range);
++ r.ret |= range->handler(kvm, &gfn_range);
+ }
+ }
+
+- if (range->flush_on_ret && ret)
++ if (range->flush_on_ret && r.ret)
+ kvm_flush_remote_tlbs(kvm);
+
+- if (locked) {
++ if (r.found_memslot) {
+ KVM_MMU_UNLOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_unlock))
+ range->on_unlock(kvm);
+@@ -655,8 +671,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+
+ srcu_read_unlock(&kvm->srcu, idx);
+
+- /* The notifiers are averse to booleans. :-( */
+- return (int)ret;
++ return r;
+ }
+
+ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+@@ -677,7 +692,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+ .may_block = false,
+ };
+
+- return __kvm_handle_hva_range(kvm, &range);
++ return __kvm_handle_hva_range(kvm, &range).ret;
+ }
+
+ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
+@@ -696,7 +711,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
+ .may_block = false,
+ };
+
+- return __kvm_handle_hva_range(kvm, &range);
++ return __kvm_handle_hva_range(kvm, &range).ret;
+ }
+
+ static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+@@ -798,7 +813,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ .end = range->end,
+ .handler = kvm_mmu_unmap_gfn_range,
+ .on_lock = kvm_mmu_invalidate_begin,
+- .on_unlock = kvm_arch_guest_memory_reclaimed,
++ .on_unlock = (void *)kvm_null_fn,
+ .flush_on_ret = true,
+ .may_block = mmu_notifier_range_blockable(range),
+ };
+@@ -830,7 +845,13 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
+ hva_range.may_block);
+
+- __kvm_handle_hva_range(kvm, &hva_range);
++ /*
++ * If one or more memslots were found and thus zapped, notify arch code
++ * that guest memory has been reclaimed. This needs to be done *after*
++ * dropping mmu_lock, as x86's reclaim path is slooooow.
++ */
++ if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
++ kvm_arch_guest_memory_reclaimed(kvm);
+
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 5566397d2a56c5c8dacbcd391db95a84994fac00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:21:44 -0700
+Subject: KVM: Assert that mmu_invalidate_in_progress *never* goes negative
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit c0db19232c1ed6bd7fcb825c28b014c52732c19e ]
+
+Move the assertion on the in-progress invalidation count from the primary
+MMU's notifier path to KVM's common notification path, i.e. assert that
+the count doesn't go negative even when the invalidation is coming from
+KVM itself.
+
+Opportunistically convert the assertion to a KVM_BUG_ON(), i.e. kill only
+the affected VM, not the entire kernel. A corrupted count is fatal to the
+VM, e.g. the non-zero (negative) count will cause mmu_invalidate_retry()
+to block any and all attempts to install new mappings. But it's far from
+guaranteed that an end() without a start() is fatal or even problematic to
+anything other than the target VM, e.g. the underlying bug could simply be
+a duplicate call to end(). And it's much more likely that a missed
+invalidation, i.e. a potential use-after-free, would manifest as no
+notification whatsoever, not an end() without a start().
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Fuad Tabba <tabba@google.com>
+Tested-by: Fuad Tabba <tabba@google.com>
+Message-Id: <20231027182217.3615211-3-seanjc@google.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c3f3edf73a8f ("KVM: Stop processing *all* memslots when "null" mmu_notifier handler is found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 0524933856d42..5a97e6c7d9c20 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -833,6 +833,7 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ * in conjunction with the smp_rmb in mmu_invalidate_retry().
+ */
+ kvm->mmu_invalidate_in_progress--;
++ KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
+ }
+
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+@@ -863,8 +864,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ */
+ if (wake)
+ rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
+-
+- BUG_ON(kvm->mmu_invalidate_in_progress < 0);
+ }
+
+ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+--
+2.43.0
+
--- /dev/null
+From 5ba477e9b1493fda1c2979d1bf27b52bd04d8aaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:21:53 -0700
+Subject: KVM: Drop .on_unlock() mmu_notifier hook
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 193bbfaacc84f9ee9c281ec0a8dd2ec8e4821e57 ]
+
+Drop the .on_unlock() mmu_notifer hook now that it's no longer used for
+notifying arch code that memory has been reclaimed. Adding .on_unlock()
+and invoking it *after* dropping mmu_lock was a terrible idea, as doing so
+resulted in .on_lock() and .on_unlock() having divergent and asymmetric
+behavior, and set future developers up for failure, i.e. all but asked for
+bugs where KVM relied on using .on_unlock() to try to run a callback while
+holding mmu_lock.
+
+Opportunistically add a lockdep assertion in kvm_mmu_invalidate_end() to
+guard against future bugs of this nature.
+
+Reported-by: Isaku Yamahata <isaku.yamahata@intel.com>
+Link: https://lore.kernel.org/all/20230802203119.GB2021422@ls.amr.corp.intel.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Fuad Tabba <tabba@google.com>
+Tested-by: Fuad Tabba <tabba@google.com>
+Message-Id: <20231027182217.3615211-12-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c3f3edf73a8f ("KVM: Stop processing *all* memslots when "null" mmu_notifier handler is found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index b3b3edcc537de..5ac350ba4e996 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -544,7 +544,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+ typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+
+ typedef void (*on_lock_fn_t)(struct kvm *kvm);
+-typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
+ struct kvm_mmu_notifier_range {
+ /*
+@@ -556,7 +555,6 @@ struct kvm_mmu_notifier_range {
+ union kvm_mmu_notifier_arg arg;
+ gfn_handler_t handler;
+ on_lock_fn_t on_lock;
+- on_unlock_fn_t on_unlock;
+ bool flush_on_ret;
+ bool may_block;
+ };
+@@ -663,11 +661,8 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
+ if (range->flush_on_ret && r.ret)
+ kvm_flush_remote_tlbs(kvm);
+
+- if (r.found_memslot) {
++ if (r.found_memslot)
+ KVM_MMU_UNLOCK(kvm);
+- if (!IS_KVM_NULL_FN(range->on_unlock))
+- range->on_unlock(kvm);
+- }
+
+ srcu_read_unlock(&kvm->srcu, idx);
+
+@@ -687,7 +682,6 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+ .arg = arg,
+ .handler = handler,
+ .on_lock = (void *)kvm_null_fn,
+- .on_unlock = (void *)kvm_null_fn,
+ .flush_on_ret = true,
+ .may_block = false,
+ };
+@@ -706,7 +700,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
+ .end = end,
+ .handler = handler,
+ .on_lock = (void *)kvm_null_fn,
+- .on_unlock = (void *)kvm_null_fn,
+ .flush_on_ret = false,
+ .may_block = false,
+ };
+@@ -813,7 +806,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ .end = range->end,
+ .handler = kvm_mmu_unmap_gfn_range,
+ .on_lock = kvm_mmu_invalidate_begin,
+- .on_unlock = (void *)kvm_null_fn,
+ .flush_on_ret = true,
+ .may_block = mmu_notifier_range_blockable(range),
+ };
+@@ -891,7 +883,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ .end = range->end,
+ .handler = (void *)kvm_null_fn,
+ .on_lock = kvm_mmu_invalidate_end,
+- .on_unlock = (void *)kvm_null_fn,
+ .flush_on_ret = false,
+ .may_block = mmu_notifier_range_blockable(range),
+ };
+--
+2.43.0
+
--- /dev/null
+From cdf74c69ece21da50e61ab85d3c0ab026d7a5b5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jun 2024 09:41:51 -0500
+Subject: KVM: Stop processing *all* memslots when "null" mmu_notifier handler
+ is found
+
+From: Babu Moger <babu.moger@amd.com>
+
+[ Upstream commit c3f3edf73a8f854f8766a69d2734198a58762e33 ]
+
+Bail from outer address space loop, not just the inner memslot loop, when
+a "null" handler is encountered by __kvm_handle_hva_range(), which is the
+intended behavior. On x86, which has multiple address spaces thanks to
+SMM emulation, breaking from just the memslot loop results in undefined
+behavior due to assigning the non-existent return value from kvm_null_fn()
+to a bool.
+
+In practice, the bug is benign as kvm_mmu_notifier_invalidate_range_end()
+is the only caller that passes handler=kvm_null_fn, and it doesn't set
+flush_on_ret, i.e. assigning garbage to r.ret is ultimately ignored. And
+for most configuration the compiler elides the entire sequence, i.e. there
+is no undefined behavior at runtime.
+
+ ------------[ cut here ]------------
+ UBSAN: invalid-load in arch/x86/kvm/../../../virt/kvm/kvm_main.c:655:10
+ load of value 160 is not a valid value for type '_Bool'
+ CPU: 370 PID: 8246 Comm: CPU 0/KVM Not tainted 6.8.2-amdsos-build58-ubuntu-22.04+ #1
+ Hardware name: AMD Corporation Sh54p/Sh54p, BIOS WPC4429N 04/25/2024
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x48/0x60
+ ubsan_epilogue+0x5/0x30
+ __ubsan_handle_load_invalid_value+0x79/0x80
+ kvm_mmu_notifier_invalidate_range_end.cold+0x18/0x4f [kvm]
+ __mmu_notifier_invalidate_range_end+0x63/0xe0
+ __split_huge_pmd+0x367/0xfc0
+ do_huge_pmd_wp_page+0x1cc/0x380
+ __handle_mm_fault+0x8ee/0xe50
+ handle_mm_fault+0xe4/0x4a0
+ __get_user_pages+0x190/0x840
+ get_user_pages_unlocked+0xe0/0x590
+ hva_to_pfn+0x114/0x550 [kvm]
+ kvm_faultin_pfn+0xed/0x5b0 [kvm]
+ kvm_tdp_page_fault+0x123/0x170 [kvm]
+ kvm_mmu_page_fault+0x244/0xaa0 [kvm]
+ vcpu_enter_guest+0x592/0x1070 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x145/0x8a0 [kvm]
+ kvm_vcpu_ioctl+0x288/0x6d0 [kvm]
+ __x64_sys_ioctl+0x8f/0xd0
+ do_syscall_64+0x77/0x120
+ entry_SYSCALL_64_after_hwframe+0x6e/0x76
+ </TASK>
+ ---[ end trace ]---
+
+Fixes: 071064f14d87 ("KVM: Don't take mmu_lock for range invalidation unless necessary")
+Signed-off-by: Babu Moger <babu.moger@amd.com>
+Link: https://lore.kernel.org/r/b8723d39903b64c241c50f5513f804390c7b5eec.1718203311.git.babu.moger@amd.com
+[sean: massage changelog]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 5ac350ba4e996..61c48e88c9796 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -652,7 +652,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
+ range->on_lock(kvm);
+
+ if (IS_KVM_NULL_FN(range->handler))
+- break;
++ goto mmu_unlock;
+ }
+ r.ret |= range->handler(kvm, &gfn_range);
+ }
+@@ -661,6 +661,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
+ if (range->flush_on_ret && r.ret)
+ kvm_flush_remote_tlbs(kvm);
+
++mmu_unlock:
+ if (r.found_memslot)
+ KVM_MMU_UNLOCK(kvm);
+
+--
+2.43.0
+
--- /dev/null
+From 75255b23d060e7de1c17cb37cc2dcb7fb9226f79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:21:43 -0700
+Subject: KVM: Tweak kvm_hva_range and hva_handler_t to allow reusing for gfn
+ ranges
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit e97b39c5c4362dc1cbc37a563ddac313b96c84f3 ]
+
+Rework and rename "struct kvm_hva_range" into "kvm_mmu_notifier_range" so
+that the structure can be used to handle notifications that operate on gfn
+context, i.e. that aren't tied to a host virtual address. Rename the
+handler typedef too (arguably it should always have been gfn_handler_t).
+
+Practically speaking, this is a nop for 64-bit kernels as the only
+meaningful change is to store start+end as u64s instead of unsigned longs.
+
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Fuad Tabba <tabba@google.com>
+Tested-by: Fuad Tabba <tabba@google.com>
+Message-Id: <20231027182217.3615211-2-seanjc@google.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c3f3edf73a8f ("KVM: Stop processing *all* memslots when "null" mmu_notifier handler is found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 486800a7024b3..0524933856d42 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -541,18 +541,22 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+ return container_of(mn, struct kvm, mmu_notifier);
+ }
+
+-typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
++typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+
+ typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+
+ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
+-struct kvm_hva_range {
+- unsigned long start;
+- unsigned long end;
++struct kvm_mmu_notifier_range {
++ /*
++ * 64-bit addresses, as KVM notifiers can operate on host virtual
++ * addresses (unsigned long) and guest physical addresses (64-bit).
++ */
++ u64 start;
++ u64 end;
+ union kvm_mmu_notifier_arg arg;
+- hva_handler_t handler;
++ gfn_handler_t handler;
+ on_lock_fn_t on_lock;
+ on_unlock_fn_t on_unlock;
+ bool flush_on_ret;
+@@ -581,7 +585,7 @@ static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+ node = interval_tree_iter_next(node, start, last)) \
+
+ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+- const struct kvm_hva_range *range)
++ const struct kvm_mmu_notifier_range *range)
+ {
+ bool ret = false, locked = false;
+ struct kvm_gfn_range gfn_range;
+@@ -608,9 +612,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+ unsigned long hva_start, hva_end;
+
+ slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
+- hva_start = max(range->start, slot->userspace_addr);
+- hva_end = min(range->end, slot->userspace_addr +
+- (slot->npages << PAGE_SHIFT));
++ hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
++ hva_end = min_t(unsigned long, range->end,
++ slot->userspace_addr + (slot->npages << PAGE_SHIFT));
+
+ /*
+ * To optimize for the likely case where the address
+@@ -660,10 +664,10 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end,
+ union kvm_mmu_notifier_arg arg,
+- hva_handler_t handler)
++ gfn_handler_t handler)
+ {
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+- const struct kvm_hva_range range = {
++ const struct kvm_mmu_notifier_range range = {
+ .start = start,
+ .end = end,
+ .arg = arg,
+@@ -680,10 +684,10 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end,
+- hva_handler_t handler)
++ gfn_handler_t handler)
+ {
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+- const struct kvm_hva_range range = {
++ const struct kvm_mmu_notifier_range range = {
+ .start = start,
+ .end = end,
+ .handler = handler,
+@@ -771,7 +775,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range)
+ {
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+- const struct kvm_hva_range hva_range = {
++ const struct kvm_mmu_notifier_range hva_range = {
+ .start = range->start,
+ .end = range->end,
+ .handler = kvm_unmap_gfn_range,
+@@ -835,7 +839,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range)
+ {
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+- const struct kvm_hva_range hva_range = {
++ const struct kvm_mmu_notifier_range hva_range = {
+ .start = range->start,
+ .end = range->end,
+ .handler = (void *)kvm_null_fn,
+--
+2.43.0
+
--- /dev/null
+From 68a14ccc3fb35047cc4900c8ddd4b6f959e25b77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Oct 2023 11:21:45 -0700
+Subject: KVM: Use gfn instead of hva for mmu_notifier_retry
+
+From: Chao Peng <chao.p.peng@linux.intel.com>
+
+[ Upstream commit 8569992d64b8f750e34b7858eac5d7daaf0f80fd ]
+
+Currently in mmu_notifier invalidate path, hva range is recorded and then
+checked against by mmu_invalidate_retry_hva() in the page fault handling
+path. However, for the soon-to-be-introduced private memory, a page fault
+may not have a hva associated, checking gfn(gpa) makes more sense.
+
+For existing hva based shared memory, gfn is expected to also work. The
+only downside is when aliasing multiple gfns to a single hva, the
+current algorithm of checking multiple ranges could result in a much
+larger range being rejected. Such aliasing should be uncommon, so the
+impact is expected small.
+
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Cc: Xu Yilun <yilun.xu@intel.com>
+Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
+Reviewed-by: Fuad Tabba <tabba@google.com>
+Tested-by: Fuad Tabba <tabba@google.com>
+[sean: convert vmx_set_apic_access_page_addr() to gfn-based API]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Xu Yilun <yilun.xu@linux.intel.com>
+Message-Id: <20231027182217.3615211-4-seanjc@google.com>
+Reviewed-by: Kai Huang <kai.huang@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: c3f3edf73a8f ("KVM: Stop processing *all* memslots when "null" mmu_notifier handler is found")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/mmu.c | 10 ++++++----
+ arch/x86/kvm/vmx/vmx.c | 11 +++++-----
+ include/linux/kvm_host.h | 33 +++++++++++++++++++-----------
+ virt/kvm/kvm_main.c | 43 +++++++++++++++++++++++++++++++---------
+ 4 files changed, 66 insertions(+), 31 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 294775b7383b4..e32b7072324e5 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3056,7 +3056,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
+ *
+ * There are several ways to safely use this helper:
+ *
+- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
++ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
+ * consuming it. In this case, mmu_lock doesn't need to be held during the
+ * lookup, but it does need to be held while checking the MMU notifier.
+ *
+@@ -4358,7 +4358,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+ return true;
+
+ return fault->slot &&
+- mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
++ mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
+ }
+
+ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+@@ -6245,7 +6245,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+
+ write_lock(&kvm->mmu_lock);
+
+- kvm_mmu_invalidate_begin(kvm, 0, -1ul);
++ kvm_mmu_invalidate_begin(kvm);
++
++ kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
+
+ flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+
+@@ -6255,7 +6257,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ if (flush)
+ kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
+
+- kvm_mmu_invalidate_end(kvm, 0, -1ul);
++ kvm_mmu_invalidate_end(kvm);
+
+ write_unlock(&kvm->mmu_lock);
+ }
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index dae499e2da84e..bb6b0f15ceb91 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6766,10 +6766,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+ return;
+
+ /*
+- * Grab the memslot so that the hva lookup for the mmu_notifier retry
+- * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
+- * on the pfn lookup's validation of the memslot to ensure a valid hva
+- * is used for the retry check.
++ * Explicitly grab the memslot using KVM's internal slot ID to ensure
++ * KVM doesn't unintentionally grab a userspace memslot. It _should_
++ * be impossible for userspace to create a memslot for the APIC when
++ * APICv is enabled, but paranoia won't hurt in this case.
+ */
+ slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
+@@ -6794,8 +6794,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+ return;
+
+ read_lock(&vcpu->kvm->mmu_lock);
+- if (mmu_invalidate_retry_hva(kvm, mmu_seq,
+- gfn_to_hva_memslot(slot, gfn))) {
++ if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+ read_unlock(&vcpu->kvm->mmu_lock);
+ goto out;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index fb6c6109fdcad..11d0916883460 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -787,8 +787,8 @@ struct kvm {
+ struct mmu_notifier mmu_notifier;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+- unsigned long mmu_invalidate_range_start;
+- unsigned long mmu_invalidate_range_end;
++ gfn_t mmu_invalidate_range_start;
++ gfn_t mmu_invalidate_range_end;
+ #endif
+ struct list_head devices;
+ u64 manual_dirty_log_protect;
+@@ -1392,10 +1392,9 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+ #endif
+
+-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+- unsigned long end);
+-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+- unsigned long end);
++void kvm_mmu_invalidate_begin(struct kvm *kvm);
++void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
++void kvm_mmu_invalidate_end(struct kvm *kvm);
+
+ long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+@@ -1970,9 +1969,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
+ return 0;
+ }
+
+-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
++static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
+ unsigned long mmu_seq,
+- unsigned long hva)
++ gfn_t gfn)
+ {
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+@@ -1981,10 +1980,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ * that might be being invalidated. Note that it may include some false
+ * positives, due to shortcuts when handing concurrent invalidations.
+ */
+- if (unlikely(kvm->mmu_invalidate_in_progress) &&
+- hva >= kvm->mmu_invalidate_range_start &&
+- hva < kvm->mmu_invalidate_range_end)
+- return 1;
++ if (unlikely(kvm->mmu_invalidate_in_progress)) {
++ /*
++ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
++ * but before updating the range is a KVM bug.
++ */
++ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
++ kvm->mmu_invalidate_range_end == INVALID_GPA))
++ return 1;
++
++ if (gfn >= kvm->mmu_invalidate_range_start &&
++ gfn < kvm->mmu_invalidate_range_end)
++ return 1;
++ }
++
+ if (kvm->mmu_invalidate_seq != mmu_seq)
+ return 1;
+ return 0;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 5a97e6c7d9c20..9cc57b23ec81f 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -543,9 +543,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
+
+ typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+
+-typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
+- unsigned long end);
+-
++typedef void (*on_lock_fn_t)(struct kvm *kvm);
+ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
+ struct kvm_mmu_notifier_range {
+@@ -637,7 +635,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
+ locked = true;
+ KVM_MMU_LOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_lock))
+- range->on_lock(kvm, range->start, range->end);
++ range->on_lock(kvm);
++
+ if (IS_KVM_NULL_FN(range->handler))
+ break;
+ }
+@@ -742,16 +741,29 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+ kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
+ }
+
+-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+- unsigned long end)
++void kvm_mmu_invalidate_begin(struct kvm *kvm)
+ {
++ lockdep_assert_held_write(&kvm->mmu_lock);
+ /*
+ * The count increase must become visible at unlock time as no
+ * spte can be established without taking the mmu_lock and
+ * count is also read inside the mmu_lock critical section.
+ */
+ kvm->mmu_invalidate_in_progress++;
++
+ if (likely(kvm->mmu_invalidate_in_progress == 1)) {
++ kvm->mmu_invalidate_range_start = INVALID_GPA;
++ kvm->mmu_invalidate_range_end = INVALID_GPA;
++ }
++}
++
++void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
++{
++ lockdep_assert_held_write(&kvm->mmu_lock);
++
++ WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
++
++ if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
+ kvm->mmu_invalidate_range_start = start;
+ kvm->mmu_invalidate_range_end = end;
+ } else {
+@@ -771,6 +783,12 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ }
+ }
+
++static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
++{
++ kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
++ return kvm_unmap_gfn_range(kvm, range);
++}
++
+ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range)
+ {
+@@ -778,7 +796,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ const struct kvm_mmu_notifier_range hva_range = {
+ .start = range->start,
+ .end = range->end,
+- .handler = kvm_unmap_gfn_range,
++ .handler = kvm_mmu_unmap_gfn_range,
+ .on_lock = kvm_mmu_invalidate_begin,
+ .on_unlock = kvm_arch_guest_memory_reclaimed,
+ .flush_on_ret = true,
+@@ -817,9 +835,10 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ return 0;
+ }
+
+-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+- unsigned long end)
++void kvm_mmu_invalidate_end(struct kvm *kvm)
+ {
++ lockdep_assert_held_write(&kvm->mmu_lock);
++
+ /*
+ * This sequence increase will notify the kvm page fault that
+ * the page that is going to be mapped in the spte could have
+@@ -834,6 +853,12 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ */
+ kvm->mmu_invalidate_in_progress--;
+ KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
++
++ /*
++ * Assert that at least one range was added between start() and end().
++ * Not adding a range isn't fatal, but it is a KVM bug.
++ */
++ WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
+ }
+
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+--
+2.43.0
+
--- /dev/null
+From e3fd28087ae4c8b7e838139002c5f429b0d07e58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 May 2024 01:56:58 -0700
+Subject: RDMA/bnxt_re: Fix the max msix vectors macro
+
+From: Selvin Xavier <selvin.xavier@broadcom.com>
+
+[ Upstream commit 056620da899527c14cf36e5019a0decaf4cf0f79 ]
+
+bnxt_re no longer decide the number of MSI-x vectors used by itself.
+Its decided by bnxt_en now. So when bnxt_en changes this value, system
+crash is seen.
+
+Depend on the max value reported by bnxt_en instead of using the its own macros.
+
+Fixes: 303432211324 ("bnxt_en: Remove runtime interrupt vector allocation")
+Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Link: https://lore.kernel.org/r/1716195418-11767-1-git-send-email-selvin.xavier@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/bnxt_re.h | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 9fd9849ebdd14..5b481d8539eee 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -106,8 +106,6 @@ struct bnxt_re_gsi_context {
+ struct bnxt_re_sqp_entries *sqp_tbl;
+ };
+
+-#define BNXT_RE_MIN_MSIX 2
+-#define BNXT_RE_MAX_MSIX 9
+ #define BNXT_RE_AEQ_IDX 0
+ #define BNXT_RE_NQ_IDX 1
+ #define BNXT_RE_GEN_P5_MAX_VF 64
+@@ -166,7 +164,7 @@ struct bnxt_re_dev {
+ struct bnxt_qplib_rcfw rcfw;
+
+ /* NQ */
+- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
++ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
+
+ /* Device Resources */
+ struct bnxt_qplib_dev_attr dev_attr;
+--
+2.43.0
+
--- /dev/null
+From 539121bcaeeeb7b357399e62c49058531df3755e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jun 2024 01:16:08 -0700
+Subject: RDMA/mana_ib: Ignore optional access flags for MRs
+
+From: Konstantin Taranov <kotaranov@microsoft.com>
+
+[ Upstream commit 82a5cc783d49b86afd2f60e297ecd85223c39f88 ]
+
+Ignore optional ib_access_flags when an MR is created.
+
+Fixes: 0266a177631d ("RDMA/mana_ib: Add a driver for Microsoft Azure Network Adapter")
+Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
+Link: https://lore.kernel.org/r/1717575368-14879-1-git-send-email-kotaranov@linux.microsoft.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mana/mr.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
+index 351207c60eb65..af79b6e3a5818 100644
+--- a/drivers/infiniband/hw/mana/mr.c
++++ b/drivers/infiniband/hw/mana/mr.c
+@@ -118,6 +118,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
+ start, iova, length, access_flags);
+
++ access_flags &= ~IB_ACCESS_OPTIONAL;
+ if (access_flags & ~VALID_MR_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+--
+2.43.0
+
--- /dev/null
+From 8cbc66a8ff8700c4d8dd5b87454969ef717139c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 15:52:56 +0300
+Subject: RDMA/mlx5: Add check for srq max_sge attribute
+
+From: Patrisious Haddad <phaddad@nvidia.com>
+
+[ Upstream commit 36ab7ada64caf08f10ee5a114d39964d1f91e81d ]
+
+max_sge attribute is passed by the user, and is inserted and used
+unchecked, so verify that the value doesn't exceed maximum allowed value
+before using it.
+
+Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
+Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
+Link: https://lore.kernel.org/r/277ccc29e8d57bfd53ddeb2ac633f2760cf8cdd0.1716900410.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/srq.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index a056ea835da54..84be0c3d56995 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ int err;
+ struct mlx5_srq_attr in = {};
+ __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
++ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
++ sizeof(struct mlx5_wqe_data_seg);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC &&
+ init_attr->srq_type != IB_SRQT_TM)
+ return -EOPNOTSUPP;
+
+- /* Sanity check SRQ size before proceeding */
+- if (init_attr->attr.max_wr >= max_srq_wqes) {
+- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
+- init_attr->attr.max_wr,
+- max_srq_wqes);
++ /* Sanity check SRQ and sge size before proceeding */
++ if (init_attr->attr.max_wr >= max_srq_wqes ||
++ init_attr->attr.max_sge > max_sge_sz) {
++ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
++ init_attr->attr.max_wr, max_srq_wqes,
++ init_attr->attr.max_sge, max_sge_sz);
+ return -EINVAL;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From d3fa7e1daaebe1d3d6810af32aae760ad359000d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 15:52:55 +0300
+Subject: RDMA/mlx5: Fix unwind flow as part of mlx5_ib_stage_init_init
+
+From: Yishai Hadas <yishaih@nvidia.com>
+
+[ Upstream commit 81497c148b7a2e4a4fbda93aee585439f7323e2e ]
+
+Fix unwind flow as part of mlx5_ib_stage_init_init to use the correct
+goto upon an error.
+
+Fixes: 758ce14aee82 ("RDMA/mlx5: Implement MACsec gid addition and deletion")
+Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
+Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
+Link: https://lore.kernel.org/r/aa40615116eda14ec9eca21d52017d632ea89188.1716900410.git.leon@kernel.org
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx5/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 102ead497196c..45a497c0258b3 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3732,10 +3732,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
+ return 0;
+-err:
+- mlx5r_macsec_dealloc_gids(dev);
+ err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
++err:
++ mlx5r_macsec_dealloc_gids(dev);
+ return err;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 22d5521708f3c775d3b9b6331bb707d26e31f29e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 17:46:17 +0800
+Subject: RDMA/rxe: Fix responder length checking for UD request packets
+
+From: Honggang LI <honggangli@163.com>
+
+[ Upstream commit f67ac0061c7614c1548963d3ef1ee1606efd8636 ]
+
+According to the IBA specification:
+If a UD request packet is detected with an invalid length, the request
+shall be an invalid request and it shall be silently dropped by
+the responder. The responder then waits for a new request packet.
+
+commit 689c5421bfe0 ("RDMA/rxe: Fix incorrect responder length checking")
+defers responder length check for UD QPs in function `copy_data`.
+But it introduces a regression issue for UD QPs.
+
+When the packet size is too large to fit in the receive buffer,
+`copy_data` will return the error code -EINVAL. Then `send_data_in`
+will return RESPST_ERR_MALFORMED_WQE, and the UD QP will transition
+into the ERROR state.
+
+Fixes: 689c5421bfe0 ("RDMA/rxe: Fix incorrect responder length checking")
+Signed-off-by: Honggang LI <honggangli@163.com>
+Link: https://lore.kernel.org/r/20240523094617.141148-1-honggangli@163.com
+Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_resp.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index da470a925efc7..c02aa27fe5d81 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
+ * receive buffer later. For rmda operations additional
+ * length checks are performed in check_rkey.
+ */
++ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
++ unsigned int payload = payload_size(pkt);
++ unsigned int recv_buffer_len = 0;
++ int i;
++
++ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
++ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
++ if (payload + 40 > recv_buffer_len) {
++ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
++ return RESPST_ERR_LENGTH;
++ }
++ }
++
+ if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
+ (qp_type(qp) == IB_QPT_UC))) {
+ unsigned int mtu = qp->mtu;
+--
+2.43.0
+
--- /dev/null
+From b2cb3be5190ebd8c02105e484cd3327bfbaf4af7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jun 2024 14:42:34 +0300
+Subject: regulator: bd71815: fix ramp values
+
+From: Kalle Niemi <kaleposti@gmail.com>
+
+[ Upstream commit 4cac29b846f38d5f0654cdfff5c5bfc37305081c ]
+
+Ramp values are inverted. This caused wrong values to be written to the
+register when ramp values were defined in the device tree.
+
+Invert values in table to fix this.
+
+Signed-off-by: Kalle Niemi <kaleposti@gmail.com>
+Fixes: 1aad39001e85 ("regulator: Support ROHM BD71815 regulators")
+Reviewed-by: Matti Vaittinen <mazziesaccount@gmail.com>
+Link: https://lore.kernel.org/r/ZmmJXtuVJU6RgQAH@latitude5580
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/bd71815-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
+index 26192d55a6858..79fbb45297f6b 100644
+--- a/drivers/regulator/bd71815-regulator.c
++++ b/drivers/regulator/bd71815-regulator.c
+@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
+ * 10: 2.50mV/usec 10mV 4uS
+ * 11: 1.25mV/usec 10mV 8uS
+ */
+-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
++static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
+
+ static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+--
+2.43.0
+
--- /dev/null
+From b6fe35c42a6970e102a1005783e3e4299321a235 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Jun 2024 20:55:32 +0100
+Subject: regulator: core: Fix modpost error "regulator_get_regmap" undefined
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+[ Upstream commit 3f60497c658d2072714d097a177612d34b34aa3d ]
+
+Fix the modpost error "regulator_get_regmap" undefined by adding export
+symbol.
+
+Fixes: 04eca28cde52 ("regulator: Add helpers for low-level register access")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202406110117.mk5UR3VZ-lkp@intel.com
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://lore.kernel.org/r/20240610195532.175942-1-biju.das.jz@bp.renesas.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index dfb986377a989..c96bf095695fd 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3334,6 +3334,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
+
+ return map ? map : ERR_PTR(-EOPNOTSUPP);
+ }
++EXPORT_SYMBOL_GPL(regulator_get_regmap);
+
+ /**
+ * regulator_get_hardware_vsel_register - get the HW voltage selector register
+--
+2.43.0
+
ice-fix-vsi-list-rule-with-ice_sw_lkup_last-type.patch
bnxt_en-restore-ptp-tx_avail-count-in-case-of-skb_pa.patch
net-usb-rtl8150-fix-unintiatilzed-variables-in-rtl81.patch
+rdma-bnxt_re-fix-the-max-msix-vectors-macro.patch
+spi-cs42l43-correct-spi-root-clock-speed.patch
+rdma-rxe-fix-responder-length-checking-for-ud-reques.patch
+regulator-core-fix-modpost-error-regulator_get_regma.patch
+dmaengine-idxd-fix-possible-use-after-free-in-irq_pr.patch
+dmaengine-ioatdma-fix-leaking-on-version-mismatch.patch
+dmaengine-ioatdma-fix-error-path-in-ioat3_dma_probe.patch
+dmaengine-ioatdma-fix-kmemleak-in-ioat_pci_probe.patch
+dmaengine-fsl-edma-avoid-linking-both-modules.patch
+dmaengine-ioatdma-fix-missing-kmem_cache_destroy.patch
+regulator-bd71815-fix-ramp-values.patch
+thermal-drivers-mediatek-lvts_thermal-return-error-i.patch
+arm64-dts-imx8mp-fix-tc9595-reset-gpio-on-dh-i.mx8m-.patch
+arm64-dts-imx8mp-fix-tc9595-input-clock-on-dh-i.mx8m.patch
+arm64-dts-freescale-imx8mp-venice-gw73xx-2x-fix-bt-s.patch
+arm64-dts-imx93-11x11-evk-remove-the-no-sdio-propert.patch
+arm64-dts-freescale-imx8mm-verdin-enable-hysteresis-.patch
+acpica-revert-acpica-avoid-info-mapping-multiple-bar.patch
+kvm-tweak-kvm_hva_range-and-hva_handler_t-to-allow-r.patch
+kvm-assert-that-mmu_invalidate_in_progress-never-goe.patch
+kvm-use-gfn-instead-of-hva-for-mmu_notifier_retry.patch
+kvm-add-a-dedicated-mmu_notifier-flag-for-reclaiming.patch
+kvm-drop-.on_unlock-mmu_notifier-hook.patch
+kvm-stop-processing-all-memslots-when-null-mmu_notif.patch
+spi-spi-imx-imx51-revert-burst-length-calculation-ba.patch
+io_uring-rsrc-fix-incorrect-assignment-of-iter-nr_se.patch
+firmware-psci-fix-return-value-from-psci_system_susp.patch
+rdma-mlx5-fix-unwind-flow-as-part-of-mlx5_ib_stage_i.patch
+rdma-mlx5-add-check-for-srq-max_sge-attribute.patch
+rdma-mana_ib-ignore-optional-access-flags-for-mrs.patch
--- /dev/null
+From a4be9d5825c2e375ff259d40c042e6f08db03d59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 14:17:04 +0100
+Subject: spi: cs42l43: Correct SPI root clock speed
+
+From: Charles Keepax <ckeepax@opensource.cirrus.com>
+
+[ Upstream commit 4eecb644b8b82f5279a348f6ebe77e3d6e5b1b05 ]
+
+The root clock is actually 49.152MHz, not 40MHz, as it is derived from
+the primary audio clock; update the driver to match. This error can
+cause the actual clock rate to be higher than the requested clock rate
+on the SPI bus.
+
+Fixes: ef75e767167a ("spi: cs42l43: Add SPI controller support")
+Signed-off-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://msgid.link/r/20240604131704.3227500-1-ckeepax@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cs42l43.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index c1556b6529092..3169febd80514 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -19,7 +19,7 @@
+ #include <linux/units.h>
+
+ #define CS42L43_FIFO_SIZE 16
+-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
++#define CS42L43_SPI_ROOT_HZ 49152000
+ #define CS42L43_SPI_MAX_LENGTH 65532
+
+ enum cs42l43_spi_cmd {
+--
+2.43.0
+
--- /dev/null
+From 8aa29cab1a5dde0cb47e2b88a58be336105651e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 19:34:18 +0200
+Subject: spi: spi-imx: imx51: revert burst length calculation back to
+ bits_per_word
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+[ Upstream commit df75470b317b46affbe1f5f8f006b34175be9789 ]
+
+The patch 15a6af94a277 ("spi: Increase imx51 ecspi burst length based
+on transfer length") increased the burst length calculation in
+mx51_ecspi_prepare_transfer() to be based on the transfer length.
+
+This breaks HW CS + SPI_CS_WORD support which was added in
+6e95b23a5b2d ("spi: imx: Implement support for CS_WORD") and transfers
+with bits-per-word != 8, 16, 32.
+
+SPI_CS_WORD means the CS should be toggled after each word. The
+implementation in the imx-spi driver relies on the fact that the HW CS
+is toggled automatically by the controller after each burst length
+number of bits. Setting the burst length to the number of bits of the
+_whole_ message breaks this use case.
+
+Further the patch 15a6af94a277 ("spi: Increase imx51 ecspi burst
+length based on transfer length") claims to optimize the transfers.
+But even without this patch, on modern spi-imx controllers with
+"dynamic_burst = true" (imx51, imx6 and newer), the transfers are
+already optimized, i.e. the burst length is dynamically adjusted in
+spi_imx_push() to avoid the pause between the SPI bursts. This has
+been confirmed by a scope measurement on an imx6d.
+
+Subsequent Patches tried to fix these and other problems:
+
+- 5f66db08cbd3 ("spi: imx: Take in account bits per word instead of assuming 8-bits")
+- e9b220aeacf1 ("spi: spi-imx: correctly configure burst length when using dma")
+- c712c05e46c8 ("spi: imx: fix the burst length at DMA mode and CPU mode")
+- cf6d79a0f576 ("spi: spi-imx: fix off-by-one in mx51 CPU mode burst length")
+
+but the HW CS + SPI_CS_WORD use case is still broken.
+
+To fix the problems revert the burst size calculation in
+mx51_ecspi_prepare_transfer() back to the original form, before
+15a6af94a277 ("spi: Increase imx51 ecspi burst length based on
+transfer length") was applied.
+
+Cc: Stefan Moring <stefan.moring@technolution.nl>
+Cc: Stefan Bigler <linux@bigler.io>
+Cc: Clark Wang <xiaoning.wang@nxp.com>
+Cc: Carlos Song <carlos.song@nxp.com>
+Cc: Sebastian Reichel <sre@kernel.org>
+Cc: Thorsten Scherer <T.Scherer@eckelmann.de>
+Fixes: 15a6af94a277 ("spi: Increase imx51 ecspi burst length based on transfer length")
+Fixes: 5f66db08cbd3 ("spi: imx: Take in account bits per word instead of assuming 8-bits")
+Fixes: e9b220aeacf1 ("spi: spi-imx: correctly configure burst length when using dma")
+Fixes: c712c05e46c8 ("spi: imx: fix the burst length at DMA mode and CPU mode")
+Fixes: cf6d79a0f576 ("spi: spi-imx: fix off-by-one in mx51 CPU mode burst length")
+Link: https://lore.kernel.org/all/20240618-oxpecker-of-ideal-mastery-db59f8-mkl@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Tested-by: Thorsten Scherer <t.scherer@eckelmann.de>
+Link: https://msgid.link/r/20240618-spi-imx-fix-bustlength-v1-1-2053dd5fdf87@pengutronix.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-imx.c | 14 ++------------
+ 1 file changed, 2 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 0e479c5406217..d323b37723929 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ ctrl |= (spi_imx->target_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else {
+- if (spi_imx->usedma) {
+- ctrl |= (spi_imx->bits_per_word - 1)
+- << MX51_ECSPI_CTRL_BL_OFFSET;
+- } else {
+- if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
+- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
+- << MX51_ECSPI_CTRL_BL_OFFSET;
+- else
+- ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+- BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
+- << MX51_ECSPI_CTRL_BL_OFFSET;
+- }
++ ctrl |= (spi_imx->bits_per_word - 1)
++ << MX51_ECSPI_CTRL_BL_OFFSET;
+ }
+
+ /* set clock speed */
+--
+2.43.0
+
--- /dev/null
+From 8a06870776cc732db5ee1d8c7119e0e037b071dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jun 2024 18:46:58 +0200
+Subject: thermal/drivers/mediatek/lvts_thermal: Return error in case of
+ invalid efuse data
+
+From: Julien Panis <jpanis@baylibre.com>
+
+[ Upstream commit 72cacd06e47d86d89b0e7179fbc9eb3a0f39cd93 ]
+
+This patch prevents from registering thermal entries and letting the
+driver misbehave if efuse data is invalid. A device is not properly
+calibrated if the golden temperature is zero.
+
+Fixes: f5f633b18234 ("thermal/drivers/mediatek: Add the Low Voltage Thermal Sensor driver")
+Signed-off-by: Julien Panis <jpanis@baylibre.com>
+Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://lore.kernel.org/r/20240604-mtk-thermal-calib-check-v2-1-8f258254051d@baylibre.com
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/mediatek/lvts_thermal.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
+index a4e56017dda3f..666f440b66631 100644
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -700,7 +700,11 @@ static int lvts_golden_temp_init(struct device *dev, u32 *value)
+
+ gt = (*value) >> 24;
+
+- if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
++ /* A zero value for gt means that device has invalid efuse data */
++ if (!gt)
++ return -ENODATA;
++
++ if (gt < LVTS_GOLDEN_TEMP_MAX)
+ golden_temp = gt;
+
+ coeff_b = golden_temp * 500 + LVTS_COEFF_B;
+--
+2.43.0
+