--- /dev/null
+From 30ea655a02d359ba8af013199a3bbaaa42b6e270 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:13:17 +0100
+Subject: ALSA: hda/conexant: Add quirk for HP ZBook Studio G4
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1585cf83e98db32463e5d54161b06a5f01fe9976 ]
+
+It was reported that we need the same quirk for HP ZBook Studio G4
+(SSID 103c:826b) as other HP models to make the mute-LED working.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/64d78753-b9ff-4c64-8920-64d8d31cd20c@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221002
+Link: https://patch.msgid.link/20260207131324.2428030-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index d1430ee344854..fae09c88a33c0 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1026,6 +1026,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+--
+2.51.0
+
--- /dev/null
+From 95c8e278583e78acc02cd001c29464bebfb81a00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:44:11 +0100
+Subject: ALSA: hda/conexant: Fix headphone jack handling on Acer Swift SF314
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7bc0df86c2384bc1e2012a2c946f82305054da64 ]
+
+Acer Swift SF314 (SSID 1025:136d) needs a bit of tweaks of the pin
+configurations for NID 0x16 and 0x19 to make the headphone / headset
+jack working. NID 0x17 can remain as is for the working speaker, and
+the built-in mic is supported via SOF.
+
+Cc: <stable@vger.kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221086
+Link: https://patch.msgid.link/20260217104414.62911-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index fae09c88a33c0..4d0bd1903ccbd 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -239,6 +239,7 @@ enum {
+ CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
+ CXT_FIXUP_HP_A_U,
++ CXT_FIXUP_ACER_SWIFT_HP,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -969,6 +970,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_a_u,
+ },
++ [CXT_FIXUP_ACER_SWIFT_HP] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0321403f }, /* Headphone */
++ { 0x19, 0x40f001f0 }, /* Mic */
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -1018,6 +1027,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x1025, 0x136d, "Acer Swift SF314", CXT_FIXUP_ACER_SWIFT_HP),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+--
+2.51.0
+
--- /dev/null
+From 2fa1fc8bcef8e5a0543ebcdfc63cc9a3b335a8e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Dec 2021 01:42:24 -0800
+Subject: ARM: OMAP2+: add missing of_node_put before break and return
+
+From: Wang Qing <wangqing@vivo.com>
+
+[ Upstream commit 883f464c1d23663047eda4f2bcf622365e2d0dd0 ]
+
+Fix following coccicheck warning:
+WARNING: Function "for_each_matching_node_and_match"
+should have of_node_put() before return.
+
+Early exits from for_each_matching_node_and_match should decrement the
+node reference counter.
+
+Signed-off-by: Wang Qing <wangqing@vivo.com>
+Message-Id: <1639388545-63615-1-git-send-email-wangqing@vivo.com>
+[tony@atomide.com: updated for omap_hwmod.c that was already patched]
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Stable-dep-of: 93a04ab480c8 ("ARM: omap2: Fix reference count leaks in omap_control_init()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-omap2/cm_common.c | 8 ++++++--
+ arch/arm/mach-omap2/control.c | 19 ++++++++++++++-----
+ arch/arm/mach-omap2/prm_common.c | 8 ++++++--
+ 3 files changed, 26 insertions(+), 9 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
+index b7ea609386d52..d86a36120738d 100644
+--- a/arch/arm/mach-omap2/cm_common.c
++++ b/arch/arm/mach-omap2/cm_common.c
+@@ -333,8 +333,10 @@ int __init omap2_cm_base_init(void)
+ data = (struct omap_prcm_init_data *)match->data;
+
+ ret = of_address_to_resource(np, 0, &res);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+
+ if (data->index == TI_CLKM_CM)
+ mem = &cm_base;
+@@ -380,8 +382,10 @@ int __init omap_cm_init(void)
+ continue;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ return 0;
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index 73338cf80d76c..9bc69caf338f1 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -774,8 +774,10 @@ int __init omap2_control_base_init(void)
+ data = (struct control_init_data *)match->data;
+
+ mem = of_iomap(np, 0);
+- if (!mem)
++ if (!mem) {
++ of_node_put(np);
+ return -ENOMEM;
++ }
+
+ if (data->index == TI_CLKM_CTRL) {
+ omap2_ctrl_base = mem;
+@@ -815,22 +817,24 @@ int __init omap_control_init(void)
+ if (scm_conf) {
+ syscon = syscon_node_to_regmap(scm_conf);
+
+- if (IS_ERR(syscon))
+- return PTR_ERR(syscon);
++ if (IS_ERR(syscon)) {
++ ret = PTR_ERR(syscon);
++ goto of_node_put;
++ }
+
+ if (of_get_child_by_name(scm_conf, "clocks")) {
+ ret = omap2_clk_provider_init(scm_conf,
+ data->index,
+ syscon, NULL);
+ if (ret)
+- return ret;
++ goto of_node_put;
+ }
+ } else {
+ /* No scm_conf found, direct access */
+ ret = omap2_clk_provider_init(np, data->index, NULL,
+ data->mem);
+ if (ret)
+- return ret;
++ goto of_node_put;
+ }
+ }
+
+@@ -841,6 +845,11 @@ int __init omap_control_init(void)
+ }
+
+ return 0;
++
++of_node_put:
++ of_node_put(np);
++ return ret;
++
+ }
+
+ /**
+diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
+index 65b2d82efa27b..fb2d48cfe756b 100644
+--- a/arch/arm/mach-omap2/prm_common.c
++++ b/arch/arm/mach-omap2/prm_common.c
+@@ -752,8 +752,10 @@ int __init omap2_prm_base_init(void)
+ data = (struct omap_prcm_init_data *)match->data;
+
+ ret = of_address_to_resource(np, 0, &res);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+
+ data->mem = ioremap(res.start, resource_size(&res));
+
+@@ -799,8 +801,10 @@ int __init omap_prcm_init(void)
+ data = match->data;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ omap_cm_init();
+--
+2.51.0
+
--- /dev/null
+From bbed9b1cafe93220bcb5cf0bbbc808e093209f8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 14:21:22 +0000
+Subject: ARM: omap2: Fix reference count leaks in omap_control_init()
+
+From: Wentao Liang <vulab@iscas.ac.cn>
+
+[ Upstream commit 93a04ab480c8bbcb7d9004be139c538c8a0c1bc8 ]
+
+The of_get_child_by_name() function increments the reference count
+of child nodes, causing multiple reference leaks in omap_control_init():
+
+1. scm_conf node never released in normal/error paths
+2. clocks node leak when checking existence
+3. Missing scm_conf release before np in error paths
+
+Fix these leaks by adding proper of_node_put() calls and separate error
+handling.
+
+Fixes: e5b635742e98 ("ARM: OMAP2+: control: add syscon support for register accesses")
+Cc: stable@vger.kernel.org
+Signed-off-by: Wentao Liang <vulab@iscas.ac.cn>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251217142122.1861292-1-vulab@iscas.ac.cn
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-omap2/control.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index 9bc69caf338f1..a1288d438071b 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -798,7 +798,7 @@ int __init omap2_control_base_init(void)
+ */
+ int __init omap_control_init(void)
+ {
+- struct device_node *np, *scm_conf;
++ struct device_node *np, *scm_conf, *clocks_node;
+ const struct of_device_id *match;
+ const struct omap_prcm_init_data *data;
+ int ret;
+@@ -819,16 +819,19 @@ int __init omap_control_init(void)
+
+ if (IS_ERR(syscon)) {
+ ret = PTR_ERR(syscon);
+- goto of_node_put;
++ goto err_put_scm_conf;
+ }
+
+- if (of_get_child_by_name(scm_conf, "clocks")) {
++ clocks_node = of_get_child_by_name(scm_conf, "clocks");
++ if (clocks_node) {
++ of_node_put(clocks_node);
+ ret = omap2_clk_provider_init(scm_conf,
+ data->index,
+ syscon, NULL);
+ if (ret)
+- goto of_node_put;
++ goto err_put_scm_conf;
+ }
++ of_node_put(scm_conf);
+ } else {
+ /* No scm_conf found, direct access */
+ ret = omap2_clk_provider_init(np, data->index, NULL,
+@@ -846,6 +849,9 @@ int __init omap_control_init(void)
+
+ return 0;
+
++err_put_scm_conf:
++ if (scm_conf)
++ of_node_put(scm_conf);
+ of_node_put:
+ of_node_put(np);
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 09d8eb1f3a01b0d27891a7964da1126b4cba842e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Dec 2025 01:44:38 +0800
+Subject: bus: fsl-mc: fix use-after-free in driver_override_show()
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 148891e95014b5dc5878acefa57f1940c281c431 ]
+
+The driver_override_show() function reads the driver_override string
+without holding the device_lock. However, driver_override_store() uses
+driver_set_override(), which modifies and frees the string while holding
+the device_lock.
+
+This can result in a concurrent use-after-free if the string is freed
+by the store function while being read by the show function.
+
+Fix this by holding the device_lock around the read operation.
+
+Fixes: 1f86a00c1159 ("bus/fsl-mc: add support for 'driver_override' in the mc-bus")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://lore.kernel.org/r/20251202174438.12658-1-hanguidong02@gmail.com
+Signed-off-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/fsl-mc/fsl-mc-bus.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index 8f7448da9258d..49eaf5bddd5ad 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -194,8 +194,12 @@ static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ ssize_t len;
+
+- return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
++ device_lock(dev);
++ len = sysfs_emit(buf, "%s\n", mc_dev->driver_override);
++ device_unlock(dev);
++ return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+
+--
+2.51.0
+
--- /dev/null
+From 86bec1d7af7118ee1d70fad681aa2d3a799dd84c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 05:43:39 -0700
+Subject: bus: fsl-mc: Replace snprintf and sprintf with sysfs_emit in sysfs
+ show functions
+
+From: Chelsy Ratnawat <chelsyratnawat2001@gmail.com>
+
+[ Upstream commit a50522c805a6c575c80f41b04706e084d814e116 ]
+
+Use sysfs_emit() instead of snprintf()/sprintf() when writing
+to sysfs buffers, as recommended by the kernel documentation.
+
+Signed-off-by: Chelsy Ratnawat <chelsyratnawat2001@gmail.com>
+Acked-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://lore.kernel.org/r/20250822124339.1739290-1-chelsyratnawat2001@gmail.com
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Stable-dep-of: 148891e95014 ("bus: fsl-mc: fix use-after-free in driver_override_show()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/fsl-mc/fsl-mc-bus.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
+index 4471cd1606424..8f7448da9258d 100644
+--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -151,8 +151,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+- mc_dev->obj_desc.type);
++ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
++ mc_dev->obj_desc.type);
+ }
+ static DEVICE_ATTR_RO(modalias);
+
+@@ -195,7 +195,7 @@ static ssize_t driver_override_show(struct device *dev,
+ {
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
++ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
+ }
+ static DEVICE_ATTR_RW(driver_override);
+
+--
+2.51.0
+
--- /dev/null
+From 76576ce6682202789cc90ab4f56f4625c1eb43dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 21:28:32 +0100
+Subject: bus: omap-ocp2scp: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 854f89a5b56354ba4135e0e1f0e57ab2caee59ee ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Link: https://lore.kernel.org/r/20231109202830.4124591-3-u.kleine-koenig@pengutronix.de
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Stable-dep-of: 5eb63e9bb65d ("bus: omap-ocp2scp: fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index e02d0656242b8..7d7479ba0a759 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -84,12 +84,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int omap_ocp2scp_remove(struct platform_device *pdev)
++static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+-
+- return 0;
+ }
+
+ #ifdef CONFIG_OF
+@@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+
+ static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+- .remove = omap_ocp2scp_remove,
++ .remove_new = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+--
+2.51.0
+
--- /dev/null
+From 32d30b8d8607eeb29992f09d25982520a059dfcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:01:19 +0100
+Subject: bus: omap-ocp2scp: fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 5eb63e9bb65d88abde647ced50fe6ad40c11de1a ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251219110119.23507-1-johan@kernel.org
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index 7d7479ba0a759..87e290a3dc817 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -17,15 +17,6 @@
+ #define OCP2SCP_TIMING 0x18
+ #define SYNC2_MASK 0xf
+
+-static int ocp2scp_remove_devices(struct device *dev, void *c)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+-
+- return 0;
+-}
+-
+ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ {
+ int ret;
+@@ -79,7 +70,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+
+ err0:
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+
+ return ret;
+ }
+@@ -87,7 +78,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+ }
+
+ #ifdef CONFIG_OF
+--
+2.51.0
+
--- /dev/null
+From 8caf3a045b22ec7159fbd4505d376661d606fe08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 670bb6b0765f3..4e2fc9ba2b211 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -190,8 +190,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 7cf2bd01c108a4fd6cf9338b4b57ed39d0b7fa08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Nov 2020 13:46:10 +0100
+Subject: driver core: platform: change logic implementing
+ platform_driver_probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 16085668eacdc56c46652d0f3bfef81ecace57de ]
+
+Instead of overwriting the core driver's probe function handle probing
+devices for drivers loaded by platform_driver_probe() in the platform
+driver probe function.
+
+The intended goal is to not have to change the probe function to
+simplify converting the platform bus to use bus functions.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20201119124611.2573057-2-u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/platform.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index fa023cf80dc48..16426eb934632 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -743,12 +743,25 @@ struct platform_device *platform_device_register_full(
+ }
+ EXPORT_SYMBOL_GPL(platform_device_register_full);
+
++static int platform_probe_fail(struct platform_device *pdev);
++
+ static int platform_drv_probe(struct device *_dev)
+ {
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
++ /*
++ * A driver registered using platform_driver_probe() cannot be bound
++ * again later because the probe function usually lives in __init code
++ * and so is gone. For these drivers .probe is set to
++ * platform_probe_fail in __platform_driver_probe(). Don't even
++ * prepare clocks and PM domains for these to match the traditional
++ * behaviour.
++ */
++ if (unlikely(drv->probe == platform_probe_fail))
++ return -ENXIO;
++
+ ret = of_clk_set_defaults(_dev->of_node, false);
+ if (ret < 0)
+ return ret;
+@@ -822,7 +835,7 @@ void platform_driver_unregister(struct platform_driver *drv)
+ }
+ EXPORT_SYMBOL_GPL(platform_driver_unregister);
+
+-static int platform_drv_probe_fail(struct device *_dev)
++static int platform_probe_fail(struct platform_device *pdev)
+ {
+ return -ENXIO;
+ }
+@@ -887,10 +900,9 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
+ * new devices fail.
+ */
+ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
+- drv->probe = NULL;
++ drv->probe = platform_probe_fail;
+ if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
+ retval = -ENODEV;
+- drv->driver.probe = platform_drv_probe_fail;
+ spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
+
+ if (code != retval)
+--
+2.51.0
+
--- /dev/null
+From 8e34e4f21147091c8b2d3584d1e5fd103be2c094 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Feb 2021 22:15:37 +0100
+Subject: driver core: platform: Emit a warning if a remove callback returned
+ non-zero
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <uwe@kleine-koenig.org>
+
+[ Upstream commit e5e1c209788138f33ca6558bf9f572f6904f486d ]
+
+The driver core ignores the return value of a bus' remove callback. However
+a driver returning an error code is a hint that there is a problem,
+probably a driver author who expects that returning e.g. -EBUSY has any
+effect.
+
+The right thing to do would be to make struct platform_driver::remove()
+return void. With the immense number of platform drivers this is however a
+big quest and I hope to prevent at least a few new drivers that return an
+error code here.
+
+Signed-off-by: Uwe Kleine-König <uwe@kleine-koenig.org>
+Link: https://lore.kernel.org/r/20210207211537.19992-1-uwe@kleine-koenig.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/platform.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 90166535a5c05..d0b15cbab0ff0 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -1305,13 +1305,16 @@ static int platform_remove(struct device *_dev)
+ {
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+- int ret = 0;
+
+- if (drv->remove)
+- ret = drv->remove(dev);
++ if (drv->remove) {
++ int ret = drv->remove(dev);
++
++ if (ret)
++ dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
++ }
+ dev_pm_domain_detach(_dev, true);
+
+- return ret;
++ return 0;
+ }
+
+ static void platform_shutdown(struct device *_dev)
+--
+2.51.0
+
--- /dev/null
+From c6cef33f098d3b680f1f93056867cf20421748cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Nov 2020 13:46:09 +0100
+Subject: driver core: platform: reorder functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit e21d740a3fe5ad2db7b5f5c2331fe2b713b1edba ]
+
+This way all callbacks and structures used to initialize
+platform_bus_type are defined just before platform_bus_type and in the
+same order. Also move platform_drv_probe_fail just before its only
+user.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20201119124611.2573057-1-u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/platform.c | 293 ++++++++++++++++++++--------------------
+ 1 file changed, 147 insertions(+), 146 deletions(-)
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 647066229fec3..fa023cf80dc48 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -772,11 +772,6 @@ static int platform_drv_probe(struct device *_dev)
+ return ret;
+ }
+
+-static int platform_drv_probe_fail(struct device *_dev)
+-{
+- return -ENXIO;
+-}
+-
+ static int platform_drv_remove(struct device *_dev)
+ {
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+@@ -827,6 +822,11 @@ void platform_driver_unregister(struct platform_driver *drv)
+ }
+ EXPORT_SYMBOL_GPL(platform_driver_unregister);
+
++static int platform_drv_probe_fail(struct device *_dev)
++{
++ return -ENXIO;
++}
++
+ /**
+ * __platform_driver_probe - register driver for non-hotpluggable device
+ * @drv: platform driver structure
+@@ -1017,109 +1017,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
+ }
+ EXPORT_SYMBOL_GPL(platform_unregister_drivers);
+
+-/* modalias support enables more hands-off userspace setup:
+- * (a) environment variable lets new-style hotplug events work once system is
+- * fully running: "modprobe $MODALIAS"
+- * (b) sysfs attribute lets new-style coldplug recover from hotplug events
+- * mishandled before system is fully running: "modprobe $(cat modalias)"
+- */
+-static ssize_t modalias_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- int len;
+-
+- len = of_device_modalias(dev, buf, PAGE_SIZE);
+- if (len != -ENODEV)
+- return len;
+-
+- len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+- if (len != -ENODEV)
+- return len;
+-
+- return sysfs_emit(buf, "platform:%s\n", pdev->name);
+-}
+-static DEVICE_ATTR_RO(modalias);
+-
+-static ssize_t driver_override_store(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t count)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- int ret;
+-
+- ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+- if (ret)
+- return ret;
+-
+- return count;
+-}
+-
+-static ssize_t driver_override_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- ssize_t len;
+-
+- device_lock(dev);
+- len = sysfs_emit(buf, "%s\n", pdev->driver_override);
+- device_unlock(dev);
+-
+- return len;
+-}
+-static DEVICE_ATTR_RW(driver_override);
+-
+-static ssize_t numa_node_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- return sysfs_emit(buf, "%d\n", dev_to_node(dev));
+-}
+-static DEVICE_ATTR_RO(numa_node);
+-
+-static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
+- int n)
+-{
+- struct device *dev = container_of(kobj, typeof(*dev), kobj);
+-
+- if (a == &dev_attr_numa_node.attr &&
+- dev_to_node(dev) == NUMA_NO_NODE)
+- return 0;
+-
+- return a->mode;
+-}
+-
+-static struct attribute *platform_dev_attrs[] = {
+- &dev_attr_modalias.attr,
+- &dev_attr_numa_node.attr,
+- &dev_attr_driver_override.attr,
+- NULL,
+-};
+-
+-static struct attribute_group platform_dev_group = {
+- .attrs = platform_dev_attrs,
+- .is_visible = platform_dev_attrs_visible,
+-};
+-__ATTRIBUTE_GROUPS(platform_dev);
+-
+-static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- int rc;
+-
+- /* Some devices have extra OF data and an OF-style MODALIAS */
+- rc = of_device_uevent_modalias(dev, env);
+- if (rc != -ENODEV)
+- return rc;
+-
+- rc = acpi_device_uevent_modalias(dev, env);
+- if (rc != -ENODEV)
+- return rc;
+-
+- add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
+- pdev->name);
+- return 0;
+-}
+-
+ static const struct platform_device_id *platform_match_id(
+ const struct platform_device_id *id,
+ struct platform_device *pdev)
+@@ -1134,44 +1031,6 @@ static const struct platform_device_id *platform_match_id(
+ return NULL;
+ }
+
+-/**
+- * platform_match - bind platform device to platform driver.
+- * @dev: device.
+- * @drv: driver.
+- *
+- * Platform device IDs are assumed to be encoded like this:
+- * "<name><instance>", where <name> is a short description of the type of
+- * device, like "pci" or "floppy", and <instance> is the enumerated
+- * instance of the device, like '0' or '42'. Driver IDs are simply
+- * "<name>". So, extract the <name> from the platform_device structure,
+- * and compare it against the name of the driver. Return whether they match
+- * or not.
+- */
+-static int platform_match(struct device *dev, struct device_driver *drv)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+- struct platform_driver *pdrv = to_platform_driver(drv);
+-
+- /* When driver_override is set, only bind to the matching driver */
+- if (pdev->driver_override)
+- return !strcmp(pdev->driver_override, drv->name);
+-
+- /* Attempt an OF style match first */
+- if (of_driver_match_device(dev, drv))
+- return 1;
+-
+- /* Then try ACPI style match */
+- if (acpi_driver_match_device(dev, drv))
+- return 1;
+-
+- /* Then try to match against the id table */
+- if (pdrv->id_table)
+- return platform_match_id(pdrv->id_table, pdev) != NULL;
+-
+- /* fall-back to driver name match */
+- return (strcmp(pdev->name, drv->name) == 0);
+-}
+-
+ #ifdef CONFIG_PM_SLEEP
+
+ static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
+@@ -1316,6 +1175,148 @@ int platform_pm_restore(struct device *dev)
+
+ #endif /* CONFIG_HIBERNATE_CALLBACKS */
+
++/* modalias support enables more hands-off userspace setup:
++ * (a) environment variable lets new-style hotplug events work once system is
++ * fully running: "modprobe $MODALIAS"
++ * (b) sysfs attribute lets new-style coldplug recover from hotplug events
++ * mishandled before system is fully running: "modprobe $(cat modalias)"
++ */
++static ssize_t modalias_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ int len;
++
++ len = of_device_modalias(dev, buf, PAGE_SIZE);
++ if (len != -ENODEV)
++ return len;
++
++ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
++ if (len != -ENODEV)
++ return len;
++
++ return sysfs_emit(buf, "platform:%s\n", pdev->name);
++}
++static DEVICE_ATTR_RO(modalias);
++
++static ssize_t numa_node_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
++}
++static DEVICE_ATTR_RO(numa_node);
++
++static ssize_t driver_override_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ ssize_t len;
++
++ device_lock(dev);
++ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
++ device_unlock(dev);
++
++ return len;
++}
++
++static ssize_t driver_override_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ int ret;
++
++ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
++ if (ret)
++ return ret;
++
++ return count;
++}
++static DEVICE_ATTR_RW(driver_override);
++
++static struct attribute *platform_dev_attrs[] = {
++ &dev_attr_modalias.attr,
++ &dev_attr_numa_node.attr,
++ &dev_attr_driver_override.attr,
++ NULL,
++};
++
++static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
++ int n)
++{
++ struct device *dev = container_of(kobj, typeof(*dev), kobj);
++
++ if (a == &dev_attr_numa_node.attr &&
++ dev_to_node(dev) == NUMA_NO_NODE)
++ return 0;
++
++ return a->mode;
++}
++
++static struct attribute_group platform_dev_group = {
++ .attrs = platform_dev_attrs,
++ .is_visible = platform_dev_attrs_visible,
++};
++__ATTRIBUTE_GROUPS(platform_dev);
++
++
++/**
++ * platform_match - bind platform device to platform driver.
++ * @dev: device.
++ * @drv: driver.
++ *
++ * Platform device IDs are assumed to be encoded like this:
++ * "<name><instance>", where <name> is a short description of the type of
++ * device, like "pci" or "floppy", and <instance> is the enumerated
++ * instance of the device, like '0' or '42'. Driver IDs are simply
++ * "<name>". So, extract the <name> from the platform_device structure,
++ * and compare it against the name of the driver. Return whether they match
++ * or not.
++ */
++static int platform_match(struct device *dev, struct device_driver *drv)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct platform_driver *pdrv = to_platform_driver(drv);
++
++ /* When driver_override is set, only bind to the matching driver */
++ if (pdev->driver_override)
++ return !strcmp(pdev->driver_override, drv->name);
++
++ /* Attempt an OF style match first */
++ if (of_driver_match_device(dev, drv))
++ return 1;
++
++ /* Then try ACPI style match */
++ if (acpi_driver_match_device(dev, drv))
++ return 1;
++
++ /* Then try to match against the id table */
++ if (pdrv->id_table)
++ return platform_match_id(pdrv->id_table, pdev) != NULL;
++
++ /* fall-back to driver name match */
++ return (strcmp(pdev->name, drv->name) == 0);
++}
++
++static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ int rc;
++
++ /* Some devices have extra OF data and an OF-style MODALIAS */
++ rc = of_device_uevent_modalias(dev, env);
++ if (rc != -ENODEV)
++ return rc;
++
++ rc = acpi_device_uevent_modalias(dev, env);
++ if (rc != -ENODEV)
++ return rc;
++
++ add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
++ pdev->name);
++ return 0;
++}
++
+ int platform_dma_configure(struct device *dev)
+ {
+ enum dev_dma_attr attr;
+--
+2.51.0
+
--- /dev/null
+From 9a6ee30bd7fe0b29bda76c36c9871bde80a762f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Nov 2020 13:46:11 +0100
+Subject: driver core: platform: use bus_type functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 9c30921fe7994907e0b3e0637b2c8c0fc4b5171f ]
+
+This works towards the goal mentioned in 2006 in commit 594c8281f905
+("[PATCH] Add bus_type probe, remove, shutdown methods.").
+
+The functions are moved to where the other bus_type functions are
+defined and renamed to match the already established naming scheme.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20201119124611.2573057-3-u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/platform.c | 132 ++++++++++++++++++++--------------------
+ 1 file changed, 65 insertions(+), 67 deletions(-)
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 16426eb934632..90166535a5c05 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -743,70 +743,6 @@ struct platform_device *platform_device_register_full(
+ }
+ EXPORT_SYMBOL_GPL(platform_device_register_full);
+
+-static int platform_probe_fail(struct platform_device *pdev);
+-
+-static int platform_drv_probe(struct device *_dev)
+-{
+- struct platform_driver *drv = to_platform_driver(_dev->driver);
+- struct platform_device *dev = to_platform_device(_dev);
+- int ret;
+-
+- /*
+- * A driver registered using platform_driver_probe() cannot be bound
+- * again later because the probe function usually lives in __init code
+- * and so is gone. For these drivers .probe is set to
+- * platform_probe_fail in __platform_driver_probe(). Don't even
+- * prepare clocks and PM domains for these to match the traditional
+- * behaviour.
+- */
+- if (unlikely(drv->probe == platform_probe_fail))
+- return -ENXIO;
+-
+- ret = of_clk_set_defaults(_dev->of_node, false);
+- if (ret < 0)
+- return ret;
+-
+- ret = dev_pm_domain_attach(_dev, true);
+- if (ret)
+- goto out;
+-
+- if (drv->probe) {
+- ret = drv->probe(dev);
+- if (ret)
+- dev_pm_domain_detach(_dev, true);
+- }
+-
+-out:
+- if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+- dev_warn(_dev, "probe deferral not supported\n");
+- ret = -ENXIO;
+- }
+-
+- return ret;
+-}
+-
+-static int platform_drv_remove(struct device *_dev)
+-{
+- struct platform_driver *drv = to_platform_driver(_dev->driver);
+- struct platform_device *dev = to_platform_device(_dev);
+- int ret = 0;
+-
+- if (drv->remove)
+- ret = drv->remove(dev);
+- dev_pm_domain_detach(_dev, true);
+-
+- return ret;
+-}
+-
+-static void platform_drv_shutdown(struct device *_dev)
+-{
+- struct platform_driver *drv = to_platform_driver(_dev->driver);
+- struct platform_device *dev = to_platform_device(_dev);
+-
+- if (drv->shutdown)
+- drv->shutdown(dev);
+-}
+-
+ /**
+ * __platform_driver_register - register a driver for platform-level devices
+ * @drv: platform driver structure
+@@ -817,9 +753,6 @@ int __platform_driver_register(struct platform_driver *drv,
+ {
+ drv->driver.owner = owner;
+ drv->driver.bus = &platform_bus_type;
+- drv->driver.probe = platform_drv_probe;
+- drv->driver.remove = platform_drv_remove;
+- drv->driver.shutdown = platform_drv_shutdown;
+
+ return driver_register(&drv->driver);
+ }
+@@ -1329,6 +1262,68 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+ return 0;
+ }
+
++static int platform_probe(struct device *_dev)
++{
++ struct platform_driver *drv = to_platform_driver(_dev->driver);
++ struct platform_device *dev = to_platform_device(_dev);
++ int ret;
++
++ /*
++ * A driver registered using platform_driver_probe() cannot be bound
++ * again later because the probe function usually lives in __init code
++ * and so is gone. For these drivers .probe is set to
++ * platform_probe_fail in __platform_driver_probe(). Don't even prepare
++ * clocks and PM domains for these to match the traditional behaviour.
++ */
++ if (unlikely(drv->probe == platform_probe_fail))
++ return -ENXIO;
++
++ ret = of_clk_set_defaults(_dev->of_node, false);
++ if (ret < 0)
++ return ret;
++
++ ret = dev_pm_domain_attach(_dev, true);
++ if (ret)
++ goto out;
++
++ if (drv->probe) {
++ ret = drv->probe(dev);
++ if (ret)
++ dev_pm_domain_detach(_dev, true);
++ }
++
++out:
++ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
++ dev_warn(_dev, "probe deferral not supported\n");
++ ret = -ENXIO;
++ }
++
++ return ret;
++}
++
++static int platform_remove(struct device *_dev)
++{
++ struct platform_driver *drv = to_platform_driver(_dev->driver);
++ struct platform_device *dev = to_platform_device(_dev);
++ int ret = 0;
++
++ if (drv->remove)
++ ret = drv->remove(dev);
++ dev_pm_domain_detach(_dev, true);
++
++ return ret;
++}
++
++static void platform_shutdown(struct device *_dev)
++{
++ struct platform_driver *drv = to_platform_driver(_dev->driver);
++ struct platform_device *dev = to_platform_device(_dev);
++
++ if (drv->shutdown)
++ drv->shutdown(dev);
++}
++
++
+ int platform_dma_configure(struct device *dev)
+ {
+ enum dev_dma_attr attr;
+@@ -1355,6 +1350,9 @@ struct bus_type platform_bus_type = {
+ .dev_groups = platform_dev_groups,
+ .match = platform_match,
+ .uevent = platform_uevent,
++ .probe = platform_probe,
++ .remove = platform_remove,
++ .shutdown = platform_shutdown,
+ .dma_configure = platform_dma_configure,
+ .pm = &platform_dev_pm_ops,
+ };
+--
+2.51.0
+
--- /dev/null
+From 062eadcf2f3d6d1ac8b493697b7da4c6909ba7e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 7bb26655cb3cc..74d27b564d564 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1539,11 +1539,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 92c7d3e2857774a1626ed1002e1523d3caa2791d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index 5787db933fad6..b292ef48b80bf 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From 0dbe5b9a4bf2518ea5b40ca4358008e9eb3ab67b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 10 Apr 2021 17:11:16 +0800
+Subject: memory: mtk-smi: Add device-link between smi-larb and smi-common
+
+From: Yong Wu <yong.wu@mediatek.com>
+
+[ Upstream commit 6ce2c05b21189eb17b3aa26720cc5841acf9dce8 ]
+
+Normally, If the smi-larb HW need work, we should enable the smi-common
+HW power and clock firstly.
+This patch adds device-link between the smi-larb dev and the smi-common
+dev. then If pm_runtime_get_sync(smi-larb-dev), the pm_runtime_get_sync
+(smi-common-dev) will be called automatically.
+
+Also, Add DL_FLAG_STATELESS to avoid the smi-common clocks be gated when
+probe.
+
+CC: Matthias Brugger <matthias.bgg@gmail.com>
+Suggested-by: Tomasz Figa <tfiga@chromium.org>
+Signed-off-by: Yong Wu <yong.wu@mediatek.com>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Link: https://lore.kernel.org/r/20210410091128.31823-5-yong.wu@mediatek.com
+Stable-dep-of: 9dae65913b32 ("memory: mtk-smi: fix device leak on larb probe")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 75f8e0f60d81d..101e61e956d8d 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -303,6 +303,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *smi_node;
+ struct platform_device *smi_pdev;
++ struct device_link *link;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+@@ -342,6 +343,12 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ if (!platform_get_drvdata(smi_pdev))
+ return -EPROBE_DEFER;
+ larb->smi_common_dev = &smi_pdev->dev;
++ link = device_link_add(dev, larb->smi_common_dev,
++ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
++ if (!link) {
++ dev_err(dev, "Unable to link smi-common dev\n");
++ return -ENODEV;
++ }
+ } else {
+ dev_err(dev, "Failed to get the smi_common device\n");
+ return -EINVAL;
+@@ -354,6 +361,9 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+
+ static int mtk_smi_larb_remove(struct platform_device *pdev)
+ {
++ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
++
++ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+ return 0;
+@@ -365,17 +375,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ int ret;
+
+- /* Power on smi-common. */
+- ret = pm_runtime_resume_and_get(larb->smi_common_dev);
+- if (ret < 0) {
+- dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+- return ret;
+- }
+-
+ ret = mtk_smi_clk_enable(&larb->smi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable clock(%d).\n", ret);
+- pm_runtime_put_sync(larb->smi_common_dev);
+ return ret;
+ }
+
+@@ -390,7 +392,6 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ mtk_smi_clk_disable(&larb->smi);
+- pm_runtime_put_sync(larb->smi_common_dev);
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8b9c6784a72995f98acffac86591f3446e977c82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 15:29:33 +0100
+Subject: memory: mtk-smi: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 08c1aeaa45ce0fd18912e92c6705586c8aa5240f ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/5c35a33cfdc359842e034ddd2e9358f10e91fa1f.1702822744.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Stable-dep-of: 9dae65913b32 ("memory: mtk-smi: fix device leak on larb probe")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 101e61e956d8d..a04775e675f1b 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -359,14 +359,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ return component_add(dev, &mtk_smi_larb_component_ops);
+ }
+
+-static int mtk_smi_larb_remove(struct platform_device *pdev)
++static void mtk_smi_larb_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+@@ -403,7 +402,7 @@ static const struct dev_pm_ops smi_larb_pm_ops = {
+
+ static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+- .remove = mtk_smi_larb_remove,
++ .remove_new = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+@@ -522,10 +521,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int mtk_smi_common_remove(struct platform_device *pdev)
++static void mtk_smi_common_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+@@ -561,7 +559,7 @@ static const struct dev_pm_ops smi_common_pm_ops = {
+
+ static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+- .remove = mtk_smi_common_remove,
++ .remove_new = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+--
+2.51.0
+
--- /dev/null
+From 4a91bea59327f84807630203429b3f79b1af2665 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index a04775e675f1b..2c06b9900ea04 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -366,6 +366,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From 1af67a9d8de2ea0bde14b917f1c99dddb5560c2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:38 +0100
+Subject: mfd: omap-usb-host: Convert to platform remove callback returning
+ void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 418d1e74f8597e0b2d5d0d6e1be8f1f47e68f0a4 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-11-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 24804ba508a3 ("mfd: omap-usb-host: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 2a3a240b4619a..4c07add1f7ddb 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -818,13 +818,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+-static int usbhs_omap_remove(struct platform_device *pdev)
++static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+ /* remove children */
+ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+- return 0;
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+@@ -847,7 +846,7 @@ static struct platform_driver usbhs_omap_driver = {
+ .of_match_table = usbhs_omap_dt_ids,
+ },
+ .probe = usbhs_omap_probe,
+- .remove = usbhs_omap_remove,
++ .remove_new = usbhs_omap_remove,
+ };
+
+ MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+--
+2.51.0
+
--- /dev/null
+From ecdcde760ba6c5ddbf34b437311a12db49ac540b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:07:14 +0100
+Subject: mfd: omap-usb-host: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 24804ba508a3e240501c521685a1c4eb9f574f8e ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251219110714.23919-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 4c07add1f7ddb..0fd321ed53a47 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -822,8 +822,10 @@ static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+- /* remove children */
+- device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
++ if (pdev->dev.of_node)
++ of_platform_depopulate(&pdev->dev);
++ else
++ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+--
+2.51.0
+
--- /dev/null
+From ded537441c39557f81f18f5a3fb440a1082f9132 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:41 +0100
+Subject: mfd: qcom-pm8xxx: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 19ea1d3953017518d85db35b69b5aea9bc64d630 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-14-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 4e24ce7ea009e..d25ef7c717a12 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -589,19 +589,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+ return 0;
+ }
+
+-static int pm8xxx_remove(struct platform_device *pdev)
++static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+ irq_domain_remove(chip->irqdomain);
+-
+- return 0;
+ }
+
+ static struct platform_driver pm8xxx_driver = {
+ .probe = pm8xxx_probe,
+- .remove = pm8xxx_remove,
++ .remove_new = pm8xxx_remove,
+ .driver = {
+ .name = "pm8xxx-core",
+ .of_match_table = pm8xxx_id_table,
+--
+2.51.0
+
--- /dev/null
+From 885fe121c2208c15247519e67230c745182933cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:09:47 +0100
+Subject: mfd: qcom-pm8xxx: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 27a8acea47a93fea6ad0e2df4c20a9b51490e4d9 ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251219110947.24101-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index d25ef7c717a12..795759df1c069 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -583,17 +583,11 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int pm8xxx_remove_child(struct device *dev, void *unused)
+-{
+- platform_device_unregister(to_platform_device(dev));
+- return 0;
+-}
+-
+ static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+- device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
++ of_platform_depopulate(&pdev->dev);
+ irq_domain_remove(chip->irqdomain);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From ac6223ce517c547305e4c4800922947c27a34cf6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Sep 2021 02:43:33 +0300
+Subject: mfd: qcom-pm8xxx: switch away from using chained IRQ handlers
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit d3546ccdce4bc07fcf0648bfe865dbcd6d961afc ]
+
+PM8xxx PMIC family uses GPIO as parent IRQ. Using it together with the
+irq_set_chained_handler_and_data() results in warnings from the GPIOLIB
+(see 461c1a7d4733 ("gpiolib: override irq_enable/disable"))
+as in this path the IRQ resources are not allocated (and thus the
+corresponding GPIO is not marked as used for the IRQ). Use request_irq so
+that the IRQ resources are properly set up.
+
+[ 0.803271] ------------[ cut here ]------------
+[ 0.803338] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3207 gpiochip_enable_irq+0xa4/0xa8
+[ 0.803470] Modules linked in:
+[ 0.803542] CPU: 3 PID: 1 Comm: swapper/0 Not tainted 5.14.0-rc6-next-20210820-postmarketos-qcom-apq8064+ #1
+[ 0.803645] Hardware name: Generic DT based system
+[ 0.803710] Backtrace:
+[ 0.803777] [<c0e3493c>] (dump_backtrace) from [<c0e34d00>] (show_stack+0x20/0x24)
+[ 0.803911] r7:00000c87 r6:c07062dc r5:60000093 r4:c11d0f54
+[ 0.803980] [<c0e34ce0>] (show_stack) from [<c0e38314>] (dump_stack_lvl+0x48/0x54)
+[ 0.804097] [<c0e382cc>] (dump_stack_lvl) from [<c0e38338>] (dump_stack+0x18/0x1c)
+[ 0.804217] r5:00000009 r4:c11fe208
+[ 0.804274] [<c0e38320>] (dump_stack) from [<c03219c8>] (__warn+0xfc/0x114)
+[ 0.804387] [<c03218cc>] (__warn) from [<c0e35334>] (warn_slowpath_fmt+0x74/0xd0)
+[ 0.804509] r7:c07062dc r6:00000c87 r5:c11fe208 r4:00000000
+[ 0.804577] [<c0e352c4>] (warn_slowpath_fmt) from [<c07062dc>] (gpiochip_enable_irq+0xa4/0xa8)
+[ 0.804716] r8:c27b6200 r7:c27aec00 r6:c27aec18 r5:cf77a448 r4:c02225f0
+[ 0.804789] [<c0706238>] (gpiochip_enable_irq) from [<c0706348>] (gpiochip_irq_enable+0x28/0x38)
+[ 0.804921] r5:cf77a448 r4:c27aec18
+[ 0.804977] [<c0706320>] (gpiochip_irq_enable) from [<c03897a0>] (irq_enable+0x48/0x78)
+[ 0.805111] r5:00000000 r4:c27aec00
+[ 0.805167] [<c0389758>] (irq_enable) from [<c0389850>] (__irq_startup+0x80/0xbc)
+[ 0.805286] r5:00000000 r4:c27aec00
+[ 0.805343] [<c03897d0>] (__irq_startup) from [<c038996c>] (irq_startup+0xe0/0x18c)
+[ 0.805468] r7:c27aec00 r6:00000001 r5:00000000 r4:c27aec00
+[ 0.805535] [<c038988c>] (irq_startup) from [<c0389a54>] (irq_activate_and_startup+0x3c/0x74)
+[ 0.805669] r7:c27aec00 r6:00000001 r5:c27aec00 r4:00000000
+[ 0.805736] [<c0389a18>] (irq_activate_and_startup) from [<c0389b58>] (__irq_do_set_handler+0xcc/0x1c0)
+[ 0.805875] r7:c27aec00 r6:c0383710 r5:c08a16b0 r4:00000001
+[ 0.805943] [<c0389a8c>] (__irq_do_set_handler) from [<c0389d80>] (irq_set_chained_handler_and_data+0x60/0x98)
+[ 0.806087] r7:c27b5c10 r6:c27aed40 r5:c08a16b0 r4:c27aec00
+[ 0.806154] [<c0389d20>] (irq_set_chained_handler_and_data) from [<c08a1660>] (pm8xxx_probe+0x1fc/0x24c)
+[ 0.806298] r6:0000003a r5:0000003a r4:c27b5c00
+[ 0.806359] [<c08a1464>] (pm8xxx_probe) from [<c0871420>] (platform_probe+0x6c/0xc8)
+[ 0.806495] r10:c2507080 r9:e8bea2cc r8:c165e0e0 r7:c165e0e0 r6:c15f08f8 r5:c27b5c10
+[ 0.806582] r4:00000000
+[ 0.806632] [<c08713b4>] (platform_probe) from [<c086e280>] (really_probe+0xe8/0x460)
+[ 0.806769] r7:c165e0e0 r6:c15f08f8 r5:00000000 r4:c27b5c10
+[ 0.806837] [<c086e198>] (really_probe) from [<c086e6a8>] (__driver_probe_device+0xb0/0x22c)
+[ 0.806975] r7:c27b5c10 r6:cf70fba4 r5:c15f08f8 r4:c27b5c10
+[ 0.807042] [<c086e5f8>] (__driver_probe_device) from [<c086e868>] (driver_probe_device+0x44/0xe0)
+[ 0.807188] r9:e8bea2cc r8:00000000 r7:c27b5c10 r6:cf70fba4 r5:c16ae4b4 r4:c16ae4b0
+[ 0.807271] [<c086e824>] (driver_probe_device) from [<c086ecd8>] (__device_attach_driver+0xb4/0x12c)
+[ 0.807421] r9:e8bea2cc r8:c15eec08 r7:c27b5c10 r6:cf70fba4 r5:c15f08f8 r4:00000001
+[ 0.807506] [<c086ec24>] (__device_attach_driver) from [<c086c06c>] (bus_for_each_drv+0x94/0xe4)
+[ 0.807651] r7:c16ae484 r6:c086ec24 r5:cf70fba4 r4:00000000
+[ 0.807718] [<c086bfd8>] (bus_for_each_drv) from [<c086e0e0>] (__device_attach+0x104/0x19c)
+[ 0.807852] r6:00000001 r5:c27b5c54 r4:c27b5c10
+[ 0.807913] [<c086dfdc>] (__device_attach) from [<c086eef4>] (device_initial_probe+0x1c/0x20)
+[ 0.808050] r6:c27b5c10 r5:c15ef1b0 r4:c27b5c10
+[ 0.808111] [<c086eed8>] (device_initial_probe) from [<c086d00c>] (bus_probe_device+0x94/0x9c)
+[ 0.808240] [<c086cf78>] (bus_probe_device) from [<c086a60c>] (device_add+0x404/0x8f4)
+[ 0.808370] r7:c16ae484 r6:c251ba10 r5:00000000 r4:c27b5c10
+[ 0.808439] [<c086a208>] (device_add) from [<c0a82f50>] (of_device_add+0x44/0x4c)
+[ 0.808581] r10:c144c854 r9:00000001 r8:e8bea314 r7:c251ba10 r6:00000000 r5:00000000
+[ 0.808669] r4:c27b5c00
+[ 0.808718] [<c0a82f0c>] (of_device_add) from [<c0a836cc>] (of_platform_device_create_pdata+0xa0/0xc8)
+[ 0.808850] [<c0a8362c>] (of_platform_device_create_pdata) from [<c0a83908>] (of_platform_bus_create+0x1f0/0x514)
+[ 0.809005] r9:00000001 r8:c251ba10 r7:00000000 r6:00000000 r5:00000000 r4:e8bea2b0
+[ 0.809086] [<c0a83718>] (of_platform_bus_create) from [<c0a83e04>] (of_platform_populate+0x98/0x128)
+[ 0.809233] r10:c144c854 r9:00000001 r8:c251ba10 r7:00000000 r6:00000000 r5:e8bea170
+[ 0.809321] r4:e8bea2b0
+[ 0.809371] [<c0a83d6c>] (of_platform_populate) from [<c0a83f20>] (devm_of_platform_populate+0x60/0xa8)
+[ 0.809521] r9:0000011d r8:c165e0e0 r7:e8bea170 r6:c2c34f40 r5:c2cac140 r4:c251ba10
+[ 0.809604] [<c0a83ec0>] (devm_of_platform_populate) from [<c08a212c>] (ssbi_probe+0x138/0x16c)
+[ 0.809738] r6:c2c34f40 r5:c251ba10 r4:ff822700
+[ 0.809800] [<c08a1ff4>] (ssbi_probe) from [<c0871420>] (platform_probe+0x6c/0xc8)
+[ 0.809923] r7:c165e0e0 r6:c15f0a80 r5:c251ba10 r4:00000000
+[ 0.809989] [<c08713b4>] (platform_probe) from [<c086e280>] (really_probe+0xe8/0x460)
+[ 0.810120] r7:c165e0e0 r6:c15f0a80 r5:00000000 r4:c251ba10
+[ 0.810187] [<c086e198>] (really_probe) from [<c086e6a8>] (__driver_probe_device+0xb0/0x22c)
+[ 0.810325] r7:c251ba10 r6:c15f0a80 r5:c15f0a80 r4:c251ba10
+[ 0.810393] [<c086e5f8>] (__driver_probe_device) from [<c086e868>] (driver_probe_device+0x44/0xe0)
+[ 0.810539] r9:0000011d r8:00000000 r7:c251ba10 r6:c15f0a80 r5:c16ae4b4 r4:c16ae4b0
+[ 0.810623] [<c086e824>] (driver_probe_device) from [<c086ee2c>] (__driver_attach+0xdc/0x188)
+[ 0.810766] r9:0000011d r8:c144c834 r7:00000000 r6:c15f0a80 r5:c251ba10 r4:00000000
+[ 0.810849] [<c086ed50>] (__driver_attach) from [<c086bf60>] (bus_for_each_dev+0x88/0xd4)
+[ 0.810985] r7:00000000 r6:c086ed50 r5:c15f0a80 r4:00000000
+[ 0.811052] [<c086bed8>] (bus_for_each_dev) from [<c086dad4>] (driver_attach+0x2c/0x30)
+[ 0.811182] r6:c15ef1b0 r5:c2c34e80 r4:c15f0a80
+[ 0.811243] [<c086daa8>] (driver_attach) from [<c086d2dc>] (bus_add_driver+0x180/0x21c)
+[ 0.811364] [<c086d15c>] (bus_add_driver) from [<c086fa6c>] (driver_register+0x84/0x118)
+[ 0.811492] r7:00000000 r6:ffffe000 r5:c1428210 r4:c15f0a80
+[ 0.811558] [<c086f9e8>] (driver_register) from [<c0871174>] (__platform_driver_register+0x2c/0x34)
+[ 0.811683] r5:c1428210 r4:c16524a0
+[ 0.811739] [<c0871148>] (__platform_driver_register) from [<c1428234>] (ssbi_driver_init+0x24/0x28)
+[ 0.811868] [<c1428210>] (ssbi_driver_init) from [<c0302394>] (do_one_initcall+0x68/0x2c8)
+[ 0.811990] [<c030232c>] (do_one_initcall) from [<c140147c>] (kernel_init_freeable+0x1dc/0x23c)
+[ 0.812135] r7:cf7b0400 r6:c130339c r5:00000007 r4:c147f6a0
+[ 0.812204] [<c14012a0>] (kernel_init_freeable) from [<c0e40e60>] (kernel_init+0x20/0x138)
+[ 0.812345] r10:00000000 r9:00000000 r8:00000000 r7:00000000 r6:00000000 r5:c0e40e40
+[ 0.812433] r4:00000000
+[ 0.812483] [<c0e40e40>] (kernel_init) from [<c0300150>] (ret_from_fork+0x14/0x24)
+[ 0.812596] Exception stack(0xcf70ffb0 to 0xcf70fff8)
+[ 0.812684] ffa0: 00000000 00000000 00000000 00000000
+[ 0.812809] ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+[ 0.812923] ffe0: 00000000 00000000 00000000 00000000 00000013 00000000
+[ 0.813008] r5:c0e40e40 r4:00000000
+[ 0.813075] ---[ end trace ad2443eee078d094 ]---
+
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Tested-by: David Heidelberg <david@ixit.cz> # on Nexus 7 (deb)
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Link: https://lore.kernel.org/r/20210925234333.2430755-1-dmitry.baryshkov@linaro.org
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 39 ++++++++++++++++-----------------------
+ 1 file changed, 16 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index acd172ddcbd6a..4e24ce7ea009e 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -65,7 +65,7 @@
+ struct pm_irq_data {
+ int num_irqs;
+ struct irq_chip *irq_chip;
+- void (*irq_handler)(struct irq_desc *desc);
++ irq_handler_t irq_handler;
+ };
+
+ struct pm_irq_chip {
+@@ -170,19 +170,16 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
+ return ret;
+ }
+
+-static void pm8xxx_irq_handler(struct irq_desc *desc)
++static irqreturn_t pm8xxx_irq_handler(int irq, void *data)
+ {
+- struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+- struct irq_chip *irq_chip = irq_desc_get_chip(desc);
++ struct pm_irq_chip *chip = data;
+ unsigned int root;
+ int i, ret, masters = 0;
+
+- chained_irq_enter(irq_chip, desc);
+-
+ ret = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_ROOT, &root);
+ if (ret) {
+ pr_err("Can't read root status ret=%d\n", ret);
+- return;
++ return IRQ_NONE;
+ }
+
+ /* on pm8xxx series masters start from bit 1 of the root */
+@@ -193,7 +190,7 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
+ if (masters & (1 << i))
+ pm8xxx_irq_master_handler(chip, i);
+
+- chained_irq_exit(irq_chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
+@@ -232,19 +229,17 @@ static inline void pm8821_irq_master_handler(struct pm_irq_chip *chip,
+ pm8821_irq_block_handler(chip, master, block);
+ }
+
+-static void pm8821_irq_handler(struct irq_desc *desc)
++static irqreturn_t pm8821_irq_handler(int irq, void *data)
+ {
+- struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+- struct irq_chip *irq_chip = irq_desc_get_chip(desc);
++ struct pm_irq_chip *chip = data;
+ unsigned int master;
+ int ret;
+
+- chained_irq_enter(irq_chip, desc);
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER0, &master);
+ if (ret) {
+ pr_err("Failed to read master 0 ret=%d\n", ret);
+- goto done;
++ return IRQ_NONE;
+ }
+
+ /* bits 1 through 7 marks the first 7 blocks in master 0 */
+@@ -253,19 +248,18 @@ static void pm8821_irq_handler(struct irq_desc *desc)
+
+ /* bit 0 marks if master 1 contains any bits */
+ if (!(master & BIT(0)))
+- goto done;
++ return IRQ_NONE;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER1, &master);
+ if (ret) {
+ pr_err("Failed to read master 1 ret=%d\n", ret);
+- goto done;
++ return IRQ_NONE;
+ }
+
+ pm8821_irq_master_handler(chip, 1, master);
+
+-done:
+- chained_irq_exit(irq_chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static void pm8xxx_irq_mask_ack(struct irq_data *d)
+@@ -576,14 +570,15 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ if (!chip->irqdomain)
+ return -ENODEV;
+
+- irq_set_chained_handler_and_data(irq, data->irq_handler, chip);
++ rc = devm_request_irq(&pdev->dev, irq, data->irq_handler, 0, dev_name(&pdev->dev), chip);
++ if (rc)
++ return rc;
++
+ irq_set_irq_wake(irq, 1);
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+- if (rc) {
+- irq_set_chained_handler_and_data(irq, NULL, NULL);
++ if (rc)
+ irq_domain_remove(chip->irqdomain);
+- }
+
+ return rc;
+ }
+@@ -596,11 +591,9 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+
+ static int pm8xxx_remove(struct platform_device *pdev)
+ {
+- int irq = platform_get_irq(pdev, 0);
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+- irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(chip->irqdomain);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 8123447986411d044a66f936feb2735d2d322c52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index 00a80f0adece4..7cea482f2d5f9 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -114,6 +114,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -139,7 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -346,6 +348,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From a6f45a4bf0643a07ab6eca336bc6ac9db517b728 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Dec 2022 16:09:14 +0100
+Subject: platform: Provide a remove callback that returns no value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 5c5a7680e67ba6fbbb5f4d79fa41485450c1985c ]
+
+struct platform_driver::remove returning an integer made driver authors
+expect that returning an error code was proper error handling. However
+the driver core ignores the error and continues to remove the device
+because there is nothing the core could do anyhow and reentering the
+remove callback again is only calling for trouble.
+
+So this is a source of errors typically yielding resource leaks in the
+error path.
+
+As there are too many platform drivers to neatly convert them all to
+return void in a single go, do it in several steps after this patch:
+
+ a) Convert all drivers to implement .remove_new() returning void instead
+ of .remove() returning int;
+ b) Change struct platform_driver::remove() to return void and so make
+ it identical to .remove_new();
+ c) Change all drivers back to .remove() now with the better prototype;
+ d) drop struct platform_driver::remove_new().
+
+While this touches all drivers eventually twice, steps a) and c) can be
+done one driver after another and so reduces coordination efforts
+immensely and simplifies review.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20221209150914.3557650-1-u.kleine-koenig@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/platform.c | 4 +++-
+ include/linux/platform_device.h | 11 +++++++++++
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index d0b15cbab0ff0..e07043d85c65c 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -1306,7 +1306,9 @@ static int platform_remove(struct device *_dev)
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+
+- if (drv->remove) {
++ if (drv->remove_new) {
++ drv->remove_new(dev);
++ } else if (drv->remove) {
+ int ret = drv->remove(dev);
+
+ if (ret)
+diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
+index e7a83b0218077..870a918aa251c 100644
+--- a/include/linux/platform_device.h
++++ b/include/linux/platform_device.h
+@@ -203,7 +203,18 @@ extern void platform_device_put(struct platform_device *pdev);
+
+ struct platform_driver {
+ int (*probe)(struct platform_device *);
++
++ /*
++ * Traditionally the remove callback returned an int which however is
++ * ignored by the driver core. This led to wrong expectations by driver
++ * authors who thought returning an error code was a valid error
++ * handling strategy. To convert to a callback returning void, new
++ * drivers should implement .remove_new() until the conversion it done
++ * that eventually makes .remove() return void.
++ */
+ int (*remove)(struct platform_device *);
++ void (*remove_new)(struct platform_device *);
++
+ void (*shutdown)(struct platform_device *);
+ int (*suspend)(struct platform_device *, pm_message_t state);
+ int (*resume)(struct platform_device *);
+--
+2.51.0
+
alsa-usb-audio-cap-the-packet-size-pre-calculations.patch
perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
+memory-mtk-smi-add-device-link-between-smi-larb-and-.patch
+memory-mtk-smi-convert-to-platform-remove-callback-r.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+arm-omap2-add-missing-of_node_put-before-break-and-r.patch
+arm-omap2-fix-reference-count-leaks-in-omap_control_.patch
+bus-fsl-mc-replace-snprintf-and-sprintf-with-sysfs_e.patch
+bus-fsl-mc-fix-use-after-free-in-driver_override_sho.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+bus-omap-ocp2scp-convert-to-platform-remove-callback.patch
+bus-omap-ocp2scp-fix-of-populate-on-driver-rebind.patch
+driver-core-platform-reorder-functions.patch
+driver-core-platform-change-logic-implementing-platf.patch
+driver-core-platform-use-bus_type-functions.patch
+driver-core-platform-emit-a-warning-if-a-remove-call.patch
+mfd-qcom-pm8xxx-switch-away-from-using-chained-irq-h.patch
+platform-provide-a-remove-callback-that-returns-no-v.patch
+mfd-qcom-pm8xxx-convert-to-platform-remove-callback-.patch
+mfd-qcom-pm8xxx-fix-of-populate-on-driver-rebind.patch
+mfd-omap-usb-host-convert-to-platform-remove-callbac.patch
+mfd-omap-usb-host-fix-of-populate-on-driver-rebind.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+alsa-hda-conexant-add-quirk-for-hp-zbook-studio-g4.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+alsa-hda-conexant-fix-headphone-jack-handling-on-ace.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
--- /dev/null
+From cc343d81f4a872d26cdd72b2309fb184366a01f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:13:17 +0100
+Subject: ALSA: hda/conexant: Add quirk for HP ZBook Studio G4
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1585cf83e98db32463e5d54161b06a5f01fe9976 ]
+
+It was reported that we need the same quirk for HP ZBook Studio G4
+(SSID 103c:826b) as other HP models to make the mute-LED working.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/64d78753-b9ff-4c64-8920-64d8d31cd20c@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221002
+Link: https://patch.msgid.link/20260207131324.2428030-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 59f6d70689dfc..9a2b945a25d0a 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1099,6 +1099,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+--
+2.51.0
+
--- /dev/null
+From 3099d4b147fbbe907eb3c6f0e61c32265735c3b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:44:11 +0100
+Subject: ALSA: hda/conexant: Fix headphone jack handling on Acer Swift SF314
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7bc0df86c2384bc1e2012a2c946f82305054da64 ]
+
+Acer Swift SF314 (SSID 1025:136d) needs a bit of tweaks of the pin
+configurations for NID 0x16 and 0x19 to make the headphone / headset
+jack working. NID 0x17 can remain as is for the working speaker, and
+the built-in mic is supported via SOF.
+
+Cc: <stable@vger.kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221086
+Link: https://patch.msgid.link/20260217104414.62911-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 9a2b945a25d0a..2d653b73e6795 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -312,6 +312,7 @@ enum {
+ CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
+ CXT_FIXUP_HP_A_U,
++ CXT_FIXUP_ACER_SWIFT_HP,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -1042,6 +1043,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_a_u,
+ },
++ [CXT_FIXUP_ACER_SWIFT_HP] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0321403f }, /* Headphone */
++ { 0x19, 0x40f001f0 }, /* Mic */
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -1091,6 +1100,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x1025, 0x136d, "Acer Swift SF314", CXT_FIXUP_ACER_SWIFT_HP),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+--
+2.51.0
+
--- /dev/null
+From 185e12830f2eb18ab3d78a0ad8ba024b494c0a40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Dec 2021 01:42:24 -0800
+Subject: ARM: OMAP2+: add missing of_node_put before break and return
+
+From: Wang Qing <wangqing@vivo.com>
+
+[ Upstream commit 883f464c1d23663047eda4f2bcf622365e2d0dd0 ]
+
+Fix following coccicheck warning:
+WARNING: Function "for_each_matching_node_and_match"
+should have of_node_put() before return.
+
+Early exits from for_each_matching_node_and_match should decrement the
+node reference counter.
+
+Signed-off-by: Wang Qing <wangqing@vivo.com>
+Message-Id: <1639388545-63615-1-git-send-email-wangqing@vivo.com>
+[tony@atomide.com: updated for omap_hwmod.c that was already patched]
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Stable-dep-of: 93a04ab480c8 ("ARM: omap2: Fix reference count leaks in omap_control_init()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-omap2/cm_common.c | 8 ++++++--
+ arch/arm/mach-omap2/control.c | 19 ++++++++++++++-----
+ arch/arm/mach-omap2/prm_common.c | 8 ++++++--
+ 3 files changed, 26 insertions(+), 9 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
+index e2d069fe67f18..87f2c2d2d7544 100644
+--- a/arch/arm/mach-omap2/cm_common.c
++++ b/arch/arm/mach-omap2/cm_common.c
+@@ -320,8 +320,10 @@ int __init omap2_cm_base_init(void)
+ data = (struct omap_prcm_init_data *)match->data;
+
+ ret = of_address_to_resource(np, 0, &res);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+
+ if (data->index == TI_CLKM_CM)
+ mem = &cm_base;
+@@ -367,8 +369,10 @@ int __init omap_cm_init(void)
+ continue;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ return 0;
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index 062d431fc33a8..c514a96022699 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -769,8 +769,10 @@ int __init omap2_control_base_init(void)
+ data = (struct control_init_data *)match->data;
+
+ mem = of_iomap(np, 0);
+- if (!mem)
++ if (!mem) {
++ of_node_put(np);
+ return -ENOMEM;
++ }
+
+ if (data->index == TI_CLKM_CTRL) {
+ omap2_ctrl_base = mem;
+@@ -810,22 +812,24 @@ int __init omap_control_init(void)
+ if (scm_conf) {
+ syscon = syscon_node_to_regmap(scm_conf);
+
+- if (IS_ERR(syscon))
+- return PTR_ERR(syscon);
++ if (IS_ERR(syscon)) {
++ ret = PTR_ERR(syscon);
++ goto of_node_put;
++ }
+
+ if (of_get_child_by_name(scm_conf, "clocks")) {
+ ret = omap2_clk_provider_init(scm_conf,
+ data->index,
+ syscon, NULL);
+ if (ret)
+- return ret;
++ goto of_node_put;
+ }
+ } else {
+ /* No scm_conf found, direct access */
+ ret = omap2_clk_provider_init(np, data->index, NULL,
+ data->mem);
+ if (ret)
+- return ret;
++ goto of_node_put;
+ }
+ }
+
+@@ -836,6 +840,11 @@ int __init omap_control_init(void)
+ }
+
+ return 0;
++
++of_node_put:
++ of_node_put(np);
++ return ret;
++
+ }
+
+ /**
+diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
+index 65b2d82efa27b..fb2d48cfe756b 100644
+--- a/arch/arm/mach-omap2/prm_common.c
++++ b/arch/arm/mach-omap2/prm_common.c
+@@ -752,8 +752,10 @@ int __init omap2_prm_base_init(void)
+ data = (struct omap_prcm_init_data *)match->data;
+
+ ret = of_address_to_resource(np, 0, &res);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+
+ data->mem = ioremap(res.start, resource_size(&res));
+
+@@ -799,8 +801,10 @@ int __init omap_prcm_init(void)
+ data = match->data;
+
+ ret = omap2_clk_provider_init(np, data->index, NULL, data->mem);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ omap_cm_init();
+--
+2.51.0
+
--- /dev/null
+From 23196373efe6e071051d23da23c8efbced31e52d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 14:21:22 +0000
+Subject: ARM: omap2: Fix reference count leaks in omap_control_init()
+
+From: Wentao Liang <vulab@iscas.ac.cn>
+
+[ Upstream commit 93a04ab480c8bbcb7d9004be139c538c8a0c1bc8 ]
+
+The of_get_child_by_name() function increments the reference count
+of child nodes, causing multiple reference leaks in omap_control_init():
+
+1. scm_conf node never released in normal/error paths
+2. clocks node leak when checking existence
+3. Missing scm_conf release before np in error paths
+
+Fix these leaks by adding proper of_node_put() calls and separate error
+handling.
+
+Fixes: e5b635742e98 ("ARM: OMAP2+: control: add syscon support for register accesses")
+Cc: stable@vger.kernel.org
+Signed-off-by: Wentao Liang <vulab@iscas.ac.cn>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251217142122.1861292-1-vulab@iscas.ac.cn
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mach-omap2/control.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index c514a96022699..9042bbfaeb072 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -793,7 +793,7 @@ int __init omap2_control_base_init(void)
+ */
+ int __init omap_control_init(void)
+ {
+- struct device_node *np, *scm_conf;
++ struct device_node *np, *scm_conf, *clocks_node;
+ const struct of_device_id *match;
+ const struct omap_prcm_init_data *data;
+ int ret;
+@@ -814,16 +814,19 @@ int __init omap_control_init(void)
+
+ if (IS_ERR(syscon)) {
+ ret = PTR_ERR(syscon);
+- goto of_node_put;
++ goto err_put_scm_conf;
+ }
+
+- if (of_get_child_by_name(scm_conf, "clocks")) {
++ clocks_node = of_get_child_by_name(scm_conf, "clocks");
++ if (clocks_node) {
++ of_node_put(clocks_node);
+ ret = omap2_clk_provider_init(scm_conf,
+ data->index,
+ syscon, NULL);
+ if (ret)
+- goto of_node_put;
++ goto err_put_scm_conf;
+ }
++ of_node_put(scm_conf);
+ } else {
+ /* No scm_conf found, direct access */
+ ret = omap2_clk_provider_init(np, data->index, NULL,
+@@ -841,6 +844,9 @@ int __init omap_control_init(void)
+
+ return 0;
+
++err_put_scm_conf:
++ if (scm_conf)
++ of_node_put(scm_conf);
+ of_node_put:
+ of_node_put(np);
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 9c83783b7751c871fc5a840ab7c29d8542578ef3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Dec 2021 08:20:46 +0100
+Subject: ata: libata: remove pointless VPRINTK() calls
+
+From: Hannes Reinecke <hare@suse.de>
+
+[ Upstream commit e1553351d747cbcd62db01d579dff916edcc782c ]
+
+Most of the information is already covered by tracepoints
+(if not downright pointless), so remove the VPRINTK() calls.
+And while we're at it, remove ata_scsi_dump_cdb(), too,
+as this information can be retrieved from scsi tracing.
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Stable-dep-of: bb3a8154b1a1 ("ata: libata-scsi: refactor ata_scsi_translate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-core.c | 3 ---
+ drivers/ata/libata-sata.c | 2 --
+ drivers/ata/libata-scsi.c | 42 ---------------------------------------
+ drivers/ata/libata-sff.c | 4 ----
+ drivers/ata/libata.h | 1 -
+ 5 files changed, 52 deletions(-)
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 3df057d381a73..acc78416be8ee 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4486,8 +4486,6 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
+
+ WARN_ON_ONCE(sg == NULL);
+
+- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+-
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
+
+@@ -4519,7 +4517,6 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
+ if (n_elem < 1)
+ return -1;
+
+- VPRINTK("%d sg elements mapped\n", n_elem);
+ qc->orig_n_elem = qc->n_elem;
+ qc->n_elem = n_elem;
+ qc->flags |= ATA_QCFLAG_DMAMAP;
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index bac569736c937..be41c2a715545 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1270,8 +1270,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
+ {
+ int rc = 0;
+
+- ata_scsi_dump_cdb(ap, cmd);
+-
+ if (likely(ata_dev_enabled(ap->link.device)))
+ rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+ else {
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 22c45bc64a95e..4fd8fcab5f972 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1302,8 +1302,6 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+ u64 lba = 0;
+ u32 len;
+
+- VPRINTK("six-byte command\n");
+-
+ lba |= ((u64)(cdb[1] & 0x1f)) << 16;
+ lba |= ((u64)cdb[2]) << 8;
+ lba |= ((u64)cdb[3]);
+@@ -1329,8 +1327,6 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+ u64 lba = 0;
+ u32 len = 0;
+
+- VPRINTK("ten-byte command\n");
+-
+ lba |= ((u64)cdb[2]) << 24;
+ lba |= ((u64)cdb[3]) << 16;
+ lba |= ((u64)cdb[4]) << 8;
+@@ -1358,8 +1354,6 @@ static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+ u64 lba = 0;
+ u32 len = 0;
+
+- VPRINTK("sixteen-byte command\n");
+-
+ lba |= ((u64)cdb[2]) << 56;
+ lba |= ((u64)cdb[3]) << 48;
+ lba |= ((u64)cdb[4]) << 40;
+@@ -1709,8 +1703,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ struct ata_queued_cmd *qc;
+ int rc;
+
+- VPRINTK("ENTER\n");
+-
+ qc = ata_scsi_qc_new(dev, cmd);
+ if (!qc)
+ goto err_mem;
+@@ -1741,7 +1733,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ /* select device, send command to hardware */
+ ata_qc_issue(qc);
+
+- VPRINTK("EXIT\n");
+ return 0;
+
+ early_finish:
+@@ -1854,8 +1845,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+ 2
+ };
+
+- VPRINTK("ENTER\n");
+-
+ /* set scsi removable (RMB) bit per ata bit, or if the
+ * AHCI port says it's external (Hotplug-capable, eSATA).
+ */
+@@ -2266,8 +2255,6 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+ u8 dpofua, bp = 0xff;
+ u16 fp;
+
+- VPRINTK("ENTER\n");
+-
+ six_byte = (scsicmd[0] == MODE_SENSE);
+ ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
+ /*
+@@ -2385,8 +2372,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
+ lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
+
+- VPRINTK("ENTER\n");
+-
+ if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (last_lba >= 0xffffffffULL)
+ last_lba = 0xffffffff;
+@@ -2453,7 +2438,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ */
+ static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+ {
+- VPRINTK("ENTER\n");
+ rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
+
+ return 0;
+@@ -2549,8 +2533,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ unsigned int err_mask = qc->err_mask;
+
+- VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
+-
+ /* handle completion from new EH */
+ if (unlikely(qc->ap->ops->error_handler &&
+ (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+@@ -3684,8 +3666,6 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
+ u8 buffer[64];
+ const u8 *p = buffer;
+
+- VPRINTK("ENTER\n");
+-
+ six_byte = (cdb[0] == MODE_SELECT);
+ if (six_byte) {
+ if (scmd->cmd_len < 5) {
+@@ -3984,26 +3964,6 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
+ return NULL;
+ }
+
+-/**
+- * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
+- * @ap: ATA port to which the command was being sent
+- * @cmd: SCSI command to dump
+- *
+- * Prints the contents of a SCSI command via printk().
+- */
+-
+-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
+-{
+-#ifdef ATA_VERBOSE_DEBUG
+- struct scsi_device *scsidev = cmd->device;
+-
+- VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
+- ap->print_id,
+- scsidev->channel, scsidev->id, scsidev->lun,
+- cmd->cmnd);
+-#endif
+-}
+-
+ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+ {
+ struct ata_port *ap = dev->link->ap;
+@@ -4089,8 +4049,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+
+ spin_lock_irqsave(ap->lock, irq_flags);
+
+- ata_scsi_dump_cdb(ap, cmd);
+-
+ dev = ata_scsi_find_dev(ap, scsidev);
+ if (likely(dev))
+ rc = __ata_scsi_queuecmd(cmd, dev);
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 8409e53b7b7a0..ab1fe23810707 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -888,8 +888,6 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+ if (unlikely(!bytes))
+ goto atapi_check;
+
+- VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
+-
+ if (unlikely(__atapi_pio_bytes(qc, bytes)))
+ goto err_out;
+ ata_sff_sync(ap); /* flush */
+@@ -2614,7 +2612,6 @@ static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+@@ -2674,7 +2671,6 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+ prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ }
+ prd[pi].flags_len = cpu_to_le32(blen);
+- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index bf71bd9e66cd8..d71fffe48495f 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -150,7 +150,6 @@ extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+ unsigned int id, u64 lun);
+ void ata_scsi_sdev_config(struct scsi_device *sdev);
+ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
+ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
+
+ /* libata-eh.c */
+--
+2.51.0
+
--- /dev/null
+From 06e8cab64bb875a003c5e3f93d1316f6b6f0b8c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Dec 2021 08:20:33 +0100
+Subject: ata: libata-scsi: drop DPRINTK calls for cdb translation
+
+From: Hannes Reinecke <hare@suse.de>
+
+[ Upstream commit 1fe9fb71b2ffcedd794daacf4db2056a6cb5199e ]
+
+Drop DPRINTK calls for cdb translation as they are already covered
+by other traces, and also drop the DPRINTK calls in ata_scsi_hotplug().
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Stable-dep-of: bb3a8154b1a1 ("ata: libata-scsi: refactor ata_scsi_translate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 20 +-------------------
+ 1 file changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index b57027206ae1e..22c45bc64a95e 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1472,9 +1472,6 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
+ head = track % dev->heads;
+ sect = (u32)block % dev->sectors + 1;
+
+- DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+- (u32)block, track, cyl, head, sect);
+-
+ /* Check whether the converted CHS can fit.
+ Cylinder: 0-65535
+ Head: 0-15
+@@ -1597,7 +1594,6 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+ goto invalid_fld;
+ break;
+ default:
+- DPRINTK("no-byte command\n");
+ fp = 0;
+ goto invalid_fld;
+ }
+@@ -1751,7 +1747,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ early_finish:
+ ata_qc_free(qc);
+ scsi_done(cmd);
+- DPRINTK("EXIT - early finish (good or error)\n");
+ return 0;
+
+ err_did:
+@@ -1759,12 +1754,10 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ cmd->result = (DID_ERROR << 16);
+ scsi_done(cmd);
+ err_mem:
+- DPRINTK("EXIT - internal\n");
+ return 0;
+
+ defer:
+ ata_qc_free(qc);
+- DPRINTK("EXIT - defer\n");
+ if (rc == ATA_DEFER_LINK)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ else
+@@ -2491,8 +2484,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
+ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+
+- DPRINTK("ATAPI request sense\n");
+-
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ #ifdef CONFIG_ATA_SFF
+@@ -2531,8 +2522,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
+ qc->complete_fn = atapi_sense_complete;
+
+ ata_qc_issue(qc);
+-
+- DPRINTK("EXIT\n");
+ }
+
+ /*
+@@ -2642,7 +2631,6 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ qc->tf.flags |= ATA_TFLAG_WRITE;
+- DPRINTK("direction: write\n");
+ }
+
+ qc->tf.command = ATA_CMD_PACKET;
+@@ -4065,8 +4053,6 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+ return 0;
+
+ bad_cdb_len:
+- DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
+- scmd->cmd_len, scsi_op, dev->cdb_len);
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ return 0;
+@@ -4532,12 +4518,9 @@ void ata_scsi_hotplug(struct work_struct *work)
+ container_of(work, struct ata_port, hotplug_task.work);
+ int i;
+
+- if (ap->pflags & ATA_PFLAG_UNLOADING) {
+- DPRINTK("ENTER/EXIT - unloading\n");
++ if (ap->pflags & ATA_PFLAG_UNLOADING)
+ return;
+- }
+
+- DPRINTK("ENTER\n");
+ mutex_lock(&ap->scsi_scan_mutex);
+
+ /* Unplug detached devices. We cannot use link iterator here
+@@ -4553,7 +4536,6 @@ void ata_scsi_hotplug(struct work_struct *work)
+ ata_scsi_scan_host(ap, 0);
+
+ mutex_unlock(&ap->scsi_scan_mutex);
+- DPRINTK("EXIT\n");
+ }
+
+ /**
+--
+2.51.0
+
--- /dev/null
+From 571e384a328bc7bd4b9651c40ffbe15621754df3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 14:05:25 +0900
+Subject: ata: libata-scsi: refactor ata_scsi_translate()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit bb3a8154b1a1dc2c86d037482c0a2cf9186829ed ]
+
+Factor out of ata_scsi_translate() the code handling queued command
+deferral using the port qc_defer callback and issuing the queued
+command with ata_qc_issue() into the new function ata_scsi_qc_issue(),
+and simplify the goto used in ata_scsi_translate().
+While at it, also add a lockdep annotation to check that the port lock
+is held when ata_scsi_translate() is called.
+
+No functional changes.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Reviewed-by: Igor Pylypiv <ipylypiv@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 81 ++++++++++++++++++++++++---------------
+ 1 file changed, 50 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 4fd8fcab5f972..59843188966e7 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1670,6 +1670,42 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ ata_qc_done(qc);
+ }
+
++static int ata_scsi_qc_issue(struct ata_port *ap, struct ata_queued_cmd *qc)
++{
++ int ret;
++
++ if (!ap->ops->qc_defer)
++ goto issue;
++
++ /* Check if the command needs to be deferred. */
++ ret = ap->ops->qc_defer(qc);
++ switch (ret) {
++ case 0:
++ break;
++ case ATA_DEFER_LINK:
++ ret = SCSI_MLQUEUE_DEVICE_BUSY;
++ break;
++ case ATA_DEFER_PORT:
++ ret = SCSI_MLQUEUE_HOST_BUSY;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ ret = SCSI_MLQUEUE_HOST_BUSY;
++ break;
++ }
++
++ if (ret) {
++ /* Force a requeue of the command to defer its execution. */
++ ata_qc_free(qc);
++ return ret;
++ }
++
++issue:
++ ata_qc_issue(qc);
++
++ return 0;
++}
++
+ /**
+ * ata_scsi_translate - Translate then issue SCSI command to ATA device
+ * @dev: ATA device to which the command is addressed
+@@ -1693,66 +1729,49 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+- * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
+- * needs to be deferred.
++ * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY if the
++ * command needs to be deferred.
+ */
+ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ ata_xlat_func_t xlat_func)
+ {
+ struct ata_port *ap = dev->link->ap;
+ struct ata_queued_cmd *qc;
+- int rc;
+
++ lockdep_assert_held(ap->lock);
++
++ /*
++ * ata_scsi_qc_new() calls scsi_done(cmd) in case of failure. So we
++ * have nothing further to do when allocating a qc fails.
++ */
+ qc = ata_scsi_qc_new(dev, cmd);
+ if (!qc)
+- goto err_mem;
++ return 0;
+
+ /* data is present; dma-map it */
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (unlikely(scsi_bufflen(cmd) < 1)) {
+ ata_dev_warn(dev, "WARNING: zero len r/w req\n");
+- goto err_did;
++ cmd->result = (DID_ERROR << 16);
++ goto done;
+ }
+
+ ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
+-
+ qc->dma_dir = cmd->sc_data_direction;
+ }
+
+ qc->complete_fn = ata_scsi_qc_complete;
+
+ if (xlat_func(qc))
+- goto early_finish;
+-
+- if (ap->ops->qc_defer) {
+- if ((rc = ap->ops->qc_defer(qc)))
+- goto defer;
+- }
+-
+- /* select device, send command to hardware */
+- ata_qc_issue(qc);
++ goto done;
+
+- return 0;
+-
+-early_finish:
+- ata_qc_free(qc);
+- scsi_done(cmd);
+- return 0;
++ return ata_scsi_qc_issue(ap, qc);
+
+-err_did:
++done:
+ ata_qc_free(qc);
+- cmd->result = (DID_ERROR << 16);
+ scsi_done(cmd);
+-err_mem:
+ return 0;
+-
+-defer:
+- ata_qc_free(qc);
+- if (rc == ATA_DEFER_LINK)
+- return SCSI_MLQUEUE_DEVICE_BUSY;
+- else
+- return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ struct ata_scsi_args {
+--
+2.51.0
+
--- /dev/null
+From 427524382d4aeb9b90dcd8bf93b2d37361ab818c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 21:28:32 +0100
+Subject: bus: omap-ocp2scp: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 854f89a5b56354ba4135e0e1f0e57ab2caee59ee ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Link: https://lore.kernel.org/r/20231109202830.4124591-3-u.kleine-koenig@pengutronix.de
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Stable-dep-of: 5eb63e9bb65d ("bus: omap-ocp2scp: fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index e02d0656242b8..7d7479ba0a759 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -84,12 +84,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int omap_ocp2scp_remove(struct platform_device *pdev)
++static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+-
+- return 0;
+ }
+
+ #ifdef CONFIG_OF
+@@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+
+ static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+- .remove = omap_ocp2scp_remove,
++ .remove_new = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+--
+2.51.0
+
--- /dev/null
+From fedd7cf1f800b62554dfe502ed13119305cb5c13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:01:19 +0100
+Subject: bus: omap-ocp2scp: fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 5eb63e9bb65d88abde647ced50fe6ad40c11de1a ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251219110119.23507-1-johan@kernel.org
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index 7d7479ba0a759..87e290a3dc817 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -17,15 +17,6 @@
+ #define OCP2SCP_TIMING 0x18
+ #define SYNC2_MASK 0xf
+
+-static int ocp2scp_remove_devices(struct device *dev, void *c)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+-
+- return 0;
+-}
+-
+ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ {
+ int ret;
+@@ -79,7 +70,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+
+ err0:
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+
+ return ret;
+ }
+@@ -87,7 +78,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+ }
+
+ #ifdef CONFIG_OF
+--
+2.51.0
+
--- /dev/null
+From 9148afb564faad316e6fc2e620a186fc7ce8e9c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 0f6fb776b2298..5f1af6dfe7154 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -197,8 +197,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 07b317d9c46b72385edfa89a618df8edc8ca6dea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Dec 2023 15:02:35 -0800
+Subject: driver core: Add a guard() definition for the device_lock()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+[ Upstream commit 134c6eaa6087d78c0e289931ca15ae7a5007670d ]
+
+At present there are ~200 usages of device_lock() in the kernel. Some of
+those usages lead to "goto unlock;" patterns which have proven to be
+error prone. Define a "device" guard() definition to allow for those to
+be cleaned up and prevent new ones from appearing.
+
+Link: http://lore.kernel.org/r/657897453dda8_269bd29492@dwillia2-mobl3.amr.corp.intel.com.notmuch
+Link: http://lore.kernel.org/r/6577b0c2a02df_a04c5294bb@dwillia2-xfh.jf.intel.com.notmuch
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
+Link: https://lore.kernel.org/r/170250854466.1522182.17555361077409628655.stgit@dwillia2-xfh.jf.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: dc23806a7c47 ("driver core: enforce device_lock for driver_match_device()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/device.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 89864b9185462..0ef5f7f5f8853 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -779,6 +779,8 @@ static inline void device_unlock(struct device *dev)
+ mutex_unlock(&dev->mutex);
+ }
+
++DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
++
+ static inline void device_lock_assert(struct device *dev)
+ {
+ lockdep_assert_held(&dev->mutex);
+--
+2.51.0
+
--- /dev/null
+From 218cdae1b856698d6af68be0cc296d3a0a8103de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 00:28:43 +0800
+Subject: driver core: enforce device_lock for driver_match_device()
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit dc23806a7c47ec5f1293aba407fb69519f976ee0 ]
+
+Currently, driver_match_device() is called from three sites. One site
+(__device_attach_driver) holds device_lock(dev), but the other two
+(bind_store and __driver_attach) do not. This inconsistency means that
+bus match() callbacks are not guaranteed to be called with the lock
+held.
+
+Fix this by introducing driver_match_device_locked(), which guarantees
+holding the device lock using a scoped guard. Replace the unlocked calls
+in bind_store() and __driver_attach() with this new helper. Also add a
+lock assertion to driver_match_device() to enforce this guarantee.
+
+This consistency also fixes a known race condition. The driver_override
+implementation relies on the device_lock, so the missing lock led to the
+use-after-free (UAF) reported in Bugzilla for buses using this field.
+
+Stress testing the two newly locked paths for 24 hours with
+CONFIG_PROVE_LOCKING and CONFIG_LOCKDEP enabled showed no UAF recurrence
+and no lockdep warnings.
+
+Cc: stable@vger.kernel.org
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220789
+Suggested-by: Qiu-ji Chen <chenqiuji666@gmail.com>
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Fixes: 49b420a13ff9 ("driver core: check bus->match without holding device lock")
+Reviewed-by: Danilo Krummrich <dakr@kernel.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Link: https://patch.msgid.link/20260113162843.12712-1-hanguidong02@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/base.h | 9 +++++++++
+ drivers/base/bus.c | 2 +-
+ drivers/base/dd.c | 2 +-
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 406d108e8510f..7aad54e1cf20b 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -144,10 +144,19 @@ extern void device_set_deferred_probe_reason(const struct device *dev,
+ static inline int driver_match_device(struct device_driver *drv,
+ struct device *dev)
+ {
++ device_lock_assert(dev);
++
+ return drv->bus->match ? drv->bus->match(dev, drv) : 1;
+ }
+ extern bool driver_allows_async_probing(struct device_driver *drv);
+
++static inline int driver_match_device_locked(const struct device_driver *drv,
++ struct device *dev)
++{
++ guard(device)(dev);
++ return driver_match_device(drv, dev);
++}
++
+ static inline void dev_sync_state(struct device *dev)
+ {
+ if (dev->bus->sync_state)
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 3ec131d877120..19af9e2469e4d 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -212,7 +212,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+- if (dev && driver_match_device(drv, dev)) {
++ if (dev && driver_match_device_locked(drv, dev)) {
+ err = device_driver_attach(drv, dev);
+ if (!err) {
+ /* success */
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index c0a6bc9c6d5f1..d17bc8279af68 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1138,7 +1138,7 @@ static int __driver_attach(struct device *dev, void *data)
+ * is an error.
+ */
+
+- ret = driver_match_device(drv, dev);
++ ret = driver_match_device_locked(drv, dev);
+ if (ret == 0) {
+ /* no match */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From bb2d8947c1c77bdcef30f62feccb41488fb0a044 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 16:53:54 -0800
+Subject: driver core: Make state_synced device attribute writeable
+
+From: Saravana Kannan <saravanak@google.com>
+
+[ Upstream commit f8fb576658a3e19796e2e1a12a5ec8f44dac02b6 ]
+
+If the file is written to and sync_state() hasn't been called for the
+device yet, then call sync_state() for the device independent of the
+state of its consumers.
+
+This is useful for supplier devices that have one or more consumers that
+don't have a driver but the consumers are in a state that don't use the
+resources supplied by the supplier device.
+
+This gives finer grained control than using the
+fw_devlink.sync_state=timeout kernel commandline parameter.
+
+Signed-off-by: Saravana Kannan <saravanak@google.com>
+Link: https://lore.kernel.org/r/20230304005355.746421-3-saravanak@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: dc23806a7c47 ("driver core: enforce device_lock for driver_match_device()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ABI/testing/sysfs-devices-state_synced | 5 ++++
+ drivers/base/base.h | 8 +++++++
+ drivers/base/core.c | 5 +---
+ drivers/base/dd.c | 23 ++++++++++++++++++-
+ 4 files changed, 36 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-state_synced b/Documentation/ABI/testing/sysfs-devices-state_synced
+index 0c922d7d02fc2..c64636ddac410 100644
+--- a/Documentation/ABI/testing/sysfs-devices-state_synced
++++ b/Documentation/ABI/testing/sysfs-devices-state_synced
+@@ -21,4 +21,9 @@ Description:
+ at the time the kernel starts are not affected or limited in
+ any way by sync_state() callbacks.
+
++ Writing "1" to this file will force a call to the device's
++ sync_state() function if it hasn't been called already. The
++ sync_state() call happens independent of the state of the
++ consumer devices.
++
+
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 2882af26392ab..406d108e8510f 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -148,6 +148,14 @@ static inline int driver_match_device(struct device_driver *drv,
+ }
+ extern bool driver_allows_async_probing(struct device_driver *drv);
+
++static inline void dev_sync_state(struct device *dev)
++{
++ if (dev->bus->sync_state)
++ dev->bus->sync_state(dev);
++ else if (dev->driver && dev->driver->sync_state)
++ dev->driver->sync_state(dev);
++}
++
+ extern int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+ extern void driver_remove_groups(struct device_driver *drv,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 4fc62624a95e2..db665370d3788 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1112,10 +1112,7 @@ static void device_links_flush_sync_list(struct list_head *list,
+ if (dev != dont_lock_dev)
+ device_lock(dev);
+
+- if (dev->bus->sync_state)
+- dev->bus->sync_state(dev);
+- else if (dev->driver && dev->driver->sync_state)
+- dev->driver->sync_state(dev);
++ dev_sync_state(dev);
+
+ if (dev != dont_lock_dev)
+ device_unlock(dev);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 0bd166ad6f130..c0a6bc9c6d5f1 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -492,6 +492,27 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
+ static atomic_t probe_count = ATOMIC_INIT(0);
+ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+
++static ssize_t state_synced_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ret = 0;
++
++ if (strcmp("1", buf))
++ return -EINVAL;
++
++ device_lock(dev);
++ if (!dev->state_synced) {
++ dev->state_synced = true;
++ dev_sync_state(dev);
++ } else {
++ ret = -EINVAL;
++ }
++ device_unlock(dev);
++
++ return ret ? ret : count;
++}
++
+ static ssize_t state_synced_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -503,7 +524,7 @@ static ssize_t state_synced_show(struct device *dev,
+
+ return sysfs_emit(buf, "%u\n", val);
+ }
+-static DEVICE_ATTR_RO(state_synced);
++static DEVICE_ATTR_RW(state_synced);
+
+
+ static int call_driver_probe(struct device *dev, struct device_driver *drv)
+--
+2.51.0
+
--- /dev/null
+From 01666ee10c12a5389a34eb1a2ee0d7d029d82ae7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 7bb26655cb3cc..74d27b564d564 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1539,11 +1539,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 7bca9ff7e59e1f0d481d7a377b2bff8bd5a89dab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 09:11:05 +0000
+Subject: fbcon: check return value of con2fb_acquire_newinfo()
+
+From: Andrey Vatoropin <a.vatoropin@crpt.ru>
+
+[ Upstream commit 011a0502801c8536f64141a2b61362c14f456544 ]
+
+If fbcon_open() fails when called from con2fb_acquire_newinfo() then
+info->fbcon_par pointer remains NULL which is later dereferenced.
+
+Add check for return value of the function con2fb_acquire_newinfo() to
+avoid it.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: d1baa4ffa677 ("fbcon: set_con2fb_map fixes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrey Vatoropin <a.vatoropin@crpt.ru>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 0039441f3769b..f4584681fa43d 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1008,7 +1008,8 @@ static void fbcon_init(struct vc_data *vc, bool init)
+ return;
+
+ if (!info->fbcon_par)
+- con2fb_acquire_newinfo(vc, info, vc->vc_num);
++ if (con2fb_acquire_newinfo(vc, info, vc->vc_num))
++ return;
+
+ /* If we are not the first console on this
+ fb, copy the font from that console */
+--
+2.51.0
+
--- /dev/null
+From 42b219440bca9669928661165ded9bb65558a8ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 23:03:27 +0200
+Subject: fbcon: Extract fbcon_open/release helpers
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+[ Upstream commit bd6026a8c4e6b7edf4bafcb71da885b284b8f4fd ]
+
+There's two minor behaviour changes in here:
+- in error paths we now consistently call fb_ops->fb_release
+- fb_release really can't fail (fbmem.c ignores it too) and there's no
+ reasonable cleanup we can do anyway.
+
+Note that everything in fbcon.c is protected by the big console_lock()
+lock (especially all the global variables), so the minor changes in
+ordering of setup/cleanup do not matter.
+
+v2: Explain a bit better why this is all correct (Sam)
+
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Claudio Suarez <cssk@net-c.es>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: Du Cheng <ducheng2@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-10-daniel.vetter@ffwll.ch
+Stable-dep-of: 011a0502801c ("fbcon: check return value of con2fb_acquire_newinfo()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 107 +++++++++++++++----------------
+ 1 file changed, 53 insertions(+), 54 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 4ad8618968715..7131af71a01ca 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -676,19 +676,37 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
+
+ #endif /* CONFIG_MISC_TILEBLITTING */
+
++static int fbcon_open(struct fb_info *info)
++{
++ if (!try_module_get(info->fbops->owner))
++ return -ENODEV;
++
++ if (info->fbops->fb_open &&
++ info->fbops->fb_open(info, 0)) {
++ module_put(info->fbops->owner);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static void fbcon_release(struct fb_info *info)
++{
++ if (info->fbops->fb_release)
++ info->fbops->fb_release(info, 0);
++
++ module_put(info->fbops->owner);
++}
+
+ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+ int unit, int oldidx)
+ {
+ struct fbcon_ops *ops = NULL;
+- int err = 0;
+-
+- if (!try_module_get(info->fbops->owner))
+- err = -ENODEV;
++ int err;
+
+- if (!err && info->fbops->fb_open &&
+- info->fbops->fb_open(info, 0))
+- err = -ENODEV;
++ err = fbcon_open(info);
++ if (err)
++ return err;
+
+ if (!err) {
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+@@ -709,7 +727,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+
+ if (err) {
+ con2fb_map[unit] = oldidx;
+- module_put(info->fbops->owner);
++ fbcon_release(info);
+ }
+
+ return err;
+@@ -720,45 +738,34 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
+ int oldidx, int found)
+ {
+ struct fbcon_ops *ops = oldinfo->fbcon_par;
+- int err = 0, ret;
++ int ret;
+
+- if (oldinfo->fbops->fb_release &&
+- oldinfo->fbops->fb_release(oldinfo, 0)) {
+- con2fb_map[unit] = oldidx;
+- if (!found && newinfo->fbops->fb_release)
+- newinfo->fbops->fb_release(newinfo, 0);
+- if (!found)
+- module_put(newinfo->fbops->owner);
+- err = -ENODEV;
+- }
++ fbcon_release(oldinfo);
+
+- if (!err) {
+- fbcon_del_cursor_work(oldinfo);
+- kfree(ops->cursor_state.mask);
+- kfree(ops->cursor_data);
+- kfree(ops->cursor_src);
+- kfree(ops->fontbuffer);
+- kfree(oldinfo->fbcon_par);
+- oldinfo->fbcon_par = NULL;
+- module_put(oldinfo->fbops->owner);
+- /*
+- If oldinfo and newinfo are driving the same hardware,
+- the fb_release() method of oldinfo may attempt to
+- restore the hardware state. This will leave the
+- newinfo in an undefined state. Thus, a call to
+- fb_set_par() may be needed for the newinfo.
+- */
+- if (newinfo && newinfo->fbops->fb_set_par) {
+- ret = newinfo->fbops->fb_set_par(newinfo);
++ fbcon_del_cursor_work(oldinfo);
++ kfree(ops->cursor_state.mask);
++ kfree(ops->cursor_data);
++ kfree(ops->cursor_src);
++ kfree(ops->fontbuffer);
++ kfree(oldinfo->fbcon_par);
++ oldinfo->fbcon_par = NULL;
++ /*
++ If oldinfo and newinfo are driving the same hardware,
++ the fb_release() method of oldinfo may attempt to
++ restore the hardware state. This will leave the
++ newinfo in an undefined state. Thus, a call to
++ fb_set_par() may be needed for the newinfo.
++ */
++ if (newinfo && newinfo->fbops->fb_set_par) {
++ ret = newinfo->fbops->fb_set_par(newinfo);
+
+- if (ret)
+- printk(KERN_ERR "con2fb_release_oldinfo: "
+- "detected unhandled fb_set_par error, "
+- "error code %d\n", ret);
+- }
++ if (ret)
++ printk(KERN_ERR "con2fb_release_oldinfo: "
++ "detected unhandled fb_set_par error, "
++ "error code %d\n", ret);
+ }
+
+- return err;
++ return 0;
+ }
+
+ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
+@@ -914,7 +921,6 @@ static const char *fbcon_startup(void)
+ struct fbcon_display *p = &fb_display[fg_console];
+ struct vc_data *vc = vc_cons[fg_console].d;
+ const struct font_desc *font = NULL;
+- struct module *owner;
+ struct fb_info *info = NULL;
+ struct fbcon_ops *ops;
+ int rows, cols;
+@@ -933,17 +939,12 @@ static const char *fbcon_startup(void)
+ if (!info)
+ return NULL;
+
+- owner = info->fbops->owner;
+- if (!try_module_get(owner))
++ if (fbcon_open(info))
+ return NULL;
+- if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
+- module_put(owner);
+- return NULL;
+- }
+
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ if (!ops) {
+- module_put(owner);
++ fbcon_release(info);
+ return NULL;
+ }
+
+@@ -3386,10 +3387,6 @@ static void fbcon_exit(void)
+ }
+
+ if (mapped) {
+- if (info->fbops->fb_release)
+- info->fbops->fb_release(info, 0);
+- module_put(info->fbops->owner);
+-
+ if (info->fbcon_par) {
+ struct fbcon_ops *ops = info->fbcon_par;
+
+@@ -3399,6 +3396,8 @@ static void fbcon_exit(void)
+ kfree(info->fbcon_par);
+ info->fbcon_par = NULL;
+ }
++
++ fbcon_release(info);
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From edfe70ae842631ac83fe3180a2e5816fb7a93ac9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 23:03:29 +0200
+Subject: fbcon: move more common code into fb_open()
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+[ Upstream commit d443d93864726ad68c0a741d1e7b03934a9af143 ]
+
+No idea why con2fb_acquire_newinfo() initializes much less than
+fbcon_startup(), but so be it. From a quick look most of the
+un-initialized stuff should be fairly harmless, but who knows.
+
+Note that the error handling for the con2fb_acquire_newinfo() failure
+case was very strange: Callers updated con2fb_map to the new value
+before calling this function, but upon error con2fb_acquire_newinfo
+reset it to the old value. Since I removed the call to fbcon_release
+anyway that strange error path was sticking out like a sore thumb,
+hence I removed it. Which also allows us to remove the oldidx
+parameter from that function.
+
+v2: Explain what's going on with oldidx and error paths (Sam)
+
+v3: Drop unused variable (0day)
+
+v4: Rebased over bisect fix in previous patch, unchanged end result.
+
+Acked-by: Sam Ravnborg <sam@ravnborg.org> (v2)
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: kernel test robot <lkp@intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Claudio Suarez <cssk@net-c.es>
+Cc: Du Cheng <ducheng2@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-12-daniel.vetter@ffwll.ch
+Stable-dep-of: 011a0502801c ("fbcon: check return value of con2fb_acquire_newinfo()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 75 +++++++++++++-------------------
+ 1 file changed, 30 insertions(+), 45 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 7131af71a01ca..0039441f3769b 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -676,8 +676,18 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
+
+ #endif /* CONFIG_MISC_TILEBLITTING */
+
++static void fbcon_release(struct fb_info *info)
++{
++ if (info->fbops->fb_release)
++ info->fbops->fb_release(info, 0);
++
++ module_put(info->fbops->owner);
++}
++
+ static int fbcon_open(struct fb_info *info)
+ {
++ struct fbcon_ops *ops;
++
+ if (!try_module_get(info->fbops->owner))
+ return -ENODEV;
+
+@@ -687,48 +697,31 @@ static int fbcon_open(struct fb_info *info)
+ return -ENODEV;
+ }
+
+- return 0;
+-}
++ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
++ if (!ops) {
++ fbcon_release(info);
++ return -ENOMEM;
++ }
+
+-static void fbcon_release(struct fb_info *info)
+-{
+- if (info->fbops->fb_release)
+- info->fbops->fb_release(info, 0);
++ INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
++ ops->info = info;
++ info->fbcon_par = ops;
++ ops->cur_blink_jiffies = HZ / 5;
+
+- module_put(info->fbops->owner);
++ return 0;
+ }
+
+ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+- int unit, int oldidx)
++ int unit)
+ {
+- struct fbcon_ops *ops = NULL;
+ int err;
+
+ err = fbcon_open(info);
+ if (err)
+ return err;
+
+- if (!err) {
+- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+- if (!ops)
+- err = -ENOMEM;
+- }
+-
+- if (!err) {
+- INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
+-
+- ops->cur_blink_jiffies = HZ / 5;
+- ops->info = info;
+- info->fbcon_par = ops;
+-
+- if (vc)
+- set_blitting_type(vc, info);
+- }
+-
+- if (err) {
+- con2fb_map[unit] = oldidx;
+- fbcon_release(info);
+- }
++ if (vc)
++ set_blitting_type(vc, info);
+
+ return err;
+ }
+@@ -840,9 +833,11 @@ static int set_con2fb_map(int unit, int newidx, int user)
+
+ found = search_fb_in_map(newidx);
+
+- con2fb_map[unit] = newidx;
+- if (!err && !found)
+- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
++ if (!err && !found) {
++ err = con2fb_acquire_newinfo(vc, info, unit);
++ if (!err)
++ con2fb_map[unit] = newidx;
++ }
+
+ /*
+ * If old fb is not mapped to any of the consoles,
+@@ -942,20 +937,10 @@ static const char *fbcon_startup(void)
+ if (fbcon_open(info))
+ return NULL;
+
+- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+- if (!ops) {
+- fbcon_release(info);
+- return NULL;
+- }
+-
+- INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
+-
++ ops = info->fbcon_par;
+ ops->currcon = -1;
+ ops->graphics = 1;
+ ops->cur_rotate = -1;
+- ops->cur_blink_jiffies = HZ / 5;
+- ops->info = info;
+- info->fbcon_par = ops;
+
+ p->con_rotate = initial_rotation;
+ if (p->con_rotate == -1)
+@@ -1023,7 +1008,7 @@ static void fbcon_init(struct vc_data *vc, bool init)
+ return;
+
+ if (!info->fbcon_par)
+- con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
++ con2fb_acquire_newinfo(vc, info, vc->vc_num);
+
+ /* If we are not the first console on this
+ fb, copy the font from that console */
+--
+2.51.0
+
--- /dev/null
+From 6b010f44feee0fb511efd411ab1794a5ac5eb8ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Apr 2022 23:03:24 +0200
+Subject: fbcon: Use delayed work for cursor
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+[ Upstream commit 3b0fb6ab25dda03f6077bf8fce9407bb0d4db6ea ]
+
+Allows us to delete a bunch of hand-rolled stuff using a timer plus a
+separate work). Also to simplify the code we initialize the
+cursor_work completely when we allocate the fbcon_ops structure,
+instead of trying to cope with console re-initialization.
+
+The motivation here is that fbcon code stops using the fb_info.queue,
+which helps with locking issues around cleanup and all that in a later
+patch.
+
+Also note that this allows us to ditch the hand-rolled work cleanup in
+fbcon_exit - we already call fbcon_del_cursor_timer, which takes care
+of everything. Plus this was racy anyway.
+
+v2:
+- Only INIT_DELAYED_WORK when kzalloc succeeded (Tetsuo)
+- Explain that we replace both the timer and a work with the combined
+ delayed_work (Javier)
+
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Claudio Suarez <cssk@net-c.es>
+Cc: Du Cheng <ducheng2@gmail.com>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220405210335.3434130-7-daniel.vetter@ffwll.ch
+Stable-dep-of: 011a0502801c ("fbcon: check return value of con2fb_acquire_newinfo()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbcon.c | 85 +++++++++++++-------------------
+ drivers/video/fbdev/core/fbcon.h | 4 +-
+ 2 files changed, 35 insertions(+), 54 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 8922595cc491d..4ad8618968715 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -342,8 +342,8 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
+
+ static void fb_flashcursor(struct work_struct *work)
+ {
+- struct fb_info *info = container_of(work, struct fb_info, queue);
+- struct fbcon_ops *ops = info->fbcon_par;
++ struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
++ struct fb_info *info;
+ struct vc_data *vc = NULL;
+ int c;
+ int mode;
+@@ -356,7 +356,10 @@ static void fb_flashcursor(struct work_struct *work)
+ if (ret == 0)
+ return;
+
+- if (ops && ops->currcon != -1)
++ /* protected by console_lock */
++ info = ops->info;
++
++ if (ops->currcon != -1)
+ vc = vc_cons[ops->currcon].d;
+
+ if (!vc || !con_is_visible(vc) ||
+@@ -372,42 +375,25 @@ static void fb_flashcursor(struct work_struct *work)
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+-}
+
+-static void cursor_timer_handler(struct timer_list *t)
+-{
+- struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
+- struct fb_info *info = ops->info;
+-
+- queue_work(system_power_efficient_wq, &info->queue);
+- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
++ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
++ ops->cur_blink_jiffies);
+ }
+
+-static void fbcon_add_cursor_timer(struct fb_info *info)
++static void fbcon_add_cursor_work(struct fb_info *info)
+ {
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
+- !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
+- !fbcon_cursor_noblink) {
+- if (!info->queue.func)
+- INIT_WORK(&info->queue, fb_flashcursor);
+-
+- timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
+- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
+- ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
+- }
++ if (!fbcon_cursor_noblink)
++ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
++ ops->cur_blink_jiffies);
+ }
+
+-static void fbcon_del_cursor_timer(struct fb_info *info)
++static void fbcon_del_cursor_work(struct fb_info *info)
+ {
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (info->queue.func == fb_flashcursor &&
+- ops->flags & FBCON_FLAGS_CURSOR_TIMER) {
+- del_timer_sync(&ops->cursor_timer);
+- ops->flags &= ~FBCON_FLAGS_CURSOR_TIMER;
+- }
++ cancel_delayed_work_sync(&ops->cursor_work);
+ }
+
+ #ifndef MODULE
+@@ -711,6 +697,8 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+ }
+
+ if (!err) {
++ INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
++
+ ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
+ info->fbcon_par = ops;
+@@ -745,7 +733,7 @@ static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
+ }
+
+ if (!err) {
+- fbcon_del_cursor_timer(oldinfo);
++ fbcon_del_cursor_work(oldinfo);
+ kfree(ops->cursor_state.mask);
+ kfree(ops->cursor_data);
+ kfree(ops->cursor_src);
+@@ -862,7 +850,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ logo_shown != FBCON_LOGO_DONTSHOW);
+
+ if (!found)
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
+ }
+@@ -959,6 +947,8 @@ static const char *fbcon_startup(void)
+ return NULL;
+ }
+
++ INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
++
+ ops->currcon = -1;
+ ops->graphics = 1;
+ ops->cur_rotate = -1;
+@@ -999,7 +989,7 @@ static const char *fbcon_startup(void)
+ info->var.yres,
+ info->var.bits_per_pixel);
+
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+ return display_desc;
+ }
+
+@@ -1185,7 +1175,7 @@ static void fbcon_deinit(struct vc_data *vc)
+ goto finished;
+
+ if (con_is_visible(vc))
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+
+ ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+@@ -1318,9 +1308,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
+ return;
+
+ if (vc->vc_cursor_type & CUR_SW)
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+ else
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+
+@@ -2126,14 +2116,14 @@ static bool fbcon_switch(struct vc_data *vc)
+ }
+
+ if (old_info != info)
+- fbcon_del_cursor_timer(old_info);
++ fbcon_del_cursor_work(old_info);
+ }
+
+ if (fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+ else
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+
+ set_blitting_type(vc, info);
+ ops->cursor_reset = 1;
+@@ -2241,9 +2231,9 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+
+ if (mode_switch || fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+ else
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+
+ return 0;
+ }
+@@ -3240,7 +3230,7 @@ static ssize_t show_cursor_blink(struct device *device,
+ if (!ops)
+ goto err;
+
+- blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
++ blink = delayed_work_pending(&ops->cursor_work);
+ err:
+ console_unlock();
+ return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+@@ -3269,10 +3259,10 @@ static ssize_t store_cursor_blink(struct device *device,
+
+ if (blink) {
+ fbcon_cursor_noblink = 0;
+- fbcon_add_cursor_timer(info);
++ fbcon_add_cursor_work(info);
+ } else {
+ fbcon_cursor_noblink = 1;
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+ }
+
+ err:
+@@ -3385,15 +3375,9 @@ static void fbcon_exit(void)
+ #endif
+
+ for_each_registered_fb(i) {
+- int pending = 0;
+-
+ mapped = 0;
+ info = registered_fb[i];
+
+- if (info->queue.func)
+- pending = cancel_work_sync(&info->queue);
+- pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
+-
+ for (j = first_fb_vc; j <= last_fb_vc; j++) {
+ if (con2fb_map[j] == i) {
+ mapped = 1;
+@@ -3409,15 +3393,12 @@ static void fbcon_exit(void)
+ if (info->fbcon_par) {
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- fbcon_del_cursor_timer(info);
++ fbcon_del_cursor_work(info);
+ kfree(ops->cursor_src);
+ kfree(ops->cursor_state.mask);
+ kfree(info->fbcon_par);
+ info->fbcon_par = NULL;
+ }
+-
+- if (info->queue.func == fb_flashcursor)
+- info->queue.func = NULL;
+ }
+ }
+ }
+diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
+index 3e1ec454b8aa3..a709e5796ef7e 100644
+--- a/drivers/video/fbdev/core/fbcon.h
++++ b/drivers/video/fbdev/core/fbcon.h
+@@ -14,11 +14,11 @@
+ #include <linux/types.h>
+ #include <linux/vt_buffer.h>
+ #include <linux/vt_kern.h>
++#include <linux/workqueue.h>
+
+ #include <asm/io.h>
+
+ #define FBCON_FLAGS_INIT 1
+-#define FBCON_FLAGS_CURSOR_TIMER 2
+
+ /*
+ * This is the interface between the low-level console driver and the
+@@ -68,7 +68,7 @@ struct fbcon_ops {
+ int (*update_start)(struct fb_info *info);
+ int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
+ struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
+- struct timer_list cursor_timer; /* Cursor timer */
++ struct delayed_work cursor_work; /* Cursor timer */
+ struct fb_cursor cursor_state;
+ struct fbcon_display *p;
+ struct fb_info *info;
+--
+2.51.0
+
--- /dev/null
+From 53e7db202cb6a72c2b4a41ec87c3afa21da745ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index f72b0ab7c784e..48e6e242f13eb 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From 4f0a8790153a29b62c6d038a723417be9c0f821a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 24 Jan 2026 10:55:46 +0900
+Subject: ksmbd: fix infinite loop caused by next_smb2_rcv_hdr_off reset in
+ error paths
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 010eb01ce23b34b50531448b0da391c7f05a72af ]
+
+The problem occurs when a signed request fails smb2 signature verification
+check. In __process_request(), if check_sign_req() returns an error,
+set_smb2_rsp_status(work, STATUS_ACCESS_DENIED) is called.
+set_smb2_rsp_status() set work->next_smb2_rcv_hdr_off as zero. By resetting
+next_smb2_rcv_hdr_off to zero, the pointer to the next command in the chain
+is lost. Consequently, is_chained_smb2_message() continues to point to
+the same request header instead of advancing. If the header's NextCommand
+field is non-zero, the function returns true, causing __handle_ksmbd_work()
+to repeatedly process the same failed request in an infinite loop.
+This results in the kernel log being flooded with "bad smb2 signature"
+messages and high CPU usage.
+
+This patch fixes the issue by changing the return value from
+SERVER_HANDLER_CONTINUE to SERVER_HANDLER_ABORT. This ensures that
+the processing loop terminates immediately rather than attempting to
+continue from an invalidated offset.
+
+Reported-by: tianshuo han <hantianshuo233@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/server.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 27d8d6c6fdacd..fe797e8fe9419 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -126,21 +126,21 @@ static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
+ andx_again:
+ if (command >= conn->max_cmds) {
+ conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+- return SERVER_HANDLER_CONTINUE;
++ return SERVER_HANDLER_ABORT;
+ }
+
+ cmds = &conn->cmds[command];
+ if (!cmds->proc) {
+ ksmbd_debug(SMB, "*** not implemented yet cmd = %x\n", command);
+ conn->ops->set_rsp_status(work, STATUS_NOT_IMPLEMENTED);
+- return SERVER_HANDLER_CONTINUE;
++ return SERVER_HANDLER_ABORT;
+ }
+
+ if (work->sess && conn->ops->is_sign_req(work, command)) {
+ ret = conn->ops->check_sign_req(work);
+ if (!ret) {
+ conn->ops->set_rsp_status(work, STATUS_ACCESS_DENIED);
+- return SERVER_HANDLER_CONTINUE;
++ return SERVER_HANDLER_ABORT;
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 20522d3d3736494eff157ab151bd63ee47366ad5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 15:29:33 +0100
+Subject: memory: mtk-smi: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 08c1aeaa45ce0fd18912e92c6705586c8aa5240f ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/5c35a33cfdc359842e034ddd2e9358f10e91fa1f.1702822744.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Stable-dep-of: 9dae65913b32 ("memory: mtk-smi: fix device leak on larb probe")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index c5fb51f73b341..c317bcf49ebcd 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -375,14 +375,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ return component_add(dev, &mtk_smi_larb_component_ops);
+ }
+
+-static int mtk_smi_larb_remove(struct platform_device *pdev)
++static void mtk_smi_larb_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+@@ -419,7 +418,7 @@ static const struct dev_pm_ops smi_larb_pm_ops = {
+
+ static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+- .remove = mtk_smi_larb_remove,
++ .remove_new = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+@@ -549,10 +548,9 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int mtk_smi_common_remove(struct platform_device *pdev)
++static void mtk_smi_common_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+@@ -588,7 +586,7 @@ static const struct dev_pm_ops smi_common_pm_ops = {
+
+ static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+- .remove = mtk_smi_common_remove,
++ .remove_new = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+--
+2.51.0
+
--- /dev/null
+From d7b5ac477af6de818b8bb85b258f847f7bf22030 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index c317bcf49ebcd..ae01b396cb453 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -382,6 +382,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From aebbf719f491c1fe14bb0cf6df1fe4c2918e401a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:38 +0100
+Subject: mfd: omap-usb-host: Convert to platform remove callback returning
+ void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 418d1e74f8597e0b2d5d0d6e1be8f1f47e68f0a4 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-11-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 24804ba508a3 ("mfd: omap-usb-host: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 787d2ae863752..b61fb9933aa85 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -818,13 +818,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+-static int usbhs_omap_remove(struct platform_device *pdev)
++static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+ /* remove children */
+ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+- return 0;
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+@@ -847,7 +846,7 @@ static struct platform_driver usbhs_omap_driver = {
+ .of_match_table = usbhs_omap_dt_ids,
+ },
+ .probe = usbhs_omap_probe,
+- .remove = usbhs_omap_remove,
++ .remove_new = usbhs_omap_remove,
+ };
+
+ MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+--
+2.51.0
+
--- /dev/null
+From b4feca3f58e80943c6121f2364898c751330e0c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:07:14 +0100
+Subject: mfd: omap-usb-host: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 24804ba508a3e240501c521685a1c4eb9f574f8e ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251219110714.23919-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index b61fb9933aa85..936faa0c26e09 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -822,8 +822,10 @@ static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+- /* remove children */
+- device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
++ if (pdev->dev.of_node)
++ of_platform_depopulate(&pdev->dev);
++ else
++ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+--
+2.51.0
+
--- /dev/null
+From 8696a83c659f756d72fbe16f4693bc4abec9eb1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:41 +0100
+Subject: mfd: qcom-pm8xxx: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 19ea1d3953017518d85db35b69b5aea9bc64d630 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-14-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 2f2734ba5273e..8831448371290 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -587,19 +587,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+ return 0;
+ }
+
+-static int pm8xxx_remove(struct platform_device *pdev)
++static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+ irq_domain_remove(chip->irqdomain);
+-
+- return 0;
+ }
+
+ static struct platform_driver pm8xxx_driver = {
+ .probe = pm8xxx_probe,
+- .remove = pm8xxx_remove,
++ .remove_new = pm8xxx_remove,
+ .driver = {
+ .name = "pm8xxx-core",
+ .of_match_table = pm8xxx_id_table,
+--
+2.51.0
+
--- /dev/null
+From be1ce7681c73cf67b75079f81b32449981814516 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:09:47 +0100
+Subject: mfd: qcom-pm8xxx: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 27a8acea47a93fea6ad0e2df4c20a9b51490e4d9 ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251219110947.24101-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 8831448371290..cbcbff3c95ecb 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -581,17 +581,11 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int pm8xxx_remove_child(struct device *dev, void *unused)
+-{
+- platform_device_unregister(to_platform_device(dev));
+- return 0;
+-}
+-
+ static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+- device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
++ of_platform_depopulate(&pdev->dev);
+ irq_domain_remove(chip->irqdomain);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 48354062beda33ef7f71d260de550f73b725a9c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Sep 2021 02:43:33 +0300
+Subject: mfd: qcom-pm8xxx: switch away from using chained IRQ handlers
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit d3546ccdce4bc07fcf0648bfe865dbcd6d961afc ]
+
+PM8xxx PMIC family uses GPIO as parent IRQ. Using it together with the
+irq_set_chained_handler_and_data() results in warnings from the GPIOLIB
+(see 461c1a7d4733 ("gpiolib: override irq_enable/disable"))
+as in this path the IRQ resources are not allocated (and thus the
+corresponding GPIO is not marked as used for the IRQ). Use request_irq so
+that the IRQ resources are properly set up.
+
+[ 0.803271] ------------[ cut here ]------------
+[ 0.803338] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3207 gpiochip_enable_irq+0xa4/0xa8
+[ 0.803470] Modules linked in:
+[ 0.803542] CPU: 3 PID: 1 Comm: swapper/0 Not tainted 5.14.0-rc6-next-20210820-postmarketos-qcom-apq8064+ #1
+[ 0.803645] Hardware name: Generic DT based system
+[ 0.803710] Backtrace:
+[ 0.803777] [<c0e3493c>] (dump_backtrace) from [<c0e34d00>] (show_stack+0x20/0x24)
+[ 0.803911] r7:00000c87 r6:c07062dc r5:60000093 r4:c11d0f54
+[ 0.803980] [<c0e34ce0>] (show_stack) from [<c0e38314>] (dump_stack_lvl+0x48/0x54)
+[ 0.804097] [<c0e382cc>] (dump_stack_lvl) from [<c0e38338>] (dump_stack+0x18/0x1c)
+[ 0.804217] r5:00000009 r4:c11fe208
+[ 0.804274] [<c0e38320>] (dump_stack) from [<c03219c8>] (__warn+0xfc/0x114)
+[ 0.804387] [<c03218cc>] (__warn) from [<c0e35334>] (warn_slowpath_fmt+0x74/0xd0)
+[ 0.804509] r7:c07062dc r6:00000c87 r5:c11fe208 r4:00000000
+[ 0.804577] [<c0e352c4>] (warn_slowpath_fmt) from [<c07062dc>] (gpiochip_enable_irq+0xa4/0xa8)
+[ 0.804716] r8:c27b6200 r7:c27aec00 r6:c27aec18 r5:cf77a448 r4:c02225f0
+[ 0.804789] [<c0706238>] (gpiochip_enable_irq) from [<c0706348>] (gpiochip_irq_enable+0x28/0x38)
+[ 0.804921] r5:cf77a448 r4:c27aec18
+[ 0.804977] [<c0706320>] (gpiochip_irq_enable) from [<c03897a0>] (irq_enable+0x48/0x78)
+[ 0.805111] r5:00000000 r4:c27aec00
+[ 0.805167] [<c0389758>] (irq_enable) from [<c0389850>] (__irq_startup+0x80/0xbc)
+[ 0.805286] r5:00000000 r4:c27aec00
+[ 0.805343] [<c03897d0>] (__irq_startup) from [<c038996c>] (irq_startup+0xe0/0x18c)
+[ 0.805468] r7:c27aec00 r6:00000001 r5:00000000 r4:c27aec00
+[ 0.805535] [<c038988c>] (irq_startup) from [<c0389a54>] (irq_activate_and_startup+0x3c/0x74)
+[ 0.805669] r7:c27aec00 r6:00000001 r5:c27aec00 r4:00000000
+[ 0.805736] [<c0389a18>] (irq_activate_and_startup) from [<c0389b58>] (__irq_do_set_handler+0xcc/0x1c0)
+[ 0.805875] r7:c27aec00 r6:c0383710 r5:c08a16b0 r4:00000001
+[ 0.805943] [<c0389a8c>] (__irq_do_set_handler) from [<c0389d80>] (irq_set_chained_handler_and_data+0x60/0x98)
+[ 0.806087] r7:c27b5c10 r6:c27aed40 r5:c08a16b0 r4:c27aec00
+[ 0.806154] [<c0389d20>] (irq_set_chained_handler_and_data) from [<c08a1660>] (pm8xxx_probe+0x1fc/0x24c)
+[ 0.806298] r6:0000003a r5:0000003a r4:c27b5c00
+[ 0.806359] [<c08a1464>] (pm8xxx_probe) from [<c0871420>] (platform_probe+0x6c/0xc8)
+[ 0.806495] r10:c2507080 r9:e8bea2cc r8:c165e0e0 r7:c165e0e0 r6:c15f08f8 r5:c27b5c10
+[ 0.806582] r4:00000000
+[ 0.806632] [<c08713b4>] (platform_probe) from [<c086e280>] (really_probe+0xe8/0x460)
+[ 0.806769] r7:c165e0e0 r6:c15f08f8 r5:00000000 r4:c27b5c10
+[ 0.806837] [<c086e198>] (really_probe) from [<c086e6a8>] (__driver_probe_device+0xb0/0x22c)
+[ 0.806975] r7:c27b5c10 r6:cf70fba4 r5:c15f08f8 r4:c27b5c10
+[ 0.807042] [<c086e5f8>] (__driver_probe_device) from [<c086e868>] (driver_probe_device+0x44/0xe0)
+[ 0.807188] r9:e8bea2cc r8:00000000 r7:c27b5c10 r6:cf70fba4 r5:c16ae4b4 r4:c16ae4b0
+[ 0.807271] [<c086e824>] (driver_probe_device) from [<c086ecd8>] (__device_attach_driver+0xb4/0x12c)
+[ 0.807421] r9:e8bea2cc r8:c15eec08 r7:c27b5c10 r6:cf70fba4 r5:c15f08f8 r4:00000001
+[ 0.807506] [<c086ec24>] (__device_attach_driver) from [<c086c06c>] (bus_for_each_drv+0x94/0xe4)
+[ 0.807651] r7:c16ae484 r6:c086ec24 r5:cf70fba4 r4:00000000
+[ 0.807718] [<c086bfd8>] (bus_for_each_drv) from [<c086e0e0>] (__device_attach+0x104/0x19c)
+[ 0.807852] r6:00000001 r5:c27b5c54 r4:c27b5c10
+[ 0.807913] [<c086dfdc>] (__device_attach) from [<c086eef4>] (device_initial_probe+0x1c/0x20)
+[ 0.808050] r6:c27b5c10 r5:c15ef1b0 r4:c27b5c10
+[ 0.808111] [<c086eed8>] (device_initial_probe) from [<c086d00c>] (bus_probe_device+0x94/0x9c)
+[ 0.808240] [<c086cf78>] (bus_probe_device) from [<c086a60c>] (device_add+0x404/0x8f4)
+[ 0.808370] r7:c16ae484 r6:c251ba10 r5:00000000 r4:c27b5c10
+[ 0.808439] [<c086a208>] (device_add) from [<c0a82f50>] (of_device_add+0x44/0x4c)
+[ 0.808581] r10:c144c854 r9:00000001 r8:e8bea314 r7:c251ba10 r6:00000000 r5:00000000
+[ 0.808669] r4:c27b5c00
+[ 0.808718] [<c0a82f0c>] (of_device_add) from [<c0a836cc>] (of_platform_device_create_pdata+0xa0/0xc8)
+[ 0.808850] [<c0a8362c>] (of_platform_device_create_pdata) from [<c0a83908>] (of_platform_bus_create+0x1f0/0x514)
+[ 0.809005] r9:00000001 r8:c251ba10 r7:00000000 r6:00000000 r5:00000000 r4:e8bea2b0
+[ 0.809086] [<c0a83718>] (of_platform_bus_create) from [<c0a83e04>] (of_platform_populate+0x98/0x128)
+[ 0.809233] r10:c144c854 r9:00000001 r8:c251ba10 r7:00000000 r6:00000000 r5:e8bea170
+[ 0.809321] r4:e8bea2b0
+[ 0.809371] [<c0a83d6c>] (of_platform_populate) from [<c0a83f20>] (devm_of_platform_populate+0x60/0xa8)
+[ 0.809521] r9:0000011d r8:c165e0e0 r7:e8bea170 r6:c2c34f40 r5:c2cac140 r4:c251ba10
+[ 0.809604] [<c0a83ec0>] (devm_of_platform_populate) from [<c08a212c>] (ssbi_probe+0x138/0x16c)
+[ 0.809738] r6:c2c34f40 r5:c251ba10 r4:ff822700
+[ 0.809800] [<c08a1ff4>] (ssbi_probe) from [<c0871420>] (platform_probe+0x6c/0xc8)
+[ 0.809923] r7:c165e0e0 r6:c15f0a80 r5:c251ba10 r4:00000000
+[ 0.809989] [<c08713b4>] (platform_probe) from [<c086e280>] (really_probe+0xe8/0x460)
+[ 0.810120] r7:c165e0e0 r6:c15f0a80 r5:00000000 r4:c251ba10
+[ 0.810187] [<c086e198>] (really_probe) from [<c086e6a8>] (__driver_probe_device+0xb0/0x22c)
+[ 0.810325] r7:c251ba10 r6:c15f0a80 r5:c15f0a80 r4:c251ba10
+[ 0.810393] [<c086e5f8>] (__driver_probe_device) from [<c086e868>] (driver_probe_device+0x44/0xe0)
+[ 0.810539] r9:0000011d r8:00000000 r7:c251ba10 r6:c15f0a80 r5:c16ae4b4 r4:c16ae4b0
+[ 0.810623] [<c086e824>] (driver_probe_device) from [<c086ee2c>] (__driver_attach+0xdc/0x188)
+[ 0.810766] r9:0000011d r8:c144c834 r7:00000000 r6:c15f0a80 r5:c251ba10 r4:00000000
+[ 0.810849] [<c086ed50>] (__driver_attach) from [<c086bf60>] (bus_for_each_dev+0x88/0xd4)
+[ 0.810985] r7:00000000 r6:c086ed50 r5:c15f0a80 r4:00000000
+[ 0.811052] [<c086bed8>] (bus_for_each_dev) from [<c086dad4>] (driver_attach+0x2c/0x30)
+[ 0.811182] r6:c15ef1b0 r5:c2c34e80 r4:c15f0a80
+[ 0.811243] [<c086daa8>] (driver_attach) from [<c086d2dc>] (bus_add_driver+0x180/0x21c)
+[ 0.811364] [<c086d15c>] (bus_add_driver) from [<c086fa6c>] (driver_register+0x84/0x118)
+[ 0.811492] r7:00000000 r6:ffffe000 r5:c1428210 r4:c15f0a80
+[ 0.811558] [<c086f9e8>] (driver_register) from [<c0871174>] (__platform_driver_register+0x2c/0x34)
+[ 0.811683] r5:c1428210 r4:c16524a0
+[ 0.811739] [<c0871148>] (__platform_driver_register) from [<c1428234>] (ssbi_driver_init+0x24/0x28)
+[ 0.811868] [<c1428210>] (ssbi_driver_init) from [<c0302394>] (do_one_initcall+0x68/0x2c8)
+[ 0.811990] [<c030232c>] (do_one_initcall) from [<c140147c>] (kernel_init_freeable+0x1dc/0x23c)
+[ 0.812135] r7:cf7b0400 r6:c130339c r5:00000007 r4:c147f6a0
+[ 0.812204] [<c14012a0>] (kernel_init_freeable) from [<c0e40e60>] (kernel_init+0x20/0x138)
+[ 0.812345] r10:00000000 r9:00000000 r8:00000000 r7:00000000 r6:00000000 r5:c0e40e40
+[ 0.812433] r4:00000000
+[ 0.812483] [<c0e40e40>] (kernel_init) from [<c0300150>] (ret_from_fork+0x14/0x24)
+[ 0.812596] Exception stack(0xcf70ffb0 to 0xcf70fff8)
+[ 0.812684] ffa0: 00000000 00000000 00000000 00000000
+[ 0.812809] ffc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+[ 0.812923] ffe0: 00000000 00000000 00000000 00000000 00000013 00000000
+[ 0.813008] r5:c0e40e40 r4:00000000
+[ 0.813075] ---[ end trace ad2443eee078d094 ]---
+
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Tested-by: David Heidelberg <david@ixit.cz> # on Nexus 7 (deb)
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Link: https://lore.kernel.org/r/20210925234333.2430755-1-dmitry.baryshkov@linaro.org
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 39 ++++++++++++++++-----------------------
+ 1 file changed, 16 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index ec18a04de3555..2f2734ba5273e 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -65,7 +65,7 @@
+ struct pm_irq_data {
+ int num_irqs;
+ struct irq_chip *irq_chip;
+- void (*irq_handler)(struct irq_desc *desc);
++ irq_handler_t irq_handler;
+ };
+
+ struct pm_irq_chip {
+@@ -169,19 +169,16 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
+ return ret;
+ }
+
+-static void pm8xxx_irq_handler(struct irq_desc *desc)
++static irqreturn_t pm8xxx_irq_handler(int irq, void *data)
+ {
+- struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+- struct irq_chip *irq_chip = irq_desc_get_chip(desc);
++ struct pm_irq_chip *chip = data;
+ unsigned int root;
+ int i, ret, masters = 0;
+
+- chained_irq_enter(irq_chip, desc);
+-
+ ret = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_ROOT, &root);
+ if (ret) {
+ pr_err("Can't read root status ret=%d\n", ret);
+- return;
++ return IRQ_NONE;
+ }
+
+ /* on pm8xxx series masters start from bit 1 of the root */
+@@ -192,7 +189,7 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
+ if (masters & (1 << i))
+ pm8xxx_irq_master_handler(chip, i);
+
+- chained_irq_exit(irq_chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
+@@ -230,19 +227,17 @@ static inline void pm8821_irq_master_handler(struct pm_irq_chip *chip,
+ pm8821_irq_block_handler(chip, master, block);
+ }
+
+-static void pm8821_irq_handler(struct irq_desc *desc)
++static irqreturn_t pm8821_irq_handler(int irq, void *data)
+ {
+- struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+- struct irq_chip *irq_chip = irq_desc_get_chip(desc);
++ struct pm_irq_chip *chip = data;
+ unsigned int master;
+ int ret;
+
+- chained_irq_enter(irq_chip, desc);
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER0, &master);
+ if (ret) {
+ pr_err("Failed to read master 0 ret=%d\n", ret);
+- goto done;
++ return IRQ_NONE;
+ }
+
+ /* bits 1 through 7 marks the first 7 blocks in master 0 */
+@@ -251,19 +246,18 @@ static void pm8821_irq_handler(struct irq_desc *desc)
+
+ /* bit 0 marks if master 1 contains any bits */
+ if (!(master & BIT(0)))
+- goto done;
++ return IRQ_NONE;
+
+ ret = regmap_read(chip->regmap,
+ PM8821_SSBI_REG_ADDR_IRQ_MASTER1, &master);
+ if (ret) {
+ pr_err("Failed to read master 1 ret=%d\n", ret);
+- goto done;
++ return IRQ_NONE;
+ }
+
+ pm8821_irq_master_handler(chip, 1, master);
+
+-done:
+- chained_irq_exit(irq_chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static void pm8xxx_irq_mask_ack(struct irq_data *d)
+@@ -574,14 +568,15 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ if (!chip->irqdomain)
+ return -ENODEV;
+
+- irq_set_chained_handler_and_data(irq, data->irq_handler, chip);
++ rc = devm_request_irq(&pdev->dev, irq, data->irq_handler, 0, dev_name(&pdev->dev), chip);
++ if (rc)
++ return rc;
++
+ irq_set_irq_wake(irq, 1);
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+- if (rc) {
+- irq_set_chained_handler_and_data(irq, NULL, NULL);
++ if (rc)
+ irq_domain_remove(chip->irqdomain);
+- }
+
+ return rc;
+ }
+@@ -594,11 +589,9 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+
+ static int pm8xxx_remove(struct platform_device *pdev)
+ {
+- int irq = platform_get_irq(pdev, 0);
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+- irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(chip->irqdomain);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From bc3a8920d8490c6389d8802d81e70a29f59115a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index 00a80f0adece4..7cea482f2d5f9 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -114,6 +114,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -139,7 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -346,6 +348,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From d349ff3fd472fc2f917581413427a8807a9c58b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Oct 2021 13:27:58 -0700
+Subject: scsi: ata: Call scsi_done() directly
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 58bf201dfc032eadbb31eaf817b467bed17f753d ]
+
+Conditional statements are faster than indirect calls. Hence call
+scsi_done() directly.
+
+Link: https://lore.kernel.org/r/20211007202923.2174984-4-bvanassche@acm.org
+Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: bb3a8154b1a1 ("ata: libata-scsi: refactor ata_scsi_translate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-sata.c | 2 +-
+ drivers/ata/libata-scsi.c | 14 +++++++-------
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
+index 7cacb2bfc3608..bac569736c937 100644
+--- a/drivers/ata/libata-sata.c
++++ b/drivers/ata/libata-sata.c
+@@ -1276,7 +1276,7 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
+ rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16);
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ }
+ return rc;
+ }
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index f91b88073232d..b57027206ae1e 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -634,7 +634,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+ qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
+ if (qc) {
+ qc->scsicmd = cmd;
+- qc->scsidone = cmd->scsi_done;
++ qc->scsidone = scsi_done;
+
+ qc->sg = scsi_sglist(cmd);
+ qc->n_elem = scsi_sg_count(cmd);
+@@ -643,7 +643,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+ qc->flags |= ATA_QCFLAG_QUIET;
+ } else {
+ cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ }
+
+ return qc;
+@@ -1750,14 +1750,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+
+ early_finish:
+ ata_qc_free(qc);
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ DPRINTK("EXIT - early finish (good or error)\n");
+ return 0;
+
+ err_did:
+ ata_qc_free(qc);
+ cmd->result = (DID_ERROR << 16);
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ err_mem:
+ DPRINTK("EXIT - internal\n");
+ return 0;
+@@ -4068,7 +4068,7 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+ DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
+ scmd->cmd_len, scsi_op, dev->cdb_len);
+ scmd->result = DID_ERROR << 16;
+- scmd->scsi_done(scmd);
++ scsi_done(scmd);
+ return 0;
+ }
+
+@@ -4110,7 +4110,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+ rc = __ata_scsi_queuecmd(cmd, dev);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16);
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ }
+
+ spin_unlock_irqrestore(ap->lock, irq_flags);
+@@ -4239,7 +4239,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ break;
+ }
+
+- cmd->scsi_done(cmd);
++ scsi_done(cmd);
+ }
+
+ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+--
+2.51.0
+
perf-fix-__perf_event_overflow-vs-perf_remove_from_c.patch
btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+memory-mtk-smi-convert-to-platform-remove-callback-r.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+arm-omap2-add-missing-of_node_put-before-break-and-r.patch
+arm-omap2-fix-reference-count-leaks-in-omap_control_.patch
+scsi-ata-call-scsi_done-directly.patch
+ata-libata-scsi-drop-dprintk-calls-for-cdb-translati.patch
+ata-libata-remove-pointless-vprintk-calls.patch
+ata-libata-scsi-refactor-ata_scsi_translate.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+bus-omap-ocp2scp-convert-to-platform-remove-callback.patch
+bus-omap-ocp2scp-fix-of-populate-on-driver-rebind.patch
+driver-core-make-state_synced-device-attribute-write.patch
+driver-core-add-a-guard-definition-for-the-device_lo.patch
+driver-core-enforce-device_lock-for-driver_match_dev.patch
+mfd-qcom-pm8xxx-switch-away-from-using-chained-irq-h.patch
+mfd-qcom-pm8xxx-convert-to-platform-remove-callback-.patch
+mfd-qcom-pm8xxx-fix-of-populate-on-driver-rebind.patch
+mfd-omap-usb-host-convert-to-platform-remove-callbac.patch
+mfd-omap-usb-host-fix-of-populate-on-driver-rebind.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+usb-cdns3-remove-redundant-if-branch.patch
+usb-cdns3-call-cdns_power_is_lost-only-once-in-cdns_.patch
+usb-cdns3-fix-role-switching-during-resume.patch
+alsa-hda-conexant-add-quirk-for-hp-zbook-studio-g4.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+ksmbd-fix-infinite-loop-caused-by-next_smb2_rcv_hdr_.patch
+fbcon-use-delayed-work-for-cursor.patch
+fbcon-extract-fbcon_open-release-helpers.patch
+fbcon-move-more-common-code-into-fb_open.patch
+fbcon-check-return-value-of-con2fb_acquire_newinfo.patch
+alsa-hda-conexant-fix-headphone-jack-handling-on-ace.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
--- /dev/null
+From 7378b03f64ab5754ac007177a1b7a4c3a4a8a55f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 18:36:49 +0100
+Subject: usb: cdns3: call cdns_power_is_lost() only once in cdns_resume()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Théo Lebrun <theo.lebrun@bootlin.com>
+
+[ Upstream commit 17c6526b333cfd89a4c888a6f7c876c8c326e5ae ]
+
+cdns_power_is_lost() does a register read.
+Call it only once rather than twice.
+
+Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Link: https://lore.kernel.org/r/20250205-s2r-cdns-v7-4-13658a271c3c@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index d272d7b82bec1..8e46fd36b0e56 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -523,11 +523,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
+
+ int cdns_resume(struct cdns *cdns)
+ {
++ bool power_lost = cdns_power_is_lost(cdns);
+ enum usb_role real_role;
+ bool role_changed = false;
+ int ret = 0;
+
+- if (cdns_power_is_lost(cdns)) {
++ if (power_lost) {
+ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+@@ -550,7 +551,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+
+ if (cdns->roles[cdns->role]->resume)
+- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
++ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e922f350f89762b2a9da4580fa244cdbe56f8eb7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 11:05:45 +0100
+Subject: usb: cdns3: fix role switching during resume
+
+From: Thomas Richard (TI) <thomas.richard@bootlin.com>
+
+[ Upstream commit 87e4b043b98a1d269be0b812f383881abee0ca45 ]
+
+If the role changes while we are suspended, the cdns3 driver switches to the
+new mode during resume. However, switching to host mode in this context
+causes a NULL pointer dereference.
+
+The host role's start() operation registers a xhci-hcd device, but its
+probe is deferred while we are in the resume path. The host role's resume()
+operation assumes the xhci-hcd device is already probed, which is not the
+case, leading to the dereference. Since the start() operation of the new
+role is already called, the resume operation can be skipped.
+
+So skip the resume operation for the new role if a role switch occurs
+during resume. Once the resume sequence is complete, the xhci-hcd device
+can be probed in case of host mode.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000208
+Mem abort info:
+...
+Data abort info:
+...
+[0000000000000208] pgd=0000000000000000, p4d=0000000000000000
+Internal error: Oops: 0000000096000004 [#1] SMP
+Modules linked in:
+CPU: 0 UID: 0 PID: 146 Comm: sh Not tainted
+6.19.0-rc7-00013-g6e64f4aabfae-dirty #135 PREEMPT
+Hardware name: Texas Instruments J7200 EVM (DT)
+pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : usb_hcd_is_primary_hcd+0x0/0x1c
+lr : cdns_host_resume+0x24/0x5c
+...
+Call trace:
+ usb_hcd_is_primary_hcd+0x0/0x1c (P)
+ cdns_resume+0x6c/0xbc
+ cdns3_controller_resume.isra.0+0xe8/0x17c
+ cdns3_plat_resume+0x18/0x24
+ platform_pm_resume+0x2c/0x68
+ dpm_run_callback+0x90/0x248
+ device_resume+0x100/0x24c
+ dpm_resume+0x190/0x2ec
+ dpm_resume_end+0x18/0x34
+ suspend_devices_and_enter+0x2b0/0xa44
+ pm_suspend+0x16c/0x5fc
+ state_store+0x80/0xec
+ kobj_attr_store+0x18/0x2c
+ sysfs_kf_write+0x7c/0x94
+ kernfs_fop_write_iter+0x130/0x1dc
+ vfs_write+0x240/0x370
+ ksys_write+0x70/0x108
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x10c
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0x108
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+Code: 52800003 f9407ca5 d63f00a0 17ffffe4 (f9410401)
+---[ end trace 0000000000000000 ]---
+
+Cc: stable <stable@kernel.org>
+Fixes: 2cf2581cd229 ("usb: cdns3: add power lost support for system resume")
+Signed-off-by: Thomas Richard (TI) <thomas.richard@bootlin.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://patch.msgid.link/20260130-usb-cdns3-fix-role-switching-during-resume-v1-1-44c456852b52@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 8e46fd36b0e56..93e93bb9a314f 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -550,7 +550,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+ }
+
+- if (cdns->roles[cdns->role]->resume)
++ if (!role_changed && cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 4db9681427052148bbb69fbf397bff311543b7b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Dec 2024 09:36:41 +0800
+Subject: usb: cdns3: remove redundant if branch
+
+From: Hongyu Xie <xiehongyu1@kylinos.cn>
+
+[ Upstream commit dedab674428f8a99468a4864c067128ba9ea83a6 ]
+
+cdns->role_sw->dev->driver_data gets set in routines showing below,
+cdns_init
+ sw_desc.driver_data = cdns;
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ dev_set_drvdata(&sw->dev, desc->driver_data);
+
+In cdns_resume,
+cdns->role = cdns_role_get(cdns->role_sw); //line redundant
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
+ dev_get_drvdata(&sw->dev)
+ return dev->driver_data
+return cdns->role;
+
+"line redundant" equals to,
+ cdns->role = cdns->role;
+
+So fix this if branch.
+
+Signed-off-by: Hongyu Xie <xiehongyu1@kylinos.cn>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/20241231013641.23908-1-xiehongyu1@kylinos.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 7242591b346bc..d272d7b82bec1 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -528,9 +528,7 @@ int cdns_resume(struct cdns *cdns)
+ int ret = 0;
+
+ if (cdns_power_is_lost(cdns)) {
+- if (cdns->role_sw) {
+- cdns->role = cdns_role_get(cdns->role_sw);
+- } else {
++ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+ ret = cdns_hw_role_switch(cdns);
+--
+2.51.0
+
--- /dev/null
+From e9d255662f2e9b92392f4b2916772df4e144c9c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:13:17 +0100
+Subject: ALSA: hda/conexant: Add quirk for HP ZBook Studio G4
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1585cf83e98db32463e5d54161b06a5f01fe9976 ]
+
+It was reported that we need the same quirk for HP ZBook Studio G4
+(SSID 103c:826b) as other HP models to make the mute-LED working.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/64d78753-b9ff-4c64-8920-64d8d31cd20c@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221002
+Link: https://patch.msgid.link/20260207131324.2428030-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a3d68b83ebd5f..643d1f7ba5ad3 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1099,6 +1099,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+--
+2.51.0
+
--- /dev/null
+From 8f5d3285f03fb3677c518f96d34d4dfecc783ac4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:44:11 +0100
+Subject: ALSA: hda/conexant: Fix headphone jack handling on Acer Swift SF314
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7bc0df86c2384bc1e2012a2c946f82305054da64 ]
+
+Acer Swift SF314 (SSID 1025:136d) needs a bit of tweaks of the pin
+configurations for NID 0x16 and 0x19 to make the headphone / headset
+jack working. NID 0x17 can remain as is for the working speaker, and
+the built-in mic is supported via SOF.
+
+Cc: <stable@vger.kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221086
+Link: https://patch.msgid.link/20260217104414.62911-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 643d1f7ba5ad3..e5837e47aa227 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -312,6 +312,7 @@ enum {
+ CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
+ CXT_FIXUP_HP_A_U,
++ CXT_FIXUP_ACER_SWIFT_HP,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -1042,6 +1043,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_a_u,
+ },
++ [CXT_FIXUP_ACER_SWIFT_HP] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0321403f }, /* Headphone */
++ { 0x19, 0x40f001f0 }, /* Mic */
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -1091,6 +1100,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x1025, 0x136d, "Acer Swift SF314", CXT_FIXUP_ACER_SWIFT_HP),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+--
+2.51.0
+
--- /dev/null
+From 7634b4a0314bc2e5db0c8794829632ec0130add5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:28 +0800
+Subject: arm64: dts: rockchip: Fix rk356x PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit f63ea193a404481f080ca2958f73e9f364682db9 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that there is no same address allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix it to
+sync with the vendor BSP.
+
+Fixes: 568a67e742df ("arm64: dts: rockchip: Fix rk356x PCIe register and range mappings")
+Fixes: 66b51ea7d70f ("arm64: dts: rockchip: Add rk3568 PCIe2x1 controller")
+Cc: stable@vger.kernel.org
+Cc: Andrew Powers-Holmes <aholmes@omnom.net>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-1-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3568.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk356x.dtsi | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index f1be76a54ceb0..4305fd20b5c32 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -97,7 +97,7 @@ pcie3x1: pcie@fe270000 {
+ <0x0 0xf2000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x40000000 0x3 0x40000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X1_POWERUP>;
+ reset-names = "pipe";
+@@ -150,7 +150,7 @@ pcie3x2: pcie@fe280000 {
+ <0x0 0xf0000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x80000000 0x3 0x80000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X2_POWERUP>;
+ reset-names = "pipe";
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index e5c88f0007253..05cc28f8f7669 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -985,7 +985,7 @@ pcie2x1: pcie@fe260000 {
+ power-domains = <&power RK3568_PD_PIPE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x00000000 0x3 0x00000000 0x0 0x40000000>;
+ resets = <&cru SRST_PCIE20_POWERUP>;
+ reset-names = "pipe";
+ #address-cells = <3>;
+--
+2.51.0
+
--- /dev/null
+From ba24fc817e35ce9e3e0bf2fcf7198a29d9ea8179 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 21:28:32 +0100
+Subject: bus: omap-ocp2scp: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 854f89a5b56354ba4135e0e1f0e57ab2caee59ee ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Link: https://lore.kernel.org/r/20231109202830.4124591-3-u.kleine-koenig@pengutronix.de
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Stable-dep-of: 5eb63e9bb65d ("bus: omap-ocp2scp: fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index e02d0656242b8..7d7479ba0a759 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -84,12 +84,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int omap_ocp2scp_remove(struct platform_device *pdev)
++static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+-
+- return 0;
+ }
+
+ #ifdef CONFIG_OF
+@@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+
+ static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+- .remove = omap_ocp2scp_remove,
++ .remove_new = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+--
+2.51.0
+
--- /dev/null
+From 374db677dddd0598064fc82e959137262c360ce7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:01:19 +0100
+Subject: bus: omap-ocp2scp: fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 5eb63e9bb65d88abde647ced50fe6ad40c11de1a ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251219110119.23507-1-johan@kernel.org
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index 7d7479ba0a759..87e290a3dc817 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -17,15 +17,6 @@
+ #define OCP2SCP_TIMING 0x18
+ #define SYNC2_MASK 0xf
+
+-static int ocp2scp_remove_devices(struct device *dev, void *c)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+-
+- return 0;
+-}
+-
+ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ {
+ int ret;
+@@ -79,7 +70,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+
+ err0:
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+
+ return ret;
+ }
+@@ -87,7 +78,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+ }
+
+ #ifdef CONFIG_OF
+--
+2.51.0
+
--- /dev/null
+From 34d8045c3830ce4bf9a8280c729571dd360e3868 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 0f6fb776b2298..5f1af6dfe7154 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -197,8 +197,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 4f611ffe613a7b5d9b39728100b2dff21d143a88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Dec 2023 15:02:35 -0800
+Subject: driver core: Add a guard() definition for the device_lock()
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+[ Upstream commit 134c6eaa6087d78c0e289931ca15ae7a5007670d ]
+
+At present there are ~200 usages of device_lock() in the kernel. Some of
+those usages lead to "goto unlock;" patterns which have proven to be
+error prone. Define a "device" guard() definition to allow for those to
+be cleaned up and prevent new ones from appearing.
+
+Link: http://lore.kernel.org/r/657897453dda8_269bd29492@dwillia2-mobl3.amr.corp.intel.com.notmuch
+Link: http://lore.kernel.org/r/6577b0c2a02df_a04c5294bb@dwillia2-xfh.jf.intel.com.notmuch
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
+Link: https://lore.kernel.org/r/170250854466.1522182.17555361077409628655.stgit@dwillia2-xfh.jf.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: dc23806a7c47 ("driver core: enforce device_lock for driver_match_device()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/device.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/device.h b/include/linux/device.h
+index cc84521795b14..cfc3b1330c79d 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -855,6 +855,8 @@ static inline void device_unlock(struct device *dev)
+ mutex_unlock(&dev->mutex);
+ }
+
++DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
++
+ static inline void device_lock_assert(struct device *dev)
+ {
+ lockdep_assert_held(&dev->mutex);
+--
+2.51.0
+
--- /dev/null
+From 43fcab97a217ccdc2da2d8644f88398dc061e1e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 00:28:43 +0800
+Subject: driver core: enforce device_lock for driver_match_device()
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit dc23806a7c47ec5f1293aba407fb69519f976ee0 ]
+
+Currently, driver_match_device() is called from three sites. One site
+(__device_attach_driver) holds device_lock(dev), but the other two
+(bind_store and __driver_attach) do not. This inconsistency means that
+bus match() callbacks are not guaranteed to be called with the lock
+held.
+
+Fix this by introducing driver_match_device_locked(), which guarantees
+holding the device lock using a scoped guard. Replace the unlocked calls
+in bind_store() and __driver_attach() with this new helper. Also add a
+lock assertion to driver_match_device() to enforce this guarantee.
+
+This consistency also fixes a known race condition. The driver_override
+implementation relies on the device_lock, so the missing lock led to the
+use-after-free (UAF) reported in Bugzilla for buses using this field.
+
+Stress testing the two newly locked paths for 24 hours with
+CONFIG_PROVE_LOCKING and CONFIG_LOCKDEP enabled showed no UAF recurrence
+and no lockdep warnings.
+
+Cc: stable@vger.kernel.org
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220789
+Suggested-by: Qiu-ji Chen <chenqiuji666@gmail.com>
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Fixes: 49b420a13ff9 ("driver core: check bus->match without holding device lock")
+Reviewed-by: Danilo Krummrich <dakr@kernel.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Link: https://patch.msgid.link/20260113162843.12712-1-hanguidong02@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/base/base.h | 9 +++++++++
+ drivers/base/bus.c | 2 +-
+ drivers/base/dd.c | 2 +-
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index 2a6cf004dedc3..4e06810efe3e0 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -144,10 +144,19 @@ extern void device_set_deferred_probe_reason(const struct device *dev,
+ static inline int driver_match_device(struct device_driver *drv,
+ struct device *dev)
+ {
++ device_lock_assert(dev);
++
+ return drv->bus->match ? drv->bus->match(dev, drv) : 1;
+ }
+ extern bool driver_allows_async_probing(struct device_driver *drv);
+
++static inline int driver_match_device_locked(const struct device_driver *drv,
++ struct device *dev)
++{
++ guard(device)(dev);
++ return driver_match_device(drv, dev);
++}
++
+ static inline void dev_sync_state(struct device *dev)
+ {
+ if (dev->bus->sync_state)
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 941532ddfdc68..78a64f2784d05 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -212,7 +212,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+- if (dev && driver_match_device(drv, dev)) {
++ if (dev && driver_match_device_locked(drv, dev)) {
+ err = device_driver_attach(drv, dev);
+ if (!err) {
+ /* success */
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 6ad1b6eae65d6..02c846be7b174 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1175,7 +1175,7 @@ static int __driver_attach(struct device *dev, void *data)
+ * is an error.
+ */
+
+- ret = driver_match_device(drv, dev);
++ ret = driver_match_device_locked(drv, dev);
+ if (ret == 0) {
+ /* no match */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 6ad71de4f55d593d05c06ec6c68424e5d971c907 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Mar 2023 16:53:54 -0800
+Subject: driver core: Make state_synced device attribute writeable
+
+From: Saravana Kannan <saravanak@google.com>
+
+[ Upstream commit f8fb576658a3e19796e2e1a12a5ec8f44dac02b6 ]
+
+If the file is written to and sync_state() hasn't been called for the
+device yet, then call sync_state() for the device independent of the
+state of its consumers.
+
+This is useful for supplier devices that have one or more consumers that
+don't have a driver but the consumers are in a state that don't use the
+resources supplied by the supplier device.
+
+This gives finer grained control than using the
+fw_devlink.sync_state=timeout kernel commandline parameter.
+
+Signed-off-by: Saravana Kannan <saravanak@google.com>
+Link: https://lore.kernel.org/r/20230304005355.746421-3-saravanak@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: dc23806a7c47 ("driver core: enforce device_lock for driver_match_device()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ABI/testing/sysfs-devices-state_synced | 5 ++++
+ drivers/base/base.h | 8 +++++++
+ drivers/base/core.c | 5 +---
+ drivers/base/dd.c | 23 ++++++++++++++++++-
+ 4 files changed, 36 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-state_synced b/Documentation/ABI/testing/sysfs-devices-state_synced
+index 0c922d7d02fc2..c64636ddac410 100644
+--- a/Documentation/ABI/testing/sysfs-devices-state_synced
++++ b/Documentation/ABI/testing/sysfs-devices-state_synced
+@@ -21,4 +21,9 @@ Description:
+ at the time the kernel starts are not affected or limited in
+ any way by sync_state() callbacks.
+
++ Writing "1" to this file will force a call to the device's
++ sync_state() function if it hasn't been called already. The
++ sync_state() call happens independent of the state of the
++ consumer devices.
++
+
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index b902d1ecc247f..2a6cf004dedc3 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -148,6 +148,14 @@ static inline int driver_match_device(struct device_driver *drv,
+ }
+ extern bool driver_allows_async_probing(struct device_driver *drv);
+
++static inline void dev_sync_state(struct device *dev)
++{
++ if (dev->bus->sync_state)
++ dev->bus->sync_state(dev);
++ else if (dev->driver && dev->driver->sync_state)
++ dev->driver->sync_state(dev);
++}
++
+ extern int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+ extern void driver_remove_groups(struct device_driver *drv,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index d985c4b87de5f..0f062032725ce 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1232,10 +1232,7 @@ static void device_links_flush_sync_list(struct list_head *list,
+ if (dev != dont_lock_dev)
+ device_lock(dev);
+
+- if (dev->bus->sync_state)
+- dev->bus->sync_state(dev);
+- else if (dev->driver && dev->driver->sync_state)
+- dev->driver->sync_state(dev);
++ dev_sync_state(dev);
+
+ if (dev != dont_lock_dev)
+ device_unlock(dev);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index dbbe2cebb8917..6ad1b6eae65d6 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -512,6 +512,27 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
+ static atomic_t probe_count = ATOMIC_INIT(0);
+ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+
++static ssize_t state_synced_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ret = 0;
++
++ if (strcmp("1", buf))
++ return -EINVAL;
++
++ device_lock(dev);
++ if (!dev->state_synced) {
++ dev->state_synced = true;
++ dev_sync_state(dev);
++ } else {
++ ret = -EINVAL;
++ }
++ device_unlock(dev);
++
++ return ret ? ret : count;
++}
++
+ static ssize_t state_synced_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -523,7 +544,7 @@ static ssize_t state_synced_show(struct device *dev,
+
+ return sysfs_emit(buf, "%u\n", val);
+ }
+-static DEVICE_ATTR_RO(state_synced);
++static DEVICE_ATTR_RW(state_synced);
+
+ static void device_unbind_cleanup(struct device *dev)
+ {
+--
+2.51.0
+
--- /dev/null
+From 45963c9b26e2640aba3d4ddd61ff0e6b03cf75fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Sep 2023 14:25:57 -0500
+Subject: drm/amd: Drop special case for yellow carp without discovery
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 3ef07651a5756e7de65615e18eacbf8822c23016 ]
+
+`amdgpu_gmc_get_vbios_allocations` has a special case for how to
+bring up yellow carp when amdgpu discovery is turned off. As this ASIC
+ships with discovery turned on, it's generally dead code and worse it
+causes `adev->mman.keep_stolen_vga_memory` to not be initialized for
+yellow carp.
+
+Remove it.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 096bb75e13cc ("drm/amdgpu: keep vga memory on MacBooks with switchable graphics")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index fd98d2508a22a..4bc05178504dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -652,12 +652,6 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
+- case CHIP_YELLOW_CARP:
+- if (amdgpu_discovery == 0) {
+- adev->mman.stolen_reserved_offset = 0x1ffb0000;
+- adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
+- }
+- break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+--
+2.51.0
+
--- /dev/null
+From 2aa972716617a1c657c8f4de5a1a11cc5b4764f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 10:02:32 -0500
+Subject: drm/amdgpu: keep vga memory on MacBooks with switchable graphics
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 096bb75e13cc508d3915b7604e356bcb12b17766 ]
+
+On Intel MacBookPros with switchable graphics, when the iGPU
+is enabled, the address of VRAM gets put at 0 in the dGPU's
+virtual address space. This is non-standard and seems to cause
+issues with the cursor if it ends up at 0. We have the framework
+to reserve memory at 0 in the address space, so enable it here if
+the vram start address is 0.
+
+Reviewed-and-tested-by: Mario Kleiner <mario.kleiner.de@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4302
+Cc: stable@vger.kernel.org
+Cc: Mario Kleiner <mario.kleiner.de@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 4bc05178504dc..3a1576e2f8e3b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -652,6 +652,16 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
++ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
++ case CHIP_POLARIS12:
++ /* MacBookPros with switchable graphics put VRAM at 0 when
++ * the iGPU is enabled which results in cursor issues if
++ * the cursor ends up at 0. Reserve vram at 0 in that case.
++ */
++ if (adev->gmc.vram_start == 0)
++ adev->mman.keep_stolen_vga_memory = true;
++ break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+--
+2.51.0
+
--- /dev/null
+From 680c8678868dcac203e7e594f0153561d06ca6b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 7bb26655cb3cc..74d27b564d564 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1539,11 +1539,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 9bf9a4739682b7ab30f065a3f82cfee4fdc10feb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 18:28:54 +0100
+Subject: ext4: convert bd_bitmap_page to bd_bitmap_folio
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 99b150d84e4939735cfce245e32e3d29312c68ec ]
+
+There is no need to make this a multi-page folio, so leave all the
+infrastructure around it in pages. But since we're locking it, playing
+with its refcount and checking whether it's uptodate, it needs to move
+to the folio API.
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20240416172900.244637-2-willy@infradead.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 98 ++++++++++++++++++++++++-----------------------
+ fs/ext4/mballoc.h | 2 +-
+ 2 files changed, 52 insertions(+), 48 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 899d7eb6df3dc..083e4904ed679 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1345,9 +1345,10 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ int block, pnum, poff;
+ int blocks_per_page;
+ struct page *page;
++ struct folio *folio;
+
+ e4b->bd_buddy_page = NULL;
+- e4b->bd_bitmap_page = NULL;
++ e4b->bd_bitmap_folio = NULL;
+
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
+ /*
+@@ -1358,12 +1359,13 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ block = group * 2;
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (!page)
+- return -ENOMEM;
+- BUG_ON(page->mapping != inode->i_mapping);
+- e4b->bd_bitmap_page = page;
+- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (IS_ERR(folio))
++ return PTR_ERR(folio);
++ BUG_ON(folio->mapping != inode->i_mapping);
++ e4b->bd_bitmap_folio = folio;
++ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+ if (blocks_per_page >= 2) {
+ /* buddy and bitmap are on the same page */
+@@ -1381,9 +1383,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+
+ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+ {
+- if (e4b->bd_bitmap_page) {
+- unlock_page(e4b->bd_bitmap_page);
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio) {
++ folio_unlock(e4b->bd_bitmap_folio);
++ folio_put(e4b->bd_bitmap_folio);
+ }
+ if (e4b->bd_buddy_page) {
+ unlock_page(e4b->bd_buddy_page);
+@@ -1403,6 +1405,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ struct ext4_group_info *this_grp;
+ struct ext4_buddy e4b;
+ struct page *page;
++ struct folio *folio;
+ int ret = 0;
+
+ might_sleep();
+@@ -1429,11 +1432,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+
+- page = e4b.bd_bitmap_page;
+- ret = ext4_mb_init_cache(page, NULL, gfp);
++ folio = e4b.bd_bitmap_folio;
++ ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+ if (ret)
+ goto err;
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+@@ -1475,6 +1478,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ int pnum;
+ int poff;
+ struct page *page;
++ struct folio *folio;
+ int ret;
+ struct ext4_group_info *grp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -1493,7 +1497,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ e4b->bd_sb = sb;
+ e4b->bd_group = group;
+ e4b->bd_buddy_page = NULL;
+- e4b->bd_bitmap_page = NULL;
++ e4b->bd_bitmap_folio = NULL;
+
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ /*
+@@ -1514,53 +1518,53 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+
+- /* we could use find_or_create_page(), but it locks page
+- * what we'd like to avoid in fast path ... */
+- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
+- if (page)
++ /* Avoid locking the folio in the fast path ... */
++ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
++ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (!IS_ERR(folio))
+ /*
+- * drop the page reference and try
+- * to get the page with lock. If we
++ * drop the folio reference and try
++ * to get the folio with lock. If we
+ * are not uptodate that implies
+- * somebody just created the page but
+- * is yet to initialize the same. So
++ * somebody just created the folio but
++ * is yet to initialize it. So
+ * wait for it to initialize.
+ */
+- put_page(page);
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (page) {
+- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
+- "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
++ folio_put(folio);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (!IS_ERR(folio)) {
++ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
++ "ext4: bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+- unlock_page(page);
++ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+- if (!PageUptodate(page)) {
+- ret = ext4_mb_init_cache(page, NULL, gfp);
++ if (!folio_test_uptodate(folio)) {
++ ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+ if (ret) {
+- unlock_page(page);
++ folio_unlock(folio);
+ goto err;
+ }
+- mb_cmp_bitmaps(e4b, page_address(page) +
++ mb_cmp_bitmaps(e4b, folio_address(folio) +
+ (poff * sb->s_blocksize));
+ }
+- unlock_page(page);
++ folio_unlock(folio);
+ }
+ }
+- if (page == NULL) {
+- ret = -ENOMEM;
++ if (IS_ERR(folio)) {
++ ret = PTR_ERR(folio);
+ goto err;
+ }
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Pages marked accessed already */
+- e4b->bd_bitmap_page = page;
+- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
++ e4b->bd_bitmap_folio = folio;
++ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+ block++;
+ pnum = block / blocks_per_page;
+@@ -1608,8 +1612,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ err:
+ if (page)
+ put_page(page);
+- if (e4b->bd_bitmap_page)
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio)
++ folio_put(e4b->bd_bitmap_folio);
+
+ e4b->bd_buddy = NULL;
+ e4b->bd_bitmap = NULL;
+@@ -1624,8 +1628,8 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+
+ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ {
+- if (e4b->bd_bitmap_page)
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio)
++ folio_put(e4b->bd_bitmap_folio);
+ if (e4b->bd_buddy_page)
+ put_page(e4b->bd_buddy_page);
+ }
+@@ -2050,7 +2054,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
+ * double allocate blocks. The reference is dropped
+ * in ext4_mb_release_context
+ */
+- ac->ac_bitmap_page = e4b->bd_bitmap_page;
++ ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
+ get_page(ac->ac_bitmap_page);
+ ac->ac_buddy_page = e4b->bd_buddy_page;
+ get_page(ac->ac_buddy_page);
+@@ -3715,7 +3719,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
+ * balance refcounts from ext4_mb_free_metadata()
+ */
+ put_page(e4b.bd_buddy_page);
+- put_page(e4b.bd_bitmap_page);
++ folio_put(e4b.bd_bitmap_folio);
+ }
+ ext4_unlock_group(sb, entry->efd_group);
+ ext4_mb_unload_buddy(&e4b);
+@@ -5888,7 +5892,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ struct rb_node *parent = NULL, *new_node;
+
+ BUG_ON(!ext4_handle_valid(handle));
+- BUG_ON(e4b->bd_bitmap_page == NULL);
++ BUG_ON(e4b->bd_bitmap_folio == NULL);
+ BUG_ON(e4b->bd_buddy_page == NULL);
+
+ new_node = &new_entry->efd_node;
+@@ -5901,7 +5905,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ * on-disk bitmap and lose not-yet-available
+ * blocks */
+ get_page(e4b->bd_buddy_page);
+- get_page(e4b->bd_bitmap_page);
++ folio_get(e4b->bd_bitmap_folio);
+ }
+ while (*n) {
+ parent = *n;
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 2d95fcab941f6..24e7c7a04f674 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -203,7 +203,7 @@ struct ext4_allocation_context {
+ struct ext4_buddy {
+ struct page *bd_buddy_page;
+ void *bd_buddy;
+- struct page *bd_bitmap_page;
++ struct folio *bd_bitmap_folio;
+ void *bd_bitmap;
+ struct ext4_group_info *bd_info;
+ struct super_block *bd_sb;
+--
+2.51.0
+
--- /dev/null
+From 59bc2b64c23cc29f4fb10fb79724c6163dbf06e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 18:28:55 +0100
+Subject: ext4: convert bd_buddy_page to bd_buddy_folio
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 5eea586b47f05b5f5518cf8f9dd9283a01a8066d ]
+
+There is no need to make this a multi-page folio, so leave all the
+infrastructure around it in pages. But since we're locking it, playing
+with its refcount and checking whether it's uptodate, it needs to move
+to the folio API.
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20240416172900.244637-3-willy@infradead.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 91 +++++++++++++++++++++++------------------------
+ fs/ext4/mballoc.h | 2 +-
+ 2 files changed, 46 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 083e4904ed679..19e5b57387d60 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1336,7 +1336,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ * Lock the buddy and bitmap pages. This make sure other parallel init_group
+ * on the same buddy page doesn't happen whild holding the buddy page lock.
+ * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
+- * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
++ * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
+ */
+ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
+@@ -1344,10 +1344,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
+ int block, pnum, poff;
+ int blocks_per_page;
+- struct page *page;
+ struct folio *folio;
+
+- e4b->bd_buddy_page = NULL;
++ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
+
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
+@@ -1373,11 +1372,12 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ }
+
+ /* blocks_per_page == 1, hence we need another page for the buddy */
+- page = find_or_create_page(inode->i_mapping, block + 1, gfp);
+- if (!page)
+- return -ENOMEM;
+- BUG_ON(page->mapping != inode->i_mapping);
+- e4b->bd_buddy_page = page;
++ folio = __filemap_get_folio(inode->i_mapping, block + 1,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (IS_ERR(folio))
++ return PTR_ERR(folio);
++ BUG_ON(folio->mapping != inode->i_mapping);
++ e4b->bd_buddy_folio = folio;
+ return 0;
+ }
+
+@@ -1387,9 +1387,9 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+ folio_unlock(e4b->bd_bitmap_folio);
+ folio_put(e4b->bd_bitmap_folio);
+ }
+- if (e4b->bd_buddy_page) {
+- unlock_page(e4b->bd_buddy_page);
+- put_page(e4b->bd_buddy_page);
++ if (e4b->bd_buddy_folio) {
++ folio_unlock(e4b->bd_buddy_folio);
++ folio_put(e4b->bd_buddy_folio);
+ }
+ }
+
+@@ -1404,7 +1404,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+
+ struct ext4_group_info *this_grp;
+ struct ext4_buddy e4b;
+- struct page *page;
+ struct folio *folio;
+ int ret = 0;
+
+@@ -1441,7 +1440,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+
+- if (e4b.bd_buddy_page == NULL) {
++ if (e4b.bd_buddy_folio == NULL) {
+ /*
+ * If both the bitmap and buddy are in
+ * the same page we don't need to force
+@@ -1451,11 +1450,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+ /* init buddy cache */
+- page = e4b.bd_buddy_page;
+- ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
++ folio = e4b.bd_buddy_folio;
++ ret = ext4_mb_init_cache(&folio->page, e4b.bd_bitmap, gfp);
+ if (ret)
+ goto err;
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+@@ -1477,7 +1476,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ int block;
+ int pnum;
+ int poff;
+- struct page *page;
+ struct folio *folio;
+ int ret;
+ struct ext4_group_info *grp;
+@@ -1496,7 +1494,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ e4b->bd_info = grp;
+ e4b->bd_sb = sb;
+ e4b->bd_group = group;
+- e4b->bd_buddy_page = NULL;
++ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
+
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+@@ -1562,7 +1560,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ goto err;
+ }
+
+- /* Pages marked accessed already */
++ /* Folios marked accessed already */
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+@@ -1570,48 +1568,49 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+
+- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
+- if (page)
+- put_page(page);
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (page) {
+- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
+- "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
++ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
++ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (!IS_ERR(folio))
++ folio_put(folio);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (!IS_ERR(folio)) {
++ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
++ "ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+- unlock_page(page);
++ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+- if (!PageUptodate(page)) {
+- ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
++ if (!folio_test_uptodate(folio)) {
++ ret = ext4_mb_init_cache(&folio->page, e4b->bd_bitmap,
+ gfp);
+ if (ret) {
+- unlock_page(page);
++ folio_unlock(folio);
+ goto err;
+ }
+ }
+- unlock_page(page);
++ folio_unlock(folio);
+ }
+ }
+- if (page == NULL) {
+- ret = -ENOMEM;
++ if (IS_ERR(folio)) {
++ ret = PTR_ERR(folio);
+ goto err;
+ }
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+
+- /* Pages marked accessed already */
+- e4b->bd_buddy_page = page;
+- e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
++ /* Folios marked accessed already */
++ e4b->bd_buddy_folio = folio;
++ e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
+
+ return 0;
+
+ err:
+- if (page)
+- put_page(page);
++ if (folio)
++ folio_put(folio);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+
+@@ -1630,8 +1629,8 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ {
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+- if (e4b->bd_buddy_page)
+- put_page(e4b->bd_buddy_page);
++ if (e4b->bd_buddy_folio)
++ folio_put(e4b->bd_buddy_folio);
+ }
+
+
+@@ -2056,7 +2055,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
+ */
+ ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
+ get_page(ac->ac_bitmap_page);
+- ac->ac_buddy_page = e4b->bd_buddy_page;
++ ac->ac_buddy_page = &e4b->bd_buddy_folio->page;
+ get_page(ac->ac_buddy_page);
+ /* store last allocated for subsequent stream allocation */
+ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
+@@ -3718,7 +3717,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
+ /* No more items in the per group rb tree
+ * balance refcounts from ext4_mb_free_metadata()
+ */
+- put_page(e4b.bd_buddy_page);
++ folio_put(e4b.bd_buddy_folio);
+ folio_put(e4b.bd_bitmap_folio);
+ }
+ ext4_unlock_group(sb, entry->efd_group);
+@@ -5893,7 +5892,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+
+ BUG_ON(!ext4_handle_valid(handle));
+ BUG_ON(e4b->bd_bitmap_folio == NULL);
+- BUG_ON(e4b->bd_buddy_page == NULL);
++ BUG_ON(e4b->bd_buddy_folio == NULL);
+
+ new_node = &new_entry->efd_node;
+ cluster = new_entry->efd_start_cluster;
+@@ -5904,7 +5903,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ * otherwise we'll refresh it from
+ * on-disk bitmap and lose not-yet-available
+ * blocks */
+- get_page(e4b->bd_buddy_page);
++ folio_get(e4b->bd_buddy_folio);
+ folio_get(e4b->bd_bitmap_folio);
+ }
+ while (*n) {
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 24e7c7a04f674..fe4dbbbbe8725 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -201,7 +201,7 @@ struct ext4_allocation_context {
+ #define AC_STATUS_BREAK 3
+
+ struct ext4_buddy {
+- struct page *bd_buddy_page;
++ struct folio *bd_buddy_folio;
+ void *bd_buddy;
+ struct folio *bd_bitmap_folio;
+ void *bd_bitmap;
+--
+2.51.0
+
--- /dev/null
+From 11456f1e7f8fd43116ec9285865f0107561666d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 17:07:32 -0400
+Subject: ext4: convert some BUG_ON's in mballoc to use WARN_RATELIMITED
+ instead
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+[ Upstream commit 19b8b035a776939ceb3de0f45aded4751d7849ef ]
+
+In cases where we have an obvious way of continuing, let's use
+WARN_RATELIMITED() instead of BUG_ON().
+
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 25 +++++++++++++++++++------
+ 1 file changed, 19 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7431ff97a68c8..2a385dc610704 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1531,7 +1531,13 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ put_page(page);
+ page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ if (page) {
+- BUG_ON(page->mapping != inode->i_mapping);
++ if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
++ "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
++ /* should never happen */
++ unlock_page(page);
++ ret = -EINVAL;
++ goto err;
++ }
+ if (!PageUptodate(page)) {
+ ret = ext4_mb_init_cache(page, NULL, gfp);
+ if (ret) {
+@@ -1567,7 +1573,13 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ put_page(page);
+ page = find_or_create_page(inode->i_mapping, pnum, gfp);
+ if (page) {
+- BUG_ON(page->mapping != inode->i_mapping);
++ if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
++ "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
++ /* should never happen */
++ unlock_page(page);
++ ret = -EINVAL;
++ goto err;
++ }
+ if (!PageUptodate(page)) {
+ ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
+ gfp);
+@@ -2286,7 +2298,9 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+ continue;
+
+ buddy = mb_find_buddy(e4b, i, &max);
+- BUG_ON(buddy == NULL);
++ if (WARN_RATELIMIT(buddy == NULL,
++ "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
++ continue;
+
+ k = mb_find_next_zero_bit(buddy, max, 0);
+ if (k >= max) {
+@@ -4312,15 +4326,14 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ if (ac->ac_f_ex.fe_len == 0)
+ return;
+ err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+- if (err) {
++ if (WARN_RATELIMIT(err,
++ "ext4: mb_load_buddy failed (%d)", err))
+ /*
+ * This should never happen since we pin the
+ * pages in the ext4_allocation_context so
+ * ext4_mb_load_buddy() should never fail.
+ */
+- WARN(1, "mb_load_buddy failed (%d)", err);
+ return;
+- }
+ ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+ mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+ ac->ac_f_ex.fe_len);
+--
+2.51.0
+
--- /dev/null
+From 0d7deff40dcb876a7530df9c8888e5c63cb9b2e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Oct 2023 11:52:15 +0800
+Subject: ext4: delete redundant calculations in ext4_mb_get_buddy_page_lock()
+
+From: Gou Hao <gouhao@uniontech.com>
+
+[ Upstream commit f2fec3e99a32d7c14dbf63c824f8286ebc94b18d ]
+
+'blocks_per_page' is always 1 after 'if (blocks_per_page >= 2)',
+'pnum' and 'block' are equal in this case.
+
+Signed-off-by: Gou Hao <gouhao@uniontech.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20231024035215.29474-1-gouhao@uniontech.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 2a385dc610704..899d7eb6df3dc 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1370,9 +1370,8 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ return 0;
+ }
+
+- block++;
+- pnum = block / blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
++ /* blocks_per_page == 1, hence we need another page for the buddy */
++ page = find_or_create_page(inode->i_mapping, block + 1, gfp);
+ if (!page)
+ return -ENOMEM;
+ BUG_ON(page->mapping != inode->i_mapping);
+--
+2.51.0
+
--- /dev/null
+From 60a475f0c3513c2b62c41e8e86fbf0e972bdaa26 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:34 +0800
+Subject: ext4: don't zero the entire extent if EXT4_EXT_DATA_PARTIAL_VALID1
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 1bf6974822d1dba86cf11b5f05498581cf3488a2 ]
+
+When allocating initialized blocks from a large unwritten extent, or
+when splitting an unwritten extent during end I/O and converting it to
+initialized, there is currently a potential issue of stale data if the
+extent needs to be split in the middle.
+
+ 0 A B N
+ [UUUUUUUUUUUU] U: unwritten extent
+ [--DDDDDDDD--] D: valid data
+ |<- ->| ----> this range needs to be initialized
+
+ext4_split_extent() first try to split this extent at B with
+EXT4_EXT_DATA_ENTIRE_VALID1 and EXT4_EXT_MAY_ZEROOUT flag set, but
+ext4_split_extent_at() failed to split this extent due to temporary lack
+of space. It zeroout B to N and mark the entire extent from 0 to N
+as written.
+
+ 0 A B N
+ [WWWWWWWWWWWW] W: written extent
+ [SSDDDDDDDDZZ] Z: zeroed, S: stale data
+
+ext4_split_extent() then try to split this extent at A with
+EXT4_EXT_DATA_VALID2 flag set. This time, it split successfully and left
+a stale written extent from 0 to A.
+
+ 0 A B N
+ [WW|WWWWWWWWWW]
+ [SS|DDDDDDDDZZ]
+
+Fix this by pass EXT4_EXT_DATA_PARTIAL_VALID1 to ext4_split_extent_at()
+when splitting at B, don't convert the entire extent to written and left
+it as unwritten after zeroing out B to N. The remaining work is just
+like the standard two-part split. ext4_split_extent() will pass the
+EXT4_EXT_DATA_VALID2 flag when it calls ext4_split_extent_at() for the
+second time, allowing it to properly handle the split. If the split is
+successful, it will keep extent from 0 to A as unwritten.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-3-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index e2f9c27c7e161..da7414e84ead8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3298,6 +3298,15 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ }
+
+ if (!err) {
++ /*
++ * The first half contains partially valid data, the
++ * splitting of this extent has not been completed, fix
++ * extent length and ext4_split_extent() split will the
++ * first half again.
++ */
++ if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
++ goto fix_extent_len;
++
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+@@ -3373,7 +3382,9 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
++ split_flag1 |= map->m_lblk > ee_block ?
++ EXT4_EXT_DATA_PARTIAL_VALID1 :
++ EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+--
+2.51.0
+
--- /dev/null
+From 2ca7fcab79b19d8f98c69a373b41f43948ac204b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:38 +0800
+Subject: ext4: drop extent cache after doing PARTIAL_VALID1 zeroout
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 6d882ea3b0931b43530d44149b79fcd4ffc13030 ]
+
+When splitting an unwritten extent in the middle and converting it to
+initialized in ext4_split_extent() with the EXT4_EXT_MAY_ZEROOUT and
+EXT4_EXT_DATA_VALID2 flags set, it could leave a stale unwritten extent.
+
+Assume we have an unwritten file and buffered write in the middle of it
+without dioread_nolock enabled, it will allocate blocks as written
+extent.
+
+ 0 A B N
+ [UUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDD--] D: valid data
+ |<- ->| ----> this range needs to be initialized
+
+ext4_split_extent() first try to split this extent at B with
+EXT4_EXT_DATA_PARTIAL_VALID1 and EXT4_EXT_MAY_ZEROOUT flag set, but
+ext4_split_extent_at() failed to split this extent due to temporary lack
+of space. It zeroout B to N and leave the entire extent as unwritten.
+
+ 0 A B N
+ [UUUUUUUUUUUU] on-disk extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDDZZ] Z: zeroed data
+
+ext4_split_extent() then try to split this extent at A with
+EXT4_EXT_DATA_VALID2 flag set. This time, it split successfully and
+leave an written extent from A to N.
+
+ 0 A B N
+ [UUWWWWWWWWWW] on-disk extent W: written extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDDZZ]
+
+Finally ext4_map_create_blocks() only insert extent A to B to the extent
+status tree, and leave an stale unwritten extent in the status tree.
+
+ 0 A B N
+ [UUWWWWWWWWWW] on-disk extent W: written extent
+ [UUWWWWWWWWUU] extent status tree
+ [--DDDDDDDDZZ]
+
+Fix this issue by always caching the extent status entry after zeroing
+out the second part.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Message-ID: <20251129103247.686136-7-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index da7414e84ead8..30b0b25aac9ff 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3304,8 +3304,16 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * extent length and ext4_split_extent() split will the
+ * first half again.
+ */
+- if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
++ if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1) {
++ /*
++ * Drop extent cache to prevent stale unwritten
++ * extents remaining after zeroing out.
++ */
++ ext4_es_remove_extent(inode,
++ le32_to_cpu(zero_ex.ee_block),
++ ext4_ext_get_actual_len(&zero_ex));
+ goto fix_extent_len;
++ }
+
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+--
+2.51.0
+
--- /dev/null
+From f86eaaf249684fd492028cbde05230ce0f17040c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:39 +0800
+Subject: ext4: drop extent cache when splitting extent fails
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 79b592e8f1b435796cbc2722190368e3e8ffd7a1 ]
+
+When the split extent fails, we might leave some extents still being
+processed and return an error directly, which will result in stale
+extent entries remaining in the extent status tree. So drop all of the
+remaining potentially stale extents if the splitting fails.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Message-ID: <20251129103247.686136-8-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 30b0b25aac9ff..bb27c04798d2b 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3252,7 +3252,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return path;
++ goto out_path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3266,7 +3266,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return path;
++ goto out_path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -3343,6 +3343,10 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
++out_path:
++ if (IS_ERR(path))
++ /* Remove all remaining potentially stale extents. */
++ ext4_es_remove_extent(inode, ee_block, ee_len);
+ ext4_ext_show_leaf(inode, path);
+ return path;
+ }
+--
+2.51.0
+
--- /dev/null
+From 8eb91fc9f27360846321ecc3684e3577eaf0e9c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 17:08:20 +0800
+Subject: ext4: fix e4b bitmap inconsistency reports
+
+From: Yongjian Sun <sunyongjian1@huawei.com>
+
+[ Upstream commit bdc56a9c46b2a99c12313122b9352b619a2e719e ]
+
+A bitmap inconsistency issue was observed during stress tests under
+mixed huge-page workloads. Ext4 reported multiple e4b bitmap check
+failures like:
+
+ext4_mb_complex_scan_group:2508: group 350, 8179 free clusters as
+per group info. But got 8192 blocks
+
+Analysis and experimentation confirmed that the issue is caused by a
+race condition between page migration and bitmap modification. Although
+this timing window is extremely narrow, it is still hit in practice:
+
+folio_lock ext4_mb_load_buddy
+__migrate_folio
+ check ref count
+ folio_mc_copy __filemap_get_folio
+ folio_try_get(folio)
+ ......
+ mb_mark_used
+ ext4_mb_unload_buddy
+ __folio_migrate_mapping
+ folio_ref_freeze
+folio_unlock
+
+The root cause of this issue is that the fast path of load_buddy only
+increments the folio's reference count, which is insufficient to prevent
+concurrent folio migration. We observed that the folio migration process
+acquires the folio lock. Therefore, we can determine whether to take the
+fast path in load_buddy by checking the lock status. If the folio is
+locked, we opt for the slow path (which acquires the lock) to close this
+concurrency window.
+
+Additionally, this change addresses the following issues:
+
+When the DOUBLE_CHECK macro is enabled to inspect bitmap-related
+issues, the following error may be triggered:
+
+corruption in group 324 at byte 784(6272): f in copy != ff on
+disk/prealloc
+
+Analysis reveals that this is a false positive. There is a specific race
+window where the bitmap and the group descriptor become momentarily
+inconsistent, leading to this error report:
+
+ext4_mb_load_buddy ext4_mb_load_buddy
+ __filemap_get_folio(create|lock)
+ folio_lock
+ ext4_mb_init_cache
+ folio_mark_uptodate
+ __filemap_get_folio(no lock)
+ ......
+ mb_mark_used
+ mb_mark_used_double
+ mb_cmp_bitmaps
+ mb_set_bits(e4b->bd_bitmap)
+ folio_unlock
+
+The original logic assumed that since mb_cmp_bitmaps is called when the
+bitmap is newly loaded from disk, the folio lock would be sufficient to
+prevent concurrent access. However, this overlooks a specific race
+condition: if another process attempts to load buddy and finds the folio
+is already in an uptodate state, it will immediately begin using it
+without holding the folio lock.
+
+Signed-off-by: Yongjian Sun <sunyongjian1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260106090820.836242-1-sunyongjian@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 19e5b57387d60..93e05e6159fb8 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1518,16 +1518,17 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+
+ /* Avoid locking the folio in the fast path ... */
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+- if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) {
++ /*
++ * folio_test_locked is employed to detect ongoing folio
++ * migrations, since concurrent migrations can lead to
++ * bitmap inconsistency. And if we are not uptodate that
++ * implies somebody just created the folio but is yet to
++ * initialize it. We can drop the folio reference and
++ * try to get the folio with lock in both cases to avoid
++ * concurrency.
++ */
+ if (!IS_ERR(folio))
+- /*
+- * drop the folio reference and try
+- * to get the folio with lock. If we
+- * are not uptodate that implies
+- * somebody just created the folio but
+- * is yet to initialize it. So
+- * wait for it to initialize.
+- */
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+@@ -1569,7 +1570,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ poff = block % blocks_per_page;
+
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+- if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) {
+ if (!IS_ERR(folio))
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+--
+2.51.0
+
--- /dev/null
+From 0e0a0b01d63c2f9154b81923ed5d32c76b11c2f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:33 +0800
+Subject: ext4: get rid of ppath in ext4_ext_create_new_leaf()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit a000bc8678cc2bb10a5b80b4e991e77c7b4612fd ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_create_new_leaf(), the following is
+done here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-14-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 43 ++++++++++++++++++++++---------------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a58f415f882b2..eda6f92a42330 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1392,13 +1392,12 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ * finds empty index and adds new leaf.
+ * if no free index is found, then it requests in-depth growing.
+ */
+-static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+- unsigned int mb_flags,
+- unsigned int gb_flags,
+- struct ext4_ext_path **ppath,
+- struct ext4_extent *newext)
++static struct ext4_ext_path *
++ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
++ unsigned int mb_flags, unsigned int gb_flags,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_ext_path *curp;
+ int depth, i, err = 0;
+
+@@ -1419,28 +1418,25 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ * entry: create all needed subtree and add new leaf */
+ err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
+ if (err)
+- goto out;
++ goto errout;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- if (IS_ERR(path))
+- err = PTR_ERR(path);
++ return path;
+ } else {
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+- goto out;
++ goto errout;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- goto out;
+- }
++ if (IS_ERR(path))
++ return path;
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+@@ -1452,9 +1448,11 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
+-out:
+- *ppath = IS_ERR(path) ? NULL : path;
+- return err;
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ /*
+@@ -2097,11 +2095,14 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ */
+ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+ mb_flags |= EXT4_MB_USE_RESERVED;
+- err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+- ppath, newext);
+- if (err)
++ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
++ path, newext);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ err = PTR_ERR(path);
+ goto cleanup;
+- path = *ppath;
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+--
+2.51.0
+
--- /dev/null
+From cf0403bf760e7103567beccf2be9256bf89f0d2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:34 +0800
+Subject: ext4: get rid of ppath in ext4_ext_insert_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit f7d1331f16a869c76a5102caebb58e840e1d509c ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_insert_extent(), the following is done
+here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+ * Free path when npath is used, free npath when it is not used.
+ * The got_allocated_blocks label in ext4_ext_map_blocks() does not
+ update err now, so err is updated to 0 if the err returned by
+ ext4_ext_search_right() is greater than 0 and is about to enter
+ got_allocated_blocks.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-15-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 7 ++--
+ fs/ext4/extents.c | 88 ++++++++++++++++++++++++-------------------
+ fs/ext4/fast_commit.c | 8 ++--
+ fs/ext4/migrate.c | 5 ++-
+ 4 files changed, 61 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 490496adf17cc..7449777fabc36 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3719,9 +3719,10 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+ int num,
+ struct ext4_ext_path *path);
+-extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+- struct ext4_ext_path **,
+- struct ext4_extent *, int);
++extern struct ext4_ext_path *ext4_ext_insert_extent(
++ handle_t *handle, struct inode *inode,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext, int gb_flags);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+ struct ext4_ext_path *,
+ int flags);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index eda6f92a42330..59c0bffc691d1 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1960,16 +1960,15 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ * inserts requested extent as new one into the tree,
+ * creating new leaf in the no-space case.
+ */
+-int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path **ppath,
+- struct ext4_extent *newext, int gb_flags)
++struct ext4_ext_path *
++ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext, int gb_flags)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_extent_header *eh;
+ struct ext4_extent *ex, *fex;
+ struct ext4_extent *nearex; /* nearest extent */
+- struct ext4_ext_path *npath = NULL;
+- int depth, len, err;
++ int depth, len, err = 0;
+ ext4_lblk_t next;
+ int mb_flags = 0, unwritten;
+
+@@ -1977,14 +1976,16 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_DELALLOC_RESERVED;
+ if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
+ EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
+- return -EFSCORRUPTED;
++ err = -EFSCORRUPTED;
++ goto errout;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ eh = path[depth].p_hdr;
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+- return -EFSCORRUPTED;
++ err = -EFSCORRUPTED;
++ goto errout;
+ }
+
+ /* try to insert block into found extent and return */
+@@ -2022,7 +2023,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- return err;
++ goto errout;
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(newext));
+@@ -2047,7 +2048,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- return err;
++ goto errout;
+
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_block = newext->ee_block;
+@@ -2072,21 +2073,26 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+ next = ext4_ext_next_leaf_block(path);
+ if (next != EXT_MAX_BLOCKS) {
++ struct ext4_ext_path *npath;
++
+ ext_debug(inode, "next leaf block - %u\n", next);
+- BUG_ON(npath != NULL);
+ npath = ext4_find_extent(inode, next, NULL, gb_flags);
+- if (IS_ERR(npath))
+- return PTR_ERR(npath);
++ if (IS_ERR(npath)) {
++ err = PTR_ERR(npath);
++ goto errout;
++ }
+ BUG_ON(npath->p_depth != path->p_depth);
+ eh = npath[depth].p_hdr;
+ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
+ ext_debug(inode, "next leaf isn't full(%d)\n",
+ le16_to_cpu(eh->eh_entries));
++ ext4_free_ext_path(path);
+ path = npath;
+ goto has_space;
+ }
+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
++ ext4_free_ext_path(npath);
+ }
+
+ /*
+@@ -2097,12 +2103,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_USE_RESERVED;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- err = PTR_ERR(path);
+- goto cleanup;
+- }
+- *ppath = path;
++ if (IS_ERR(path))
++ return path;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+@@ -2111,7 +2113,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto cleanup;
++ goto errout;
+
+ if (!nearex) {
+ /* there is no extent in this leaf, create first one */
+@@ -2169,17 +2171,20 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
+ ext4_ext_try_to_merge(handle, inode, path, nearex);
+
+-
+ /* time to correct all indexes above */
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+- goto cleanup;
++ goto errout;
+
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
++ if (err)
++ goto errout;
+
+-cleanup:
+- ext4_free_ext_path(npath);
+- return err;
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ static int ext4_fill_es_cache_info(struct inode *inode,
+@@ -3232,24 +3237,29 @@ static int ext4_split_extent_at(handle_t *handle,
+ if (split_flag & EXT4_EXT_MARK_UNWRIT2)
+ ext4_ext_mark_unwritten(ex2);
+
+- err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+- if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
++ if (!IS_ERR(path)) {
++ *ppath = path;
+ goto out;
++ }
++ *ppath = NULL;
++ err = PTR_ERR(path);
++ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ return err;
+
+ /*
+- * Update path is required because previous ext4_ext_insert_extent()
+- * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
+- * guarantees that ext4_find_extent() will not return -ENOMEM,
+- * otherwise -ENOMEM will cause a retry in do_writepages(), and a
+- * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+- * an incorrect ee_len causing the i_reserved_data_blocks exception.
++ * Get a new path to try to zeroout or fix the extent length.
++ * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
++ * will not return -ENOMEM, otherwise -ENOMEM will cause a
++ * retry in do_writepages(), and a WARN_ON may be triggered
++ * in ext4_da_update_reserve_space() due to an incorrect
++ * ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, *ppath,
++ path = ext4_find_extent(inode, ee_block, NULL,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+@@ -3308,7 +3318,7 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
+ out:
+- ext4_ext_show_leaf(inode, *ppath);
++ ext4_ext_show_leaf(inode, path);
+ return err;
+ }
+
+@@ -4299,6 +4309,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
+ ar.len = allocated = map->m_len;
+ newblock = map->m_pblk;
++ err = 0;
+ goto got_allocated_blocks;
+ }
+
+@@ -4371,8 +4382,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ }
+
+- err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
+- if (err) {
++ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
+ if (allocated_clusters) {
+ int fb_flags = 0;
+
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 94f90032ca561..83a0a78a124a1 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1828,12 +1828,12 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ if (ext4_ext_is_unwritten(ex))
+ ext4_ext_mark_unwritten(&newex);
+ down_write(&EXT4_I(inode)->i_data_sem);
+- ret = ext4_ext_insert_extent(
+- NULL, inode, &path, &newex, 0);
++ path = ext4_ext_insert_extent(NULL, inode,
++ path, &newex, 0);
+ up_write((&EXT4_I(inode)->i_data_sem));
+- ext4_free_ext_path(path);
+- if (ret)
++ if (IS_ERR(path))
+ goto out;
++ ext4_free_ext_path(path);
+ goto next;
+ }
+
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index 0be0467ae6dd2..7a0e429507cf3 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -37,7 +37,6 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ path = ext4_find_extent(inode, lb->first_block, NULL, 0);
+ if (IS_ERR(path)) {
+ retval = PTR_ERR(path);
+- path = NULL;
+ goto err_out;
+ }
+
+@@ -53,7 +52,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
+ if (retval < 0)
+ goto err_out;
+- retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
++ path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
++ if (IS_ERR(path))
++ retval = PTR_ERR(path);
+ err_out:
+ up_write((&EXT4_I(inode)->i_data_sem));
+ ext4_free_ext_path(path);
+--
+2.51.0
+
--- /dev/null
+From c47cb6e0b64f26b22ef6115de4923bae5c2f02f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:31 +0800
+Subject: ext4: get rid of ppath in ext4_find_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 0be4c0c2f17bd10ae16c852f02d51a6a7b318aca ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+Getting rid of ppath in ext4_find_extent() requires its caller to update
+ppath. These ppaths will also be dropped later. No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-12-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 2 +-
+ fs/ext4/extents.c | 55 +++++++++++++++++++++++--------------------
+ fs/ext4/move_extent.c | 7 +++---
+ 3 files changed, 34 insertions(+), 30 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 27753291fb7ec..490496adf17cc 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3723,7 +3723,7 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+ struct ext4_ext_path **,
+ struct ext4_extent *, int);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+- struct ext4_ext_path **,
++ struct ext4_ext_path *,
+ int flags);
+ extern void ext4_free_ext_path(struct ext4_ext_path *);
+ extern int ext4_ext_check_inode(struct inode *inode);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index af4cae13685d7..a58f415f882b2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -881,11 +881,10 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+
+ struct ext4_ext_path *
+ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+- struct ext4_ext_path **orig_path, int flags)
++ struct ext4_ext_path *path, int flags)
+ {
+ struct ext4_extent_header *eh;
+ struct buffer_head *bh;
+- struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
+ short int depth, i, ppos = 0;
+ int ret;
+ gfp_t gfp_flags = GFP_NOFS;
+@@ -906,7 +905,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+ ext4_ext_drop_refs(path);
+ if (depth > path[0].p_maxdepth) {
+ kfree(path);
+- *orig_path = path = NULL;
++ path = NULL;
+ }
+ }
+ if (!path) {
+@@ -957,14 +956,10 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+
+ ext4_ext_show_path(inode, path);
+
+- if (orig_path)
+- *orig_path = path;
+ return path;
+
+ err:
+ ext4_free_ext_path(path);
+- if (orig_path)
+- *orig_path = NULL;
+ return ERR_PTR(ret);
+ }
+
+@@ -1429,7 +1424,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- ppath, gb_flags);
++ path, gb_flags);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
+ } else {
+@@ -1441,7 +1436,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- ppath, gb_flags);
++ path, gb_flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -1457,8 +1452,8 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
+-
+ out:
++ *ppath = IS_ERR(path) ? NULL : path;
+ return err;
+ }
+
+@@ -3248,15 +3243,17 @@ static int ext4_split_extent_at(handle_t *handle,
+ * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+ * an incorrect ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, ppath,
++ path = ext4_find_extent(inode, ee_block, *ppath,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
++ *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
++ *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3369,9 +3366,12 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -3758,9 +3758,12 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
+@@ -3816,9 +3819,12 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -5197,7 +5203,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * won't be shifted beyond EXT_MAX_BLOCKS.
+ */
+ if (SHIFT == SHIFT_LEFT) {
+- path = ext4_find_extent(inode, start - 1, &path,
++ path = ext4_find_extent(inode, start - 1, path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5246,7 +5252,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * becomes NULL to indicate the end of the loop.
+ */
+ while (iterator && start <= stop) {
+- path = ext4_find_extent(inode, *iterator, &path,
++ path = ext4_find_extent(inode, *iterator, path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5844,11 +5850,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- path = NULL;
+- goto out;
+- }
++ if (IS_ERR(path))
++ return PTR_ERR(path);
+
+ depth = ext_depth(inode);
+
+@@ -5932,7 +5935,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, &path, 0);
++ path = ext4_find_extent(inode, start, path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+@@ -5946,7 +5949,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, &path, 0);
++ path = ext4_find_extent(inode, start, path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index e01632462db9f..0aff07c570a46 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -26,16 +26,17 @@ static inline int
+ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
+ struct ext4_ext_path **ppath)
+ {
+- struct ext4_ext_path *path;
++ struct ext4_ext_path *path = *ppath;
+
+- path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
++ *ppath = NULL;
++ path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ if (path[ext_depth(inode)].p_ext == NULL) {
+ ext4_free_ext_path(path);
+- *ppath = NULL;
+ return -ENODATA;
+ }
++ *ppath = path;
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 29c164d60ea0c50ff75830334afaace9231429f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:35 +0800
+Subject: ext4: get rid of ppath in ext4_split_extent_at()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 1de82b1b60d4613753254bf3cbf622a4c02c945c ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_split_extent_at(), the following is done
+here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+ * Teach ext4_ext_show_leaf() to skip error pointer.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-16-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 85 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 47 insertions(+), 38 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 59c0bffc691d1..6da0bf3cf406d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -84,12 +84,11 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ et->et_checksum = ext4_extent_block_csum(inode, eh);
+ }
+
+-static int ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path **ppath,
+- ext4_lblk_t split,
+- int split_flag,
+- int flags);
++static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag, int flags);
+
+ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
+ {
+@@ -335,9 +334,15 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
+ if (nofail)
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
+
+- return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
++ path = ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
+ EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
+ flags);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ return PTR_ERR(path);
++ }
++ *ppath = path;
++ return 0;
+ }
+
+ static int
+@@ -689,7 +694,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
+ struct ext4_extent *ex;
+ int i;
+
+- if (!path)
++ if (IS_ERR_OR_NULL(path))
+ return;
+
+ eh = path[depth].p_hdr;
+@@ -3155,16 +3160,14 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ * a> the extent are splitted into two extent.
+ * b> split is not needed, and just mark the extent.
+ *
+- * return 0 on success.
++ * Return an extent path pointer on success, or an error pointer on failure.
+ */
+-static int ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path **ppath,
+- ext4_lblk_t split,
+- int split_flag,
+- int flags)
++static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag, int flags)
+ {
+- struct ext4_ext_path *path = *ppath;
+ ext4_fsblk_t newblock;
+ ext4_lblk_t ee_block;
+ struct ext4_extent *ex, newex, orig_ex, zero_ex;
+@@ -3238,14 +3241,12 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_mark_unwritten(ex2);
+
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+- if (!IS_ERR(path)) {
+- *ppath = path;
++ if (!IS_ERR(path))
+ goto out;
+- }
+- *ppath = NULL;
++
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return err;
++ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3255,16 +3256,14 @@ static int ext4_split_extent_at(handle_t *handle,
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, NULL,
+- flags | EXT4_EX_NOFAIL);
++ path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return PTR_ERR(path);
++ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+- *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3316,10 +3315,13 @@ static int ext4_split_extent_at(handle_t *handle,
+ * and err is a non-zero error code.
+ */
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+- return err;
+ out:
++ if (err) {
++ ext4_free_ext_path(path);
++ path = ERR_PTR(err);
++ }
+ ext4_ext_show_leaf(inode, path);
+- return err;
++ return path;
+ }
+
+ /*
+@@ -3366,10 +3368,14 @@ static int ext4_split_extent(handle_t *handle,
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
+- err = ext4_split_extent_at(handle, inode, ppath,
++ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+- if (err)
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ *ppath = NULL;
+ goto out;
++ }
++ *ppath = path;
+ } else {
+ allocated = ee_len - (map->m_lblk - ee_block);
+ }
+@@ -3377,7 +3383,7 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
++ path = ext4_find_extent(inode, map->m_lblk, path, flags);
+ if (IS_ERR(path)) {
+ *ppath = NULL;
+ return PTR_ERR(path);
+@@ -3399,13 +3405,17 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_MARK_UNWRIT2);
+ }
+- err = ext4_split_extent_at(handle, inode, ppath,
++ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk, split_flag1, flags);
+- if (err)
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ *ppath = NULL;
+ goto out;
++ }
++ *ppath = path;
+ }
+
+- ext4_ext_show_leaf(inode, *ppath);
++ ext4_ext_show_leaf(inode, path);
+ out:
+ return err ? err : allocated;
+ }
+@@ -5601,22 +5611,21 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ if (ext4_ext_is_unwritten(extent))
+ split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+- ret = ext4_split_extent_at(handle, inode, &path,
++ path = ext4_split_extent_at(handle, inode, path,
+ offset_lblk, split_flag,
+ EXT4_EX_NOCACHE |
+ EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL);
+ }
+
+- ext4_free_ext_path(path);
+- if (ret < 0) {
++ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
++ ret = PTR_ERR(path);
+ goto out_stop;
+ }
+- } else {
+- ext4_free_ext_path(path);
+ }
+
++ ext4_free_ext_path(path);
+ ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 452bfe349998931dc6971e5c9319467f9cd3949d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Apr 2023 11:38:43 +0800
+Subject: ext4: make ext4_es_remove_extent() return void
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit ed5d285b3f2a9a37ff778c5e440daf49351fcc4d ]
+
+Now ext4_es_remove_extent() never fails, so make it return void.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20230424033846.4732-10-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 34 ++++++----------------------------
+ fs/ext4/extents_status.c | 12 ++++++------
+ fs/ext4/extents_status.h | 4 ++--
+ fs/ext4/inline.c | 12 ++----------
+ fs/ext4/inode.c | 8 ++------
+ 5 files changed, 18 insertions(+), 52 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 1df7174774694..af4cae13685d7 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4463,15 +4463,8 @@ int ext4_ext_truncate(handle_t *handle, struct inode *inode)
+
+ last_block = (inode->i_size + sb->s_blocksize - 1)
+ >> EXT4_BLOCK_SIZE_BITS(sb);
+-retry:
+- err = ext4_es_remove_extent(inode, last_block,
+- EXT_MAX_BLOCKS - last_block);
+- if (err == -ENOMEM) {
+- memalloc_retry_wait(GFP_ATOMIC);
+- goto retry;
+- }
+- if (err)
+- return err;
++ ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
++
+ retry_remove_space:
+ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ if (err == -ENOMEM) {
+@@ -5419,13 +5412,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode, 0);
+-
+- ret = ext4_es_remove_extent(inode, punch_start,
+- EXT_MAX_BLOCKS - punch_start);
+- if (ret) {
+- up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
+- }
++ ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
+
+ ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+ if (ret) {
+@@ -5611,12 +5598,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ ext4_free_ext_path(path);
+ }
+
+- ret = ext4_es_remove_extent(inode, offset_lblk,
+- EXT_MAX_BLOCKS - offset_lblk);
+- if (ret) {
+- up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
+- }
++ ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
+
+ /*
+ * if offset_lblk lies in a hole which is at start of file, use
+@@ -5675,12 +5657,8 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
+ BUG_ON(!inode_is_locked(inode1));
+ BUG_ON(!inode_is_locked(inode2));
+
+- *erp = ext4_es_remove_extent(inode1, lblk1, count);
+- if (unlikely(*erp))
+- return 0;
+- *erp = ext4_es_remove_extent(inode2, lblk2, count);
+- if (unlikely(*erp))
+- return 0;
++ ext4_es_remove_extent(inode1, lblk1, count);
++ ext4_es_remove_extent(inode2, lblk2, count);
+
+ while (count) {
+ struct ext4_extent *ex1, *ex2, tmp_ex;
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 592229027af72..862a8308cd9b0 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1494,10 +1494,10 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ * @len - number of blocks to remove
+ *
+ * Reduces block/cluster reservation count and for bigalloc cancels pending
+- * reservations as needed. Returns 0 on success, error code on failure.
++ * reservations as needed.
+ */
+-int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len)
+ {
+ ext4_lblk_t end;
+ int err = 0;
+@@ -1505,14 +1505,14 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+- return 0;
++ return;
+
+ trace_ext4_es_remove_extent(inode, lblk, len);
+ es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+ lblk, len, inode->i_ino);
+
+ if (!len)
+- return err;
++ return;
+
+ end = lblk + len - 1;
+ BUG_ON(end < lblk);
+@@ -1539,7 +1539,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_print_tree(inode);
+ ext4_da_release_space(inode, reserved);
+- return 0;
++ return;
+ }
+
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
+index 481ec4381bee6..1d1247bbfd477 100644
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -133,8 +133,8 @@ extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status);
+-extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++extern void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len);
+ extern void ext4_es_find_extent_range(struct inode *inode,
+ int (*match_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index a1fb99d2b472b..c15ea7589945f 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -2004,16 +2004,8 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ * the extent status cache must be cleared to avoid leaving
+ * behind stale delayed allocated extent entries
+ */
+- if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+-retry:
+- err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+- if (err == -ENOMEM) {
+- memalloc_retry_wait(GFP_ATOMIC);
+- goto retry;
+- }
+- if (err)
+- goto out_error;
+- }
++ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
++ ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+
+ /* Clear the content in the xattr space. */
+ if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bf1f8319e2d74..79619f3db984f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4134,12 +4134,8 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode, 0);
+
+- ret = ext4_es_remove_extent(inode, first_block,
+- stop_block - first_block);
+- if (ret) {
+- up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
+- }
++ ext4_es_remove_extent(inode, first_block,
++ stop_block - first_block);
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ ret = ext4_ext_remove_space(inode, first_block,
+--
+2.51.0
+
--- /dev/null
+From 7aab117c2ca817532e7de8771816a7b77ef0e0d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 Mar 2023 01:21:10 +0800
+Subject: ext4: remove unnecessary e4b->bd_buddy_page check in
+ ext4_mb_load_buddy_gfp
+
+From: Kemeng Shi <shikemeng@huaweicloud.com>
+
+[ Upstream commit 285164b80175157c18a06425cf25591c9f942b1a ]
+
+e4b->bd_buddy_page is only set if we initialize ext4_buddy successfully. So
+e4b->bd_buddy_page is always NULL in error handle branch. Just remove the
+dead check.
+
+Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://lore.kernel.org/r/20230303172120.3800725-11-shikemeng@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 71e15007ffdf4..7431ff97a68c8 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1599,8 +1599,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ put_page(page);
+ if (e4b->bd_bitmap_page)
+ put_page(e4b->bd_bitmap_page);
+- if (e4b->bd_buddy_page)
+- put_page(e4b->bd_buddy_page);
++
+ e4b->bd_buddy = NULL;
+ e4b->bd_bitmap = NULL;
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 3d4a667dafc10ca9fc216c27e96552f60d1c213a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:33 +0800
+Subject: ext4: subdivide EXT4_EXT_DATA_VALID1
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 22784ca541c0f01c5ebad14e8228298dc0a390ed ]
+
+When splitting an extent, if the EXT4_GET_BLOCKS_CONVERT flag is set and
+it is necessary to split the target extent in the middle,
+ext4_split_extent() first handles splitting the latter half of the
+extent and passes the EXT4_EXT_DATA_VALID1 flag. This flag implies that
+all blocks before the split point contain valid data; however, this
+assumption is incorrect.
+
+Therefore, subdivid EXT4_EXT_DATA_VALID1 into
+EXT4_EXT_DATA_ENTIRE_VALID1 and EXT4_EXT_DATA_PARTIAL_VALID1, which
+indicate that the first half of the extent is either entirely valid or
+only partially valid, respectively. These two flags cannot be set
+simultaneously.
+
+This patch does not use EXT4_EXT_DATA_PARTIAL_VALID1, it only replaces
+EXT4_EXT_DATA_VALID1 with EXT4_EXT_DATA_ENTIRE_VALID1 at the location
+where it is set, no logical changes.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-2-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 6da0bf3cf406d..e2f9c27c7e161 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -43,8 +43,13 @@
+ #define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
+ #define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
+
+-#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+-#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++/* first half contains valid data */
++#define EXT4_EXT_DATA_ENTIRE_VALID1 0x8 /* has entirely valid data */
++#define EXT4_EXT_DATA_PARTIAL_VALID1 0x10 /* has partially valid data */
++#define EXT4_EXT_DATA_VALID1 (EXT4_EXT_DATA_ENTIRE_VALID1 | \
++ EXT4_EXT_DATA_PARTIAL_VALID1)
++
++#define EXT4_EXT_DATA_VALID2 0x20 /* second half contains valid data */
+
+ static __le32 ext4_extent_block_csum(struct inode *inode,
+ struct ext4_extent_header *eh)
+@@ -3175,8 +3180,9 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ unsigned int ee_len, depth;
+ int err = 0;
+
+- BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+- (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
++ BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) == EXT4_EXT_DATA_VALID1);
++ BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) &&
++ (split_flag & EXT4_EXT_DATA_VALID2));
+
+ /* Do not cache extents that are in the process of being modified. */
+ flags |= EXT4_EX_NOCACHE;
+@@ -3367,7 +3373,7 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= EXT4_EXT_DATA_VALID1;
++ split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+@@ -3731,7 +3737,7 @@ static int ext4_split_convert_extents(handle_t *handle,
+
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+- split_flag |= EXT4_EXT_DATA_VALID1;
++ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+ split_flag |= ee_block + ee_len <= eof_block ?
+--
+2.51.0
+
--- /dev/null
+From 24249193ec0fc55727016f1bf2be0283152cb793 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index 648eb7e867d10..b61bba4166f78 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From 3202f1740823b1a41dedf96d701d49a1308c607a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Feb 2024 13:45:20 +0100
+Subject: KVM: x86: Fix KVM_GET_MSRS stack info leak
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit 3376ca3f1a2075eaa23c5576c47d04d7e8a4adda ]
+
+Commit 6abe9c1386e5 ("KVM: X86: Move ignore_msrs handling upper the
+stack") changed the 'ignore_msrs' handling, including sanitizing return
+values to the caller. This was fine until commit 12bc2132b15e ("KVM:
+X86: Do the same ignore_msrs check for feature msrs") which allowed
+non-existing feature MSRs to be ignored, i.e. to not generate an error
+on the ioctl() level. It even tried to preserve the sanitization of the
+return value. However, the logic is flawed, as '*data' will be
+overwritten again with the uninitialized stack value of msr.data.
+
+Fix this by simplifying the logic and always initializing msr.data,
+vanishing the need for an additional error exit path.
+
+Fixes: 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Link: https://lore.kernel.org/r/20240203124522.592778-2-minipli@grsecurity.net
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 5bb9ac186512 ("KVM: x86: Return "unsupported" instead of "invalid" on access to unsupported PV MSR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0b8ec5886d44f..80daa1ef956fa 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1713,22 +1713,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+ struct kvm_msr_entry msr;
+ int r;
+
++ /* Unconditionally clear the output for simplicity */
++ msr.data = 0;
+ msr.index = index;
+ r = kvm_get_msr_feature(&msr);
+
+- if (r == KVM_MSR_RET_INVALID) {
+- /* Unconditionally clear the output for simplicity */
+- *data = 0;
+- if (kvm_msr_ignored_check(index, 0, false))
+- r = 0;
+- }
+-
+- if (r)
+- return r;
++ if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
++ r = 0;
+
+ *data = msr.data;
+
+- return 0;
++ return r;
+ }
+
+ static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+--
+2.51.0
+
--- /dev/null
+From de8a2c2ddeed7929b31485a89d0da1d0135c669d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jan 2026 19:06:57 -0800
+Subject: KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ead63640d4e72e6f6d464f4e31f7fecb79af8869 ]
+
+Ignore -EBUSY when checking nested events after exiting a blocking state
+while L2 is active, as exiting to userspace will generate a spurious
+userspace exit, usually with KVM_EXIT_UNKNOWN, and likely lead to the VM's
+demise. Continuing with the wakeup isn't perfect either, as *something*
+has gone sideways if a vCPU is awakened in L2 with an injected event (or
+worse, a nested run pending), but continuing on gives the VM a decent
+chance of surviving without any major side effects.
+
+As explained in the Fixes commits, it _should_ be impossible for a vCPU to
+be put into a blocking state with an already-injected event (exception,
+IRQ, or NMI). Unfortunately, userspace can stuff MP_STATE and/or injected
+events, and thus put the vCPU into what should be an impossible state.
+
+Don't bother trying to preserve the WARN, e.g. with an anti-syzkaller
+Kconfig, as WARNs can (hopefully) be added in paths where _KVM_ would be
+violating x86 architecture, e.g. by WARNing if KVM attempts to inject an
+exception or interrupt while the vCPU isn't running.
+
+Cc: Alessandro Ratti <alessandro@0x65c.net>
+Cc: stable@vger.kernel.org
+Fixes: 26844fee6ade ("KVM: x86: never write to memory from kvm_vcpu_check_block()")
+Fixes: 45405155d876 ("KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet inject")
+Link: https://syzkaller.appspot.com/text?tag=ReproC&x=10d4261a580000
+Reported-by: syzbot+1522459a74d26b0ac33a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/671bc7a7.050a0220.455e8.022a.GAE@google.com
+Link: https://patch.msgid.link/20260109030657.994759-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 824844a7c6e88..8617f7fec9643 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11066,8 +11066,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ if (is_guest_mode(vcpu)) {
+ int r = kvm_check_nested_events(vcpu);
+
+- WARN_ON_ONCE(r == -EBUSY);
+- if (r < 0)
++ if (r < 0 && r != -EBUSY)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From e2141d6aa48767eee4c91c65fda99aa65eb49203 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jan 2023 23:49:05 +0000
+Subject: KVM: x86/pmu: Provide "error" semantics for unsupported-but-known PMU
+ MSRs
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 2de154f541fc5b9f2aed3fe06e218130718ce320 ]
+
+Provide "error" semantics (read zeros, drop writes) for userspace accesses
+to MSRs that are ultimately unsupported for whatever reason, but for which
+KVM told userspace to save and restore the MSR, i.e. for MSRs that KVM
+included in KVM_GET_MSR_INDEX_LIST.
+
+Previously, KVM special cased a few PMU MSRs that were problematic at one
+point or another. Extend the treatment to all PMU MSRs, e.g. to avoid
+spurious unsupported accesses.
+
+Note, the logic can also be used for non-PMU MSRs, but as of today only
+PMU MSRs can end up being unsupported after KVM told userspace to save and
+restore them.
+
+Link: https://lore.kernel.org/r/20230124234905.3774678-7-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 5bb9ac186512 ("KVM: x86: Return "unsupported" instead of "invalid" on access to unsupported PV MSR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 51 ++++++++++++++++++++++++++--------------------
+ 1 file changed, 29 insertions(+), 22 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 2253c51e33e36..0b8ec5886d44f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3576,6 +3576,18 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+ }
+
++static bool kvm_is_msr_to_save(u32 msr_index)
++{
++ unsigned int i;
++
++ for (i = 0; i < num_msrs_to_save; i++) {
++ if (msrs_to_save[i] == msr_index)
++ return true;
++ }
++
++ return false;
++}
++
+ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+ u32 msr = msr_info->index;
+@@ -3896,20 +3908,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ vcpu->arch.guest_fpu.xfd_err = data;
+ break;
+ #endif
+- case MSR_IA32_PEBS_ENABLE:
+- case MSR_IA32_DS_AREA:
+- case MSR_PEBS_DATA_CFG:
+- case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
++ default:
+ if (kvm_pmu_is_valid_msr(vcpu, msr))
+ return kvm_pmu_set_msr(vcpu, msr_info);
++
+ /*
+ * Userspace is allowed to write '0' to MSRs that KVM reports
+ * as to-be-saved, even if an MSRs isn't fully supported.
+ */
+- return !msr_info->host_initiated || data;
+- default:
+- if (kvm_pmu_is_valid_msr(vcpu, msr))
+- return kvm_pmu_set_msr(vcpu, msr_info);
++ if (msr_info->host_initiated && !data &&
++ kvm_is_msr_to_save(msr))
++ break;
++
+ return KVM_MSR_RET_INVALID;
+ }
+ return 0;
+@@ -4000,20 +4010,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */
+ msr_info->data = 0;
+ break;
+- case MSR_IA32_PEBS_ENABLE:
+- case MSR_IA32_DS_AREA:
+- case MSR_PEBS_DATA_CFG:
+- case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+- if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+- return kvm_pmu_get_msr(vcpu, msr_info);
+- /*
+- * Userspace is allowed to read MSRs that KVM reports as
+- * to-be-saved, even if an MSR isn't fully supported.
+- */
+- if (!msr_info->host_initiated)
+- return 1;
+- msr_info->data = 0;
+- break;
+ case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
+ case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
+ case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
+@@ -4268,6 +4264,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ default:
+ if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+ return kvm_pmu_get_msr(vcpu, msr_info);
++
++ /*
++ * Userspace is allowed to read MSRs that KVM reports as
++ * to-be-saved, even if an MSR isn't fully supported.
++ */
++ if (msr_info->host_initiated &&
++ kvm_is_msr_to_save(msr_info->index)) {
++ msr_info->data = 0;
++ break;
++ }
++
+ return KVM_MSR_RET_INVALID;
+ }
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 96e0a49e0549a902143fe15c3b80be40bfc4ba88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Aug 2024 11:19:28 -0700
+Subject: KVM: x86: Rename KVM_MSR_RET_INVALID to KVM_MSR_RET_UNSUPPORTED
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit aaecae7b6a2b19a874a7df0d474f44f3a5b5a74e ]
+
+Rename the "INVALID" internal MSR error return code to "UNSUPPORTED" to
+try and make it more clear that access was denied because the MSR itself
+is unsupported/unknown. "INVALID" is too ambiguous, as it could just as
+easily mean the value for WRMSR as invalid.
+
+Avoid UNKNOWN and UNIMPLEMENTED, as the error code is used for MSRs that
+_are_ actually implemented by KVM, e.g. if the MSR is unsupported because
+an associated feature flag is not present in guest CPUID.
+
+Opportunistically beef up the comments for the internal MSR error codes.
+
+Link: https://lore.kernel.org/r/20240802181935.292540-4-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 5bb9ac186512 ("KVM: x86: Return "unsupported" instead of "invalid" on access to unsupported PV MSR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/svm.c | 2 +-
+ arch/x86/kvm/vmx/vmx.c | 2 +-
+ arch/x86/kvm/x86.c | 12 ++++++------
+ arch/x86/kvm/x86.h | 15 +++++++++++----
+ 4 files changed, 19 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index a885fb39a6559..5d7775b869732 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2735,7 +2735,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+ msr->data = kvm_caps.supported_perf_cap;
+ return 0;
+ default:
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+
+ return 0;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index ebdc86030a7a4..e5d162e97f503 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1889,7 +1889,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+ msr->data = kvm_caps.supported_perf_cap;
+ return 0;
+ default:
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 80daa1ef956fa..84e54547ec7d0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1718,7 +1718,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+ msr.index = index;
+ r = kvm_get_msr_feature(&msr);
+
+- if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
++ if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
+ r = 0;
+
+ *data = msr.data;
+@@ -1908,7 +1908,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
+ {
+ int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
+
+- if (ret == KVM_MSR_RET_INVALID)
++ if (ret == KVM_MSR_RET_UNSUPPORTED)
+ if (kvm_msr_ignored_check(index, data, true))
+ ret = 0;
+
+@@ -1953,7 +1953,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
+ {
+ int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
+
+- if (ret == KVM_MSR_RET_INVALID) {
++ if (ret == KVM_MSR_RET_UNSUPPORTED) {
+ /* Unconditionally clear *data for simplicity */
+ *data = 0;
+ if (kvm_msr_ignored_check(index, 0, false))
+@@ -2022,7 +2022,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
+ static u64 kvm_msr_reason(int r)
+ {
+ switch (r) {
+- case KVM_MSR_RET_INVALID:
++ case KVM_MSR_RET_UNSUPPORTED:
+ return KVM_MSR_EXIT_REASON_UNKNOWN;
+ case KVM_MSR_RET_FILTERED:
+ return KVM_MSR_EXIT_REASON_FILTER;
+@@ -3915,7 +3915,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ kvm_is_msr_to_save(msr))
+ break;
+
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ return 0;
+ }
+@@ -4270,7 +4270,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ }
+
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index f3554bf052016..9bb2f237b0fc0 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -459,11 +459,18 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+
+ /*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+- * an error that should result in #GP in the guest, unless userspace
+- * handles it.
++ * an error that should result in #GP in the guest, unless userspace handles it.
++ * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
++ * as part of KVM's lightly documented internal KVM_RUN return codes.
++ *
++ * UNSUPPORTED - The MSR isn't supported, either because it is completely
++ * unknown to KVM, or because the MSR should not exist according
++ * to the vCPU model.
++ *
++ * FILTERED - Access to the MSR is denied by a userspace MSR filter.
+ */
+-#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */
+-#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */
++#define KVM_MSR_RET_UNSUPPORTED 2
++#define KVM_MSR_RET_FILTERED 3
+
+ #define __cr4_reserved_bits(__cpu_has, __c) \
+ ({ \
+--
+2.51.0
+
--- /dev/null
+From 7195173fd38be3b317d13ea519ad108e66303c17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 12:59:48 -0800
+Subject: KVM: x86: Return "unsupported" instead of "invalid" on access to
+ unsupported PV MSR
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 5bb9ac1865123356337a389af935d3913ee917ed ]
+
+Return KVM_MSR_RET_UNSUPPORTED instead of '1' (which for all intents and
+purposes means "invalid") when rejecting accesses to KVM PV MSRs to adhere
+to KVM's ABI of allowing host reads and writes of '0' to MSRs that are
+advertised to userspace via KVM_GET_MSR_INDEX_LIST, even if the vCPU model
+doesn't support the MSR.
+
+E.g. running a QEMU VM with
+
+ -cpu host,-kvmclock,kvm-pv-enforce-cpuid
+
+yields:
+
+ qemu: error: failed to set MSR 0x12 to 0x0
+ qemu: target/i386/kvm/kvm.c:3301: kvm_buf_set_msrs:
+ Assertion `ret == cpu->kvm_msr_buf->nmsrs' failed.
+
+Fixes: 66570e966dd9 ("kvm: x86: only provide PV features if enabled in guest's CPUID")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Link: https://patch.msgid.link/20251230205948.4094097-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 84e54547ec7d0..b6fdf084fc92a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3731,47 +3731,47 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_WALL_CLOCK_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_pv_enable_async_pf(vcpu, data))
+ return 1;
+ break;
+ case MSR_KVM_ASYNC_PF_INT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_pv_enable_async_pf_int(vcpu, data))
+ return 1;
+ break;
+ case MSR_KVM_ASYNC_PF_ACK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+ if (data & 0x1) {
+ vcpu->arch.apf.pageready_pending = false;
+ kvm_check_async_pf_completion(vcpu);
+@@ -3779,7 +3779,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_STEAL_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (unlikely(!sched_info_on()))
+ return 1;
+@@ -3797,7 +3797,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
+ return 1;
+@@ -3805,7 +3805,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+
+ case MSR_KVM_POLL_CONTROL:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ /* only enable bit supported */
+ if (data & (-1ULL << 1))
+@@ -4108,61 +4108,61 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_WALL_CLOCK_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.time;
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.time;
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.apf.msr_en_val;
+ break;
+ case MSR_KVM_ASYNC_PF_INT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.apf.msr_int_val;
+ break;
+ case MSR_KVM_ASYNC_PF_ACK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = 0;
+ break;
+ case MSR_KVM_STEAL_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.st.msr_val;
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.pv_eoi.msr_val;
+ break;
+ case MSR_KVM_POLL_CONTROL:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.msr_kvm_poll_control;
+ break;
+--
+2.51.0
+
--- /dev/null
+From 4cf50bd44f01923aa8c78c26cda4e93cf58f0c42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jun 2024 10:26:09 -0700
+Subject: KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet
+ inject
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 45405155d876c326da89162b8173b8cc9ab7ed75 ]
+
+WARN if a blocking vCPU is awakened by a valid wake event that KVM can't
+inject, e.g. because KVM needs to complete a nested VM-enter, or needs to
+re-inject an exception. For the nested VM-Enter case, KVM is supposed to
+clear "nested_run_pending" if L1 puts L2 into HLT, i.e. entering HLT
+"completes" the nested VM-Enter. And for already-injected exceptions, it
+should be impossible for the vCPU to be in a blocking state if a VM-Exit
+occurred while an exception was being vectored.
+
+Link: https://lore.kernel.org/r/20240607172609.3205077-7-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: ead63640d4e7 ("KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b6fdf084fc92a..824844a7c6e88 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11064,7 +11064,10 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ * causes a spurious wakeup from HLT).
+ */
+ if (is_guest_mode(vcpu)) {
+- if (kvm_check_nested_events(vcpu) < 0)
++ int r = kvm_check_nested_events(vcpu);
++
++ WARN_ON_ONCE(r == -EBUSY);
++ if (r < 0)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 845f0749f0afb4e561327a3f53025d8790429f68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Dec 2022 11:40:36 +0200
+Subject: media: camss: vfe-480: Multiple outputs support for SM8250
+
+From: Milen Mitkov <quic_mmitkov@quicinc.com>
+
+[ Upstream commit 1c4abf0246d2ad5fabc830f1d9cc3944d5a4ae95 ]
+
+On SM8250 each VFE supports at least 3 RDI channels, or 4
+in case of VFE-Lite, so add appropriate IRQ setup and handling.
+
+Signed-off-by: Milen Mitkov <quic_mmitkov@quicinc.com>
+Reviewed-by: Robert Foss <robert.foss@linaro.org>
+Tested-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Acked-by: Robert Foss <robert.foss@linaro.org>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: d965919af524 ("media: qcom: camss: vfe: Fix out-of-bounds access in vfe_isr_reg_update()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/platform/qcom/camss/camss-vfe-480.c | 57 ++++++++++++-------
+ 1 file changed, 38 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index 72f5cfeeb49bf..0063e36a30e05 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -93,6 +93,8 @@ static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
+ #define RDI_WM(n) ((IS_LITE ? 0 : 23) + (n))
+ #define RDI_COMP_GROUP(n) ((IS_LITE ? 0 : 11) + (n))
+
++#define MAX_VFE_OUTPUT_LINES 4
++
+ static u32 vfe_hw_version(struct vfe_device *vfe)
+ {
+ u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
+@@ -170,12 +172,26 @@ static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+
+ static void vfe_enable_irq_common(struct vfe_device *vfe)
+ {
+- /* enable only the IRQs used: rup and comp_done irqs for RDI0 */
++ /* enable reset ack IRQ and top BUS status IRQ */
+ writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
+ vfe->base + VFE_IRQ_MASK(0));
+- writel_relaxed(BUS_IRQ_MASK_0_RDI_RUP(vfe, 0) |
+- BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(0)),
+- vfe->base + VFE_BUS_IRQ_MASK(0));
++}
++
++static void vfe_enable_lines_irq(struct vfe_device *vfe)
++{
++ int i;
++ u32 bus_irq_mask = 0;
++
++ for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
++ /* Enable IRQ for newly added lines, but also keep already running lines's IRQ */
++ if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
++ vfe->line[i].output.state == VFE_OUTPUT_ON) {
++ bus_irq_mask |= BUS_IRQ_MASK_0_RDI_RUP(vfe, i)
++ | BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i));
++ }
++ }
++
++ writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
+ }
+
+ static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
+@@ -192,6 +208,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
+ {
+ struct vfe_device *vfe = dev;
+ u32 status;
++ int i;
+
+ status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0));
+ writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0));
+@@ -206,11 +223,14 @@ static irqreturn_t vfe_isr(int irq, void *dev)
+ writel_relaxed(status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
+ writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
+
+- if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, 0))
+- vfe_isr_reg_update(vfe, 0);
++ /* Loop through all WMs IRQs */
++ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
++ if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i))
++ vfe_isr_reg_update(vfe, i);
+
+- if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(0)))
+- vfe_isr_wm_done(vfe, 0);
++ if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
++ vfe_isr_wm_done(vfe, i);
++ }
+ }
+
+ return IRQ_HANDLED;
+@@ -233,7 +253,6 @@ static int vfe_get_output(struct vfe_line *line)
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+- int wm_idx;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+@@ -245,12 +264,12 @@ static int vfe_get_output(struct vfe_line *line)
+
+ output->wm_num = 1;
+
+- wm_idx = vfe_reserve_wm(vfe, line->id);
+- if (wm_idx < 0) {
+- dev_err(vfe->camss->dev, "Can not reserve wm\n");
+- goto error_get_wm;
+- }
+- output->wm_idx[0] = wm_idx;
++ /* Correspondence between VFE line number and WM number.
++ * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3
++ * Note this 1:1 mapping will not work for PIX streams.
++ */
++ output->wm_idx[0] = line->id;
++ vfe->wm_output_map[line->id] = line->id;
+
+ output->drop_update_idx = 0;
+
+@@ -258,11 +277,9 @@ static int vfe_get_output(struct vfe_line *line)
+
+ return 0;
+
+-error_get_wm:
+- vfe_release_wm(vfe, output->wm_idx[0]);
+- output->state = VFE_OUTPUT_OFF;
+ error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
++ output->state = VFE_OUTPUT_OFF;
+
+ return -EINVAL;
+ }
+@@ -344,6 +361,8 @@ static int vfe_enable(struct vfe_line *line)
+
+ vfe->stream_count++;
+
++ vfe_enable_lines_irq(vfe);
++
+ mutex_unlock(&vfe->stream_lock);
+
+ ret = vfe_get_output(line);
+@@ -550,7 +569,7 @@ static const struct camss_video_ops vfe_video_ops_480 = {
+ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+ {
+ vfe->video_ops = vfe_video_ops_480;
+- vfe->line_num = 1;
++ vfe->line_num = MAX_VFE_OUTPUT_LINES;
+ }
+
+ const struct vfe_hw_ops vfe_ops_480 = {
+--
+2.51.0
+
--- /dev/null
+From 19e384fab3542d0a4953c584f7ad91e6aeb9a515 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 20:18:34 +0200
+Subject: media: hantro: Disable multicore support
+
+From: Sebastian Reichel <sebastian.reichel@collabora.com>
+
+[ Upstream commit ccdeb8d57f7fb3e5c05d72cb7dfb9bc78f09f542 ]
+
+Avoid exposing equal Hantro video codecs to userspace. Equal video
+codecs allow scheduling work between the cores. For that kernel support
+is required, which does not yet exist. Until that is implemented avoid
+exposing each core separately to userspace so that multicore can be
+added in the future without breaking userspace ABI.
+
+This was written with Rockchip RK3588 in mind (which has 4 Hantro H1
+cores), but applies to all SoCs.
+
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/platform/verisilicon/hantro_drv.c | 47 +++++++++++++++++++
+ 1 file changed, 47 insertions(+)
+
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index a35b6ae62d585..092ee1681c3ea 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -896,6 +896,49 @@ static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_queue = v4l2_m2m_request_queue,
+ };
+
++/*
++ * Some SoCs, like RK3588 have multiple identical Hantro cores, but the
++ * kernel is currently missing support for multi-core handling. Exposing
++ * separate devices for each core to userspace is bad, since that does
++ * not allow scheduling tasks properly (and creates ABI). With this workaround
++ * the driver will only probe for the first core and early exit for the other
++ * cores. Once the driver gains multi-core support, the same technique
++ * for detecting the main core can be used to cluster all cores together.
++ */
++static int hantro_disable_multicore(struct hantro_dev *vpu)
++{
++ struct device_node *node = NULL;
++ const char *compatible;
++ bool is_main_core;
++ int ret;
++
++ /* Intentionally ignores the fallback strings */
++ ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible);
++ if (ret)
++ return ret;
++
++ /* The first compatible and available node found is considered the main core */
++ do {
++ node = of_find_compatible_node(node, NULL, compatible);
++ if (of_device_is_available(node))
++ break;
++ } while (node);
++
++ if (!node)
++ return -EINVAL;
++
++ is_main_core = (vpu->dev->of_node == node);
++
++ of_node_put(node);
++
++ if (!is_main_core) {
++ dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -916,6 +959,10 @@ static int hantro_probe(struct platform_device *pdev)
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
++ ret = hantro_disable_multicore(vpu);
++ if (ret)
++ return ret;
++
+ /*
+ * Support for nxp,imx8mq-vpu is kept for backwards compatibility
+ * but it's deprecated. Please update your DTS file to use
+--
+2.51.0
+
--- /dev/null
+From fd6dd02730dfd6e3f6789990ca7399f19b727e36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 10:52:17 +0300
+Subject: media: qcom: camss: vfe: Fix out-of-bounds access in
+ vfe_isr_reg_update()
+
+From: Alper Ak <alperyasinak1@gmail.com>
+
+[ Upstream commit d965919af524e68cb2ab1a685872050ad2ee933d ]
+
+vfe_isr() iterates using MSM_VFE_IMAGE_MASTERS_NUM(7) as the loop
+bound and passes the index to vfe_isr_reg_update(). However,
+vfe->line[] array is defined with VFE_LINE_NUM_MAX(4):
+
+ struct vfe_line line[VFE_LINE_NUM_MAX];
+
+When index is 4, 5, 6, the access to vfe->line[line_id] exceeds
+the array bounds and resulting in out-of-bounds memory access.
+
+Fix this by using separate loops for output lines and write masters.
+
+Fixes: 4edc8eae715c ("media: camss: Add initial support for VFE hardware version Titan 480")
+Signed-off-by: Alper Ak <alperyasinak1@gmail.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Bryan O'Donoghue <bod@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss-vfe-480.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+index 0063e36a30e05..fa818517ab0da 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
+@@ -223,11 +223,13 @@ static irqreturn_t vfe_isr(int irq, void *dev)
+ writel_relaxed(status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
+ writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
+
+- /* Loop through all WMs IRQs */
+- for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
++ for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
+ if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i))
+ vfe_isr_reg_update(vfe, i);
++ }
+
++ /* Loop through all WMs IRQs */
++ for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
+ if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
+ vfe_isr_wm_done(vfe, i);
+ }
+--
+2.51.0
+
--- /dev/null
+From 816b570ac13fa8919f79e42fe18d7165b3c89138 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 09:12:57 +0000
+Subject: media: tegra-video: Fix memory leak in __tegra_channel_try_format()
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 43e5302d22334f1183dec3e0d5d8007eefe2817c ]
+
+The state object allocated by __v4l2_subdev_state_alloc() must be freed
+with __v4l2_subdev_state_free() when it is no longer needed.
+
+In __tegra_channel_try_format(), two error paths return directly after
+v4l2_subdev_call() fails, without freeing the allocated 'sd_state'
+object. This violates the requirement and causes a memory leak.
+
+Fix this by introducing a cleanup label and using goto statements in the
+error paths to ensure that __v4l2_subdev_state_free() is always called
+before the function returns.
+
+Fixes: 56f64b82356b7 ("media: tegra-video: Use zero crop settings if subdev has no get_selection")
+Fixes: 1ebaeb09830f3 ("media: tegra-video: Add support for external sensor capture")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index e82ab9044ef3b..ea96fd67035c7 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -503,7 +503,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *try_crop;
+- int ret;
++ int ret = 0;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+@@ -548,8 +548,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+- if (ret)
+- return -EINVAL;
++ if (ret) {
++ ret = -EINVAL;
++ goto out_free;
++ }
+
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
+@@ -561,14 +563,15 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ if (ret < 0)
+- return ret;
++ goto out_free;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+
++out_free:
+ __v4l2_subdev_state_free(sd_state);
+
+- return 0;
++ return ret;
+ }
+
+ static int tegra_channel_try_format(struct file *file, void *fh,
+--
+2.51.0
+
--- /dev/null
+From 3d09ad4b86c3a43e864e2f1a60fb3a7b4e422c09 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Oct 2023 23:40:09 +0200
+Subject: media: tegra-video: Use accessors for pad config 'try_*' fields
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 0623979d8352efe18f83c4fad95a2e61df17b3e7 ]
+
+The 'try_*' fields of the v4l2_subdev_pad_config structure are meant to
+be accessed through helper functions. Replace direct access with usage
+of the v4l2_subdev_get_pad_format(), v4l2_subdev_get_pad_crop() and
+v4l2_subdev_get_pad_compose() helpers.
+
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Stable-dep-of: 43e5302d2233 ("media: tegra-video: Fix memory leak in __tegra_channel_try_format()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index 9d46a36cc0140..e82ab9044ef3b 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -502,6 +502,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
++ struct v4l2_rect *try_crop;
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+@@ -537,24 +538,25 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ * Attempt to obtain the format size from subdev.
+ * If not available, try to get crop boundary from subdev.
+ */
++ try_crop = v4l2_subdev_get_pad_crop(subdev, sd_state, 0);
+ fse.code = fmtinfo->code;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
+ if (ret) {
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
+- sd_state->pads->try_crop.width = 0;
+- sd_state->pads->try_crop.height = 0;
++ try_crop->width = 0;
++ try_crop->height = 0;
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+ if (ret)
+ return -EINVAL;
+
+- sd_state->pads->try_crop.width = sdsel.r.width;
+- sd_state->pads->try_crop.height = sdsel.r.height;
++ try_crop->width = sdsel.r.width;
++ try_crop->height = sdsel.r.height;
+ }
+ } else {
+- sd_state->pads->try_crop.width = fse.max_width;
+- sd_state->pads->try_crop.height = fse.max_height;
++ try_crop->width = fse.max_width;
++ try_crop->height = fse.max_height;
+ }
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+--
+2.51.0
+
--- /dev/null
+From 4a73dc3209e55c4c415b2e672332b8a745cddb65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:24 +0800
+Subject: media: v4l2-mem2mem: Add a kref to the v4l2_m2m_dev structure
+
+From: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+
+[ Upstream commit db6b97a4f8041e479be9ef4b8b07022636c96f50 ]
+
+Adding a reference count to the v4l2_m2m_dev structure allow safely
+sharing it across multiple hardware nodes. This can be used to prevent
+running jobs concurrently on m2m cores that have some internal resource
+sharing.
+
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[hverkuil: fix typos in v4l2_m2m_put documentation]
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-mem2mem.c | 23 +++++++++++++++++++++++
+ include/media/v4l2-mem2mem.h | 21 +++++++++++++++++++++
+ 2 files changed, 44 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 97645d6509e1c..64a389aa7f81d 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -90,6 +90,7 @@ static const char * const m2m_entity_name[] = {
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
+ * @m2m_ops: driver callbacks
++ * @kref: device reference count
+ */
+ struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+@@ -109,6 +110,8 @@ struct v4l2_m2m_dev {
+ unsigned long job_queue_flags;
+
+ const struct v4l2_m2m_ops *m2m_ops;
++
++ struct kref kref;
+ };
+
+ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+@@ -1207,6 +1210,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
++ kref_init(&m2m_dev->kref);
+
+ return m2m_dev;
+ }
+@@ -1218,6 +1222,25 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_get(&m2m_dev->kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_get);
++
++static void v4l2_m2m_release_from_kref(struct kref *kref)
++{
++ struct v4l2_m2m_dev *m2m_dev = container_of(kref, struct v4l2_m2m_dev, kref);
++
++ v4l2_m2m_release(m2m_dev);
++}
++
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_put(&m2m_dev->kref, v4l2_m2m_release_from_kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_put);
++
+ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index 370c230ad3bea..4a2649a4562ae 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -537,6 +537,27 @@ v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ */
+ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+
++/**
++ * v4l2_m2m_get() - take a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * This is used to share the M2M device across multiple devices. This
++ * can be used to avoid scheduling two hardware nodes concurrently.
++ */
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);
++
++/**
++ * v4l2_m2m_put() - remove a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * Once the M2M device has no more references, v4l2_m2m_release() will be
++ * called automatically. Users of this method should never call
++ * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
++ */
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);
++
+ /**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+--
+2.51.0
+
--- /dev/null
+From 63c8800c43df8752425d8761f8ae86949fbb021c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:26 +0800
+Subject: media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit e0203ddf9af7c8e170e1e99ce83b4dc07f0cd765 ]
+
+For the i.MX8MQ platform, there is a hardware limitation: the g1 VPU and
+g2 VPU cannot decode simultaneously; otherwise, it will cause below bus
+error and produce corrupted pictures, even potentially lead to system hang.
+
+[ 110.527986] hantro-vpu 38310000.video-codec: frame decode timed out.
+[ 110.583517] hantro-vpu 38310000.video-codec: bus error detected.
+
+Therefore, it is necessary to ensure that g1 and g2 operate alternately.
+This allows for successful multi-instance decoding of H.264 and HEVC.
+
+To achieve this, g1 and g2 share the same v4l2_m2m_dev, and then the
+v4l2_m2m_dev can handle the scheduling.
+
+Fixes: cb5dd5a0fa518 ("media: hantro: Introduce G2/HEVC decoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Co-developed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/verisilicon/hantro.h | 2 +
+ .../media/platform/verisilicon/hantro_drv.c | 42 +++++++++++++++++--
+ .../media/platform/verisilicon/imx8m_vpu_hw.c | 8 ++++
+ 3 files changed, 49 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
+index 2989ebc631cc0..fca6ef4a11665 100644
+--- a/drivers/media/platform/verisilicon/hantro.h
++++ b/drivers/media/platform/verisilicon/hantro.h
+@@ -76,6 +76,7 @@ struct hantro_irq {
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
++ * @shared_devices: an array of device ids that cannot run concurrently
+ */
+ struct hantro_variant {
+ unsigned int enc_offset;
+@@ -100,6 +101,7 @@ struct hantro_variant {
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
++ const struct of_device_id *shared_devices;
+ };
+
+ /**
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 092ee1681c3ea..29baefc93628c 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/clk.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -939,6 +940,41 @@ static int hantro_disable_multicore(struct hantro_dev *vpu)
+ return 0;
+ }
+
++static struct v4l2_m2m_dev *hantro_get_v4l2_m2m_dev(struct hantro_dev *vpu)
++{
++ struct device_node *node;
++ struct hantro_dev *shared_vpu;
++
++ if (!vpu->variant || !vpu->variant->shared_devices)
++ goto init_new_m2m_dev;
++
++ for_each_matching_node(node, vpu->variant->shared_devices) {
++ struct platform_device *pdev;
++ struct v4l2_m2m_dev *m2m_dev;
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev)
++ continue;
++
++ shared_vpu = platform_get_drvdata(pdev);
++ if (IS_ERR_OR_NULL(shared_vpu) || shared_vpu == vpu) {
++ platform_device_put(pdev);
++ continue;
++ }
++
++ v4l2_m2m_get(shared_vpu->m2m_dev);
++ m2m_dev = shared_vpu->m2m_dev;
++ platform_device_put(pdev);
++
++ of_node_put(node);
++
++ return m2m_dev;
++ }
++
++init_new_m2m_dev:
++ return v4l2_m2m_init(&vpu_m2m_ops);
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -1093,7 +1129,7 @@ static int hantro_probe(struct platform_device *pdev)
+ }
+ platform_set_drvdata(pdev, vpu);
+
+- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
++ vpu->m2m_dev = hantro_get_v4l2_m2m_dev(vpu);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+@@ -1134,7 +1170,7 @@ static int hantro_probe(struct platform_device *pdev)
+ hantro_remove_enc_func(vpu);
+ err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ err_clk_unprepare:
+@@ -1157,7 +1193,7 @@ static int hantro_remove(struct platform_device *pdev)
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+index d89c2c3501aa8..80539f7573c18 100644
+--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+@@ -359,6 +359,12 @@ const struct hantro_variant imx8mq_vpu_variant = {
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+ };
+
++static const struct of_device_id imx8mq_vpu_shared_resources[] __initconst = {
++ { .compatible = "nxp,imx8mq-vpu-g1", },
++ { .compatible = "nxp,imx8mq-vpu-g2", },
++ { /* sentinel */ }
++};
++
+ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+@@ -372,6 +378,7 @@ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mq_vpu_g2_variant = {
+@@ -387,6 +394,7 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mm_vpu_g1_variant = {
+--
+2.51.0
+
--- /dev/null
+From 005206fc2bcce218bc07fd8c7b78367a76494669 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 15:29:33 +0100
+Subject: memory: mtk-smi: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 08c1aeaa45ce0fd18912e92c6705586c8aa5240f ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/5c35a33cfdc359842e034ddd2e9358f10e91fa1f.1702822744.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Stable-dep-of: 6cfa038bddd7 ("memory: mtk-smi: fix device leaks on common probe")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 5a9754442bc75..c9c444d4a64ab 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -566,14 +566,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int mtk_smi_larb_remove(struct platform_device *pdev)
++static void mtk_smi_larb_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+@@ -616,7 +615,7 @@ static const struct dev_pm_ops smi_larb_pm_ops = {
+
+ static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+- .remove = mtk_smi_larb_remove,
++ .remove_new = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+@@ -789,14 +788,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int mtk_smi_common_remove(struct platform_device *pdev)
++static void mtk_smi_common_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+@@ -836,7 +834,7 @@ static const struct dev_pm_ops smi_common_pm_ops = {
+
+ static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+- .remove = mtk_smi_common_remove,
++ .remove_new = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+--
+2.51.0
+
--- /dev/null
+From d799ad10563e6089faa9788eb0f10718526e04b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 1326119288c98..95f1bf2c37785 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -574,6 +574,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From e94dd1228f42da21abc38069ff58d702236627f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:22 +0100
+Subject: memory: mtk-smi: fix device leaks on common probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 6cfa038bddd710f544076ea2ef7792fc82fbedd6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during common probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: 47404757702e ("memory: mtk-smi: Add device link for smi-sub-common")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 5.16: 038ae37c510f
+Cc: stable@vger.kernel.org # 5.16
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-2-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index c9c444d4a64ab..1326119288c98 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -563,6 +563,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
++ put_device(larb->smi_common_dev);
+ return ret;
+ }
+
+@@ -795,6 +796,7 @@ static void mtk_smi_common_remove(struct platform_device *pdev)
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
++ put_device(common->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From 87642ce203de5b0a97cd20cc435b716cf5986939 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:38 +0100
+Subject: mfd: omap-usb-host: Convert to platform remove callback returning
+ void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 418d1e74f8597e0b2d5d0d6e1be8f1f47e68f0a4 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-11-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 24804ba508a3 ("mfd: omap-usb-host: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 787d2ae863752..b61fb9933aa85 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -818,13 +818,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+-static int usbhs_omap_remove(struct platform_device *pdev)
++static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+ /* remove children */
+ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+- return 0;
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+@@ -847,7 +846,7 @@ static struct platform_driver usbhs_omap_driver = {
+ .of_match_table = usbhs_omap_dt_ids,
+ },
+ .probe = usbhs_omap_probe,
+- .remove = usbhs_omap_remove,
++ .remove_new = usbhs_omap_remove,
+ };
+
+ MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+--
+2.51.0
+
--- /dev/null
+From 766530df7c68fc88b9f81fd7b7f07dbaf99e586d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:07:14 +0100
+Subject: mfd: omap-usb-host: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 24804ba508a3e240501c521685a1c4eb9f574f8e ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251219110714.23919-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index b61fb9933aa85..936faa0c26e09 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -822,8 +822,10 @@ static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+- /* remove children */
+- device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
++ if (pdev->dev.of_node)
++ of_platform_depopulate(&pdev->dev);
++ else
++ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+--
+2.51.0
+
--- /dev/null
+From 337ebc31db416ea367530dc04ba5ab7ae9b0d72a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:41 +0100
+Subject: mfd: qcom-pm8xxx: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 19ea1d3953017518d85db35b69b5aea9bc64d630 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-14-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 2f2734ba5273e..8831448371290 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -587,19 +587,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+ return 0;
+ }
+
+-static int pm8xxx_remove(struct platform_device *pdev)
++static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+ irq_domain_remove(chip->irqdomain);
+-
+- return 0;
+ }
+
+ static struct platform_driver pm8xxx_driver = {
+ .probe = pm8xxx_probe,
+- .remove = pm8xxx_remove,
++ .remove_new = pm8xxx_remove,
+ .driver = {
+ .name = "pm8xxx-core",
+ .of_match_table = pm8xxx_id_table,
+--
+2.51.0
+
--- /dev/null
+From a5fc19dddf8645364ed364002044761c3b1d683c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:09:47 +0100
+Subject: mfd: qcom-pm8xxx: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 27a8acea47a93fea6ad0e2df4c20a9b51490e4d9 ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251219110947.24101-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 8831448371290..cbcbff3c95ecb 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -581,17 +581,11 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int pm8xxx_remove_child(struct device *dev, void *unused)
+-{
+- platform_device_unregister(to_platform_device(dev));
+- return 0;
+-}
+-
+ static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+- device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
++ of_platform_depopulate(&pdev->dev);
+ irq_domain_remove(chip->irqdomain);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 9cbe2b5535c151c6546e7ffd315a28bee47ff6cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index e7db6a4e4dc9d..e9ee32b091a41 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -114,6 +114,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -139,7 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -346,6 +348,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From 357540cd40876a18f12b6308b0c588c0d24ee80e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Jan 2021 17:18:36 -0600
+Subject: PCI: Fix printk field formatting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit 62008578b73f16e274070a232b939ba5933bb8ba ]
+
+Previously we used "%#08x" to print a 32-bit value. This fills an
+8-character field with "0x...", but of course many 32-bit values require a
+10-character field "0x12345678" for this format. Fix the formats to avoid
+confusion.
+
+Link: https://lore.kernel.org/r/20230824193712.542167-5-helgaas@kernel.org
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/setup-res.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 967f9a7589239..ceaa69491f5ef 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -104,7 +104,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_read_config_dword(dev, reg, &check);
+
+ if ((new ^ check) & mask) {
+- pci_err(dev, "BAR %d: error updating (%#08x != %#08x)\n",
++ pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n",
+ resno, new, check);
+ }
+
+@@ -113,7 +113,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_write_config_dword(dev, reg + 4, new);
+ pci_read_config_dword(dev, reg + 4, &check);
+ if (check != new) {
+- pci_err(dev, "BAR %d: error updating (high %#08x != %#08x)\n",
++ pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n",
+ resno, new, check);
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From bad09e759b3b9aa49088cae0065a1bb568ddb1af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Mar 2023 19:24:30 +0300
+Subject: PCI: Introduce pci_dev_for_each_resource()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit 09cc900632400079619e9154604fd299c2cc9a5a ]
+
+Instead of open-coding it everywhere introduce a tiny helper that can be
+used to iterate over each resource of a PCI device, and convert the most
+obvious users into it.
+
+While at it drop doubled empty line before pdev_sort_resources().
+
+No functional changes intended.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20230330162434.35055-4-andriy.shevchenko@linux.intel.com
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Krzysztof Wilczyński <kw@linux.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .clang-format | 1 +
+ arch/alpha/kernel/pci.c | 5 ++--
+ arch/arm/kernel/bios32.c | 16 ++++++-------
+ arch/arm/mach-dove/pcie.c | 10 ++++----
+ arch/arm/mach-mv78xx0/pcie.c | 10 ++++----
+ arch/arm/mach-orion5x/pci.c | 10 ++++----
+ arch/mips/pci/ops-bcm63xx.c | 8 +++----
+ arch/mips/pci/pci-legacy.c | 3 +--
+ arch/powerpc/kernel/pci-common.c | 21 ++++++++--------
+ arch/powerpc/platforms/4xx/pci.c | 8 +++----
+ arch/powerpc/platforms/52xx/mpc52xx_pci.c | 5 ++--
+ arch/powerpc/platforms/pseries/pci.c | 16 ++++++-------
+ arch/sh/drivers/pci/pcie-sh7786.c | 10 ++++----
+ arch/sparc/kernel/leon_pci.c | 5 ++--
+ arch/sparc/kernel/pci.c | 10 ++++----
+ arch/sparc/kernel/pcic.c | 5 ++--
+ drivers/pci/remove.c | 5 ++--
+ drivers/pci/setup-bus.c | 27 ++++++++-------------
+ drivers/pci/setup-res.c | 4 +---
+ drivers/pci/vgaarb.c | 17 ++++---------
+ drivers/pci/xen-pcifront.c | 4 +---
+ drivers/pnp/quirks.c | 29 ++++++++---------------
+ include/linux/pci.h | 14 +++++++++++
+ 23 files changed, 111 insertions(+), 132 deletions(-)
+
+diff --git a/.clang-format b/.clang-format
+index 8d01225bfcb7d..d4e2dcb76609a 100644
+--- a/.clang-format
++++ b/.clang-format
+@@ -516,6 +516,7 @@ ForEachMacros:
+ - 'of_property_for_each_string'
+ - 'of_property_for_each_u32'
+ - 'pci_bus_for_each_resource'
++ - 'pci_dev_for_each_resource'
+ - 'pci_doe_for_each_off'
+ - 'pcl_for_each_chunk'
+ - 'pcl_for_each_segment'
+diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
+index 64fbfb0763b29..4458eb7f44f0c 100644
+--- a/arch/alpha/kernel/pci.c
++++ b/arch/alpha/kernel/pci.c
+@@ -288,11 +288,10 @@ pcibios_claim_one_bus(struct pci_bus *b)
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(dev, &b->devices, bus_list) {
++ struct resource *r;
+ int i;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, r, i) {
+ if (r->parent || !r->start || !r->flags)
+ continue;
+ if (pci_has_flag(PCI_PROBE_ONLY) ||
+diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
+index e7ef2b5bea9c2..d334c7fb672b7 100644
+--- a/arch/arm/kernel/bios32.c
++++ b/arch/arm/kernel/bios32.c
+@@ -142,15 +142,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F,
+ */
+ static void pci_fixup_dec21285(struct pci_dev *dev)
+ {
+- int i;
+-
+ if (dev->devfn == 0) {
++ struct resource *r;
++
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+@@ -162,13 +162,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_d
+ static void pci_fixup_ide_bases(struct pci_dev *dev)
+ {
+ struct resource *r;
+- int i;
+
+ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ return;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- r = dev->resource + i;
++ pci_dev_for_each_resource(dev, r) {
+ if ((r->start & ~0x80) == 0x374) {
+ r->start |= 2;
+ r->end = r->start;
+diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
+index 754ca381f600a..3044b7e038901 100644
+--- a/arch/arm/mach-dove/pcie.c
++++ b/arch/arm/mach-dove/pcie.c
+@@ -142,14 +142,14 @@ static struct pci_ops pcie_ops = {
+ static void rc_pci_fixup(struct pci_dev *dev)
+ {
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+- int i;
++ struct resource *r;
+
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
+index 6190f538a124f..0ebc909ea273f 100644
+--- a/arch/arm/mach-mv78xx0/pcie.c
++++ b/arch/arm/mach-mv78xx0/pcie.c
+@@ -186,14 +186,14 @@ static struct pci_ops pcie_ops = {
+ static void rc_pci_fixup(struct pci_dev *dev)
+ {
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+- int i;
++ struct resource *r;
+
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
+index 888fdc9099c52..3313bc5a63ea6 100644
+--- a/arch/arm/mach-orion5x/pci.c
++++ b/arch/arm/mach-orion5x/pci.c
+@@ -522,14 +522,14 @@ static int __init pci_setup(struct pci_sys_data *sys)
+ static void rc_pci_fixup(struct pci_dev *dev)
+ {
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+- int i;
++ struct resource *r;
+
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
+index dc6dc2741272e..b0ea023c47c02 100644
+--- a/arch/mips/pci/ops-bcm63xx.c
++++ b/arch/mips/pci/ops-bcm63xx.c
+@@ -413,18 +413,18 @@ struct pci_ops bcm63xx_cb_ops = {
+ static void bcm63xx_fixup(struct pci_dev *dev)
+ {
+ static int io_window = -1;
+- int i, found, new_io_window;
++ int found, new_io_window;
++ struct resource *r;
+ u32 val;
+
+ /* look for any io resource */
+ found = 0;
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
++ pci_dev_for_each_resource(dev, r) {
++ if (resource_type(r) == IORESOURCE_IO) {
+ found = 1;
+ break;
+ }
+ }
+-
+ if (!found)
+ return;
+
+diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
+index 468722c8a5c61..ec2567f8efd83 100644
+--- a/arch/mips/pci/pci-legacy.c
++++ b/arch/mips/pci/pci-legacy.c
+@@ -249,12 +249,11 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+- for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
++ pci_dev_for_each_resource(dev, r, idx) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<idx)))
+ continue;
+
+- r = &dev->resource[idx];
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((idx == PCI_ROM_RESOURCE) &&
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index d67cf79bf5d03..e88d7c9feeec3 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -880,6 +880,7 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+ static void pcibios_fixup_resources(struct pci_dev *dev)
+ {
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
++ struct resource *res;
+ int i;
+
+ if (!hose) {
+@@ -891,9 +892,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
+ if (dev->is_virtfn)
+ return;
+
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- struct resource *res = dev->resource + i;
++ pci_dev_for_each_resource(dev, res, i) {
+ struct pci_bus_region reg;
++
+ if (!res->flags)
+ continue;
+
+@@ -1452,11 +1453,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
++ struct resource *r;
+ int i;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, r, i) {
+ if (r->parent || !r->start || !r->flags)
+ continue;
+
+@@ -1705,19 +1705,20 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb);
+
+ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
+ {
+- int i, class = dev->class >> 8;
++ int class = dev->class >> 8;
+ /* When configured as agent, programming interface = 1 */
+ int prog_if = dev->class & 0xf;
++ struct resource *r;
+
+ if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
+ class == PCI_CLASS_BRIDGE_OTHER) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+ (prog_if == 0) &&
+ (dev->bus->parent == NULL)) {
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
+index ca5dd7a5842ac..07dcc2b8007f9 100644
+--- a/arch/powerpc/platforms/4xx/pci.c
++++ b/arch/powerpc/platforms/4xx/pci.c
+@@ -57,7 +57,7 @@ static inline int ppc440spe_revA(void)
+ static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
+ {
+ struct pci_controller *hose;
+- int i;
++ struct resource *r;
+
+ if (dev->devfn != 0 || dev->bus->self != NULL)
+ return;
+@@ -79,9 +79,9 @@ static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
+ /* Hide the PCI host BARs from the kernel as their content doesn't
+ * fit well in the resource management
+ */
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = r->end = 0;
++ r->flags = 0;
+ }
+
+ printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+index 859e2818c43d5..0ca4401ba7819 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+@@ -327,14 +327,13 @@ mpc52xx_pci_setup(struct pci_controller *hose,
+ static void
+ mpc52xx_pci_fixup_resources(struct pci_dev *dev)
+ {
+- int i;
++ struct resource *res;
+
+ pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device);
+
+ /* We don't rely on boot loader for PCI and resets all
+ devices */
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- struct resource *res = &dev->resource[i];
++ pci_dev_for_each_resource(dev, res) {
+ if (res->end > res->start) { /* Only valid resources */
+ res->end -= res->start;
+ res->start = 0;
+diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
+index 6e671c3809ecf..f6cd534797864 100644
+--- a/arch/powerpc/platforms/pseries/pci.c
++++ b/arch/powerpc/platforms/pseries/pci.c
+@@ -240,7 +240,7 @@ void __init pSeries_final_fixup(void)
+ */
+ static void fixup_winbond_82c105(struct pci_dev* dev)
+ {
+- int i;
++ struct resource *r;
+ unsigned int reg;
+
+ if (!machine_is(pseries))
+@@ -251,14 +251,14 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
+ /* Enable LEGIRQ to use INTC instead of ISA interrupts */
+ pci_write_config_dword(dev, 0x40, reg | (1<<11));
+
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) {
++ pci_dev_for_each_resource(dev, r) {
+ /* zap the 2nd function of the winbond chip */
+- if (dev->resource[i].flags & IORESOURCE_IO
+- && dev->bus->number == 0 && dev->devfn == 0x81)
+- dev->resource[i].flags &= ~IORESOURCE_IO;
+- if (dev->resource[i].start == 0 && dev->resource[i].end) {
+- dev->resource[i].flags = 0;
+- dev->resource[i].end = 0;
++ if (dev->bus->number == 0 && dev->devfn == 0x81 &&
++ r->flags & IORESOURCE_IO)
++ r->flags &= ~IORESOURCE_IO;
++ if (r->start == 0 && r->end) {
++ r->flags = 0;
++ r->end = 0;
+ }
+ }
+ }
+diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
+index b0c2a5238d049..4f5e49f10805e 100644
+--- a/arch/sh/drivers/pci/pcie-sh7786.c
++++ b/arch/sh/drivers/pci/pcie-sh7786.c
+@@ -140,12 +140,12 @@ static void sh7786_pci_fixup(struct pci_dev *dev)
+ * Prevent enumeration of root complex resources.
+ */
+ if (pci_is_root_bus(dev->bus) && dev->devfn == 0) {
+- int i;
++ struct resource *r;
+
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- dev->resource[i].start = 0;
+- dev->resource[i].end = 0;
+- dev->resource[i].flags = 0;
++ pci_dev_for_each_resource(dev, r) {
++ r->start = 0;
++ r->end = 0;
++ r->flags = 0;
+ }
+ }
+ }
+diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
+index 3a73bc466f95d..8de6646e9ce85 100644
+--- a/arch/sparc/kernel/leon_pci.c
++++ b/arch/sparc/kernel/leon_pci.c
+@@ -63,15 +63,14 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
+
+ int pcibios_enable_device(struct pci_dev *dev, int mask)
+ {
++ struct resource *res;
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *res = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, res, i) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index 5637b37ba9114..f66005ce4cb56 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -664,11 +664,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
++ struct resource *r;
+ int i;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, r, i) {
+ if (r->parent || !r->start || !r->flags)
+ continue;
+
+@@ -725,15 +724,14 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
+
+ int pcibios_enable_device(struct pci_dev *dev, int mask)
+ {
++ struct resource *res;
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *res = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, res, i) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
+index ee4c9a9a171cc..25fe0a0617325 100644
+--- a/arch/sparc/kernel/pcic.c
++++ b/arch/sparc/kernel/pcic.c
+@@ -643,15 +643,14 @@ void pcibios_fixup_bus(struct pci_bus *bus)
+
+ int pcibios_enable_device(struct pci_dev *dev, int mask)
+ {
++ struct resource *res;
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *res = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, res, i) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 22d39e12b236a..30a787d45d2e5 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -5,10 +5,9 @@
+
+ static void pci_free_resources(struct pci_dev *dev)
+ {
+- int i;
++ struct resource *res;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *res = dev->resource + i;
++ pci_dev_for_each_resource(dev, res) {
+ if (res->parent)
+ release_resource(res);
+ }
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 3ce68adda9b7c..05cebc39f7642 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -124,20 +124,17 @@ static resource_size_t get_res_add_align(struct list_head *head,
+ return dev_res ? dev_res->min_align : 0;
+ }
+
+-
+ /* Sort resources by alignment */
+ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
+ {
++ struct resource *r;
+ int i;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r;
++ pci_dev_for_each_resource(dev, r, i) {
+ struct pci_dev_resource *dev_res, *tmp;
+ resource_size_t r_align;
+ struct list_head *n;
+
+- r = &dev->resource[i];
+-
+ if (r->flags & IORESOURCE_PCI_FIXED)
+ continue;
+
+@@ -891,10 +888,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+
+ min_align = window_alignment(bus, IORESOURCE_IO);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- int i;
++ struct resource *r;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r = &dev->resource[i];
++ pci_dev_for_each_resource(dev, r) {
+ unsigned long r_size;
+
+ if (r->parent || !(r->flags & IORESOURCE_IO))
+@@ -1010,10 +1006,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ size = 0;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
++ struct resource *r;
+ int i;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *r = &dev->resource[i];
++ pci_dev_for_each_resource(dev, r, i) {
+ resource_size_t r_size;
+
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+@@ -1354,11 +1350,10 @@ static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
+ */
+ static void pdev_assign_fixed_resources(struct pci_dev *dev)
+ {
+- int i;
++ struct resource *r;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++ pci_dev_for_each_resource(dev, r) {
+ struct pci_bus *b;
+- struct resource *r = &dev->resource[i];
+
+ if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
+ !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+@@ -1791,11 +1786,9 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
+ struct resource *mmio,
+ struct resource *mmio_pref)
+ {
+- int i;
+-
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- struct resource *res = &dev->resource[i];
++ struct resource *res;
+
++ pci_dev_for_each_resource(dev, res) {
+ if (resource_type(res) == IORESOURCE_IO) {
+ remove_dev_resource(io, dev, res);
+ } else if (resource_type(res) == IORESOURCE_MEM) {
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index b492e67c3d871..967f9a7589239 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -484,12 +484,10 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++ pci_dev_for_each_resource(dev, r, i) {
+ if (!(mask & (1 << i)))
+ continue;
+
+- r = &dev->resource[i];
+-
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((i == PCI_ROM_RESOURCE) &&
+diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
+index f80b6ec88dc30..5a696078b382b 100644
+--- a/drivers/pci/vgaarb.c
++++ b/drivers/pci/vgaarb.c
+@@ -548,10 +548,8 @@ static bool vga_is_firmware_default(struct pci_dev *pdev)
+ #if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
++ struct resource *r;
+ u64 limit;
+- resource_size_t start, end;
+- unsigned long flags;
+- int i;
+
+ /* Select the device owning the boot framebuffer if there is one */
+
+@@ -561,19 +559,14 @@ static bool vga_is_firmware_default(struct pci_dev *pdev)
+ limit = base + size;
+
+ /* Does firmware framebuffer belong to us? */
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- flags = pci_resource_flags(pdev, i);
+-
+- if ((flags & IORESOURCE_MEM) == 0)
++ pci_dev_for_each_resource(pdev, r) {
++ if (resource_type(r) != IORESOURCE_MEM)
+ continue;
+
+- start = pci_resource_start(pdev, i);
+- end = pci_resource_end(pdev, i);
+-
+- if (!start || !end)
++ if (!r->start || !r->end)
+ continue;
+
+- if (base < start || limit >= end)
++ if (base < r->start || limit >= r->end)
+ continue;
+
+ return true;
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index fcd029ca2eb18..83c0ab50676df 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -390,9 +390,7 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
+ int i;
+ struct resource *r;
+
+- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+- r = &dev->resource[i];
+-
++ pci_dev_for_each_resource(dev, r, i) {
+ if (!r->parent && r->start && r->flags) {
+ dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
+ pci_name(dev), i);
+diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
+index ac98b9919029c..6085a1471de21 100644
+--- a/drivers/pnp/quirks.c
++++ b/drivers/pnp/quirks.c
+@@ -229,8 +229,7 @@ static void quirk_ad1815_mpu_resources(struct pnp_dev *dev)
+ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ {
+ struct pci_dev *pdev = NULL;
+- struct resource *res;
+- resource_size_t pnp_start, pnp_end, pci_start, pci_end;
++ struct resource *res, *r;
+ int i, j;
+
+ /*
+@@ -243,32 +242,26 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ * so they won't be claimed by the PNP system driver.
+ */
+ for_each_pci_dev(pdev) {
+- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+- unsigned long flags, type;
++ pci_dev_for_each_resource(pdev, r, i) {
++ unsigned long type = resource_type(r);
+
+- flags = pci_resource_flags(pdev, i);
+- type = flags & (IORESOURCE_IO | IORESOURCE_MEM);
+- if (!type || pci_resource_len(pdev, i) == 0)
++ if (!(type == IORESOURCE_IO || type == IORESOURCE_MEM) ||
++ resource_size(r) == 0)
+ continue;
+
+- if (flags & IORESOURCE_UNSET)
++ if (r->flags & IORESOURCE_UNSET)
+ continue;
+
+- pci_start = pci_resource_start(pdev, i);
+- pci_end = pci_resource_end(pdev, i);
+ for (j = 0;
+ (res = pnp_get_resource(dev, type, j)); j++) {
+ if (res->start == 0 && res->end == 0)
+ continue;
+
+- pnp_start = res->start;
+- pnp_end = res->end;
+-
+ /*
+ * If the PNP region doesn't overlap the PCI
+ * region at all, there's no problem.
+ */
+- if (pnp_end < pci_start || pnp_start > pci_end)
++ if (!resource_overlaps(res, r))
+ continue;
+
+ /*
+@@ -278,8 +271,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ * PNP device describes a bridge with PCI
+ * behind it.
+ */
+- if (pnp_start <= pci_start &&
+- pnp_end >= pci_end)
++ if (res->start <= r->start && res->end >= r->end)
+ continue;
+
+ /*
+@@ -288,9 +280,8 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ * driver from requesting its resources.
+ */
+ dev_warn(&dev->dev,
+- "disabling %pR because it overlaps "
+- "%s BAR %d %pR\n", res,
+- pci_name(pdev), i, &pdev->resource[i]);
++ "disabling %pR because it overlaps %s BAR %d %pR\n",
++ res, pci_name(pdev), i, r);
+ res->flags |= IORESOURCE_DISABLED;
+ }
+ }
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index e10b54642b7f2..8b13be1633db1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2029,6 +2029,20 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+ (pci_resource_end((dev), (bar)) ? \
+ resource_size(pci_resource_n((dev), (bar))) : 0)
+
++#define __pci_dev_for_each_res0(dev, res, ...) \
++ for (unsigned int __b = 0; \
++ res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
++ __b++)
++
++#define __pci_dev_for_each_res1(dev, res, __b) \
++ for (__b = 0; \
++ res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
++ __b++)
++
++#define pci_dev_for_each_resource(dev, res, ...) \
++ CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
++ (dev, res, __VA_ARGS__)
++
+ /*
+ * Similar to the helpers above, these manipulate per-pci_dev
+ * driver-specific data. They are really just a wrapper around
+--
+2.51.0
+
--- /dev/null
+From bc7ae82ce910fbe0b16ce679e1edf5071a957b4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Nov 2021 16:56:05 +0530
+Subject: PCI: Update BAR # and window messages
+
+From: Puranjay Mohan <puranjay12@gmail.com>
+
+[ Upstream commit 65f8e0beac5a495b8f3b387add1f9f4470678cb5 ]
+
+The PCI log messages print the register offsets at some places and BAR
+numbers at other places. There is no uniformity in this logging mechanism.
+It would be better to print names than register offsets.
+
+Add a helper function that aids in printing more meaningful information
+about the BAR numbers like "VF BAR", "ROM", "bridge window", etc. This
+function can be called while printing PCI log messages.
+
+[bhelgaas: fold in Lukas' static array suggestion from
+https: //lore.kernel.org/all/20211106115831.GA7452@wunner.de/]
+Link: https://lore.kernel.org/r/20211106112606.192563-2-puranjay12@gmail.com
+Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pci.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/pci/pci.h | 2 ++
+ 2 files changed, 62 insertions(+)
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 516eaec6488de..2975a5c781df4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -844,6 +844,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
+ }
+ EXPORT_SYMBOL(pci_find_resource);
+
++/**
++ * pci_resource_name - Return the name of the PCI resource
++ * @dev: PCI device to query
++ * @i: index of the resource
++ *
++ * Return the standard PCI resource (BAR) name according to their index.
++ */
++const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
++{
++ static const char * const bar_name[] = {
++ "BAR 0",
++ "BAR 1",
++ "BAR 2",
++ "BAR 3",
++ "BAR 4",
++ "BAR 5",
++ "ROM",
++#ifdef CONFIG_PCI_IOV
++ "VF BAR 0",
++ "VF BAR 1",
++ "VF BAR 2",
++ "VF BAR 3",
++ "VF BAR 4",
++ "VF BAR 5",
++#endif
++ "bridge window", /* "io" included in %pR */
++ "bridge window", /* "mem" included in %pR */
++ "bridge window", /* "mem pref" included in %pR */
++ };
++ static const char * const cardbus_name[] = {
++ "BAR 1",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++#ifdef CONFIG_PCI_IOV
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++#endif
++ "CardBus bridge window 0", /* I/O */
++ "CardBus bridge window 1", /* I/O */
++ "CardBus bridge window 0", /* mem */
++ "CardBus bridge window 1", /* mem */
++ };
++
++ if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
++ i < ARRAY_SIZE(cardbus_name))
++ return cardbus_name[i];
++
++ if (i < ARRAY_SIZE(bar_name))
++ return bar_name[i];
++
++ return "unknown";
++}
++
+ /**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index fc760fd3ad948..4fb02de24271b 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -251,6 +251,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *fail_head);
+ bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+
++const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
++
+ void pci_reassigndev_resource_alignment(struct pci_dev *dev);
+ void pci_disable_bridge_window(struct pci_dev *dev);
+ struct pci_bus *pci_bus_get(struct pci_bus *bus);
+--
+2.51.0
+
--- /dev/null
+From ed223438068051afab1e9545ed85b7896543d942 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Nov 2021 16:56:06 +0530
+Subject: PCI: Use resource names in PCI log messages
+
+From: Puranjay Mohan <puranjay12@gmail.com>
+
+[ Upstream commit dc4e6f21c3f844ebc1c52b6920b8ec5dfc73f4e8 ]
+
+Use the pci_resource_name() to get the name of the resource and use it
+while printing log messages.
+
+[bhelgaas: rename to match struct resource * names, also use names in other
+BAR messages]
+Link: https://lore.kernel.org/r/20211106112606.192563-3-puranjay12@gmail.com
+Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/iov.c | 7 ++--
+ drivers/pci/pci.c | 25 +++++++-------
+ drivers/pci/probe.c | 26 +++++++--------
+ drivers/pci/quirks.c | 15 ++++++---
+ drivers/pci/setup-bus.c | 30 +++++++++++------
+ drivers/pci/setup-res.c | 72 +++++++++++++++++++++++------------------
+ 6 files changed, 103 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 132bd4447534c..3965e003d7b57 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -750,6 +750,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ u16 ctrl, total;
+ struct pci_sriov *iov;
+ struct resource *res;
++ const char *res_name;
+ struct pci_dev *pdev;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
+@@ -790,6 +791,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
++ res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
++
+ /*
+ * If it is already FIXED, don't change it, something
+ * (perhaps EA or header fixups) wants it this way.
+@@ -807,8 +810,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ }
+ iov->barsz[i] = resource_size(res);
+ res->end = res->start + resource_size(res) * total - 1;
+- pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
+- i, res, i, total);
++ pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
++ res_name, res, i, total);
+ i += bar64;
+ nres++;
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 2975a5c781df4..d2d6b7da8c66c 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3373,6 +3373,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ static int pci_ea_read(struct pci_dev *dev, int offset)
+ {
+ struct resource *res;
++ const char *res_name;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+@@ -3402,6 +3403,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
++ res_name = pci_resource_name(dev, bei);
+ if (!res) {
+ pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+@@ -3475,16 +3477,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+- pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+- bei, res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+- pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+- res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+- pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+- bei - PCI_EA_BEI_VF_BAR0, res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else
+- pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
++ pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
+ bei, res, prop);
+
+ out:
+@@ -6704,14 +6706,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ resource_size_t align, bool resize)
+ {
+ struct resource *r = &dev->resource[bar];
++ const char *r_name = pci_resource_name(dev, bar);
+ resource_size_t size;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ return;
+
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+- pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+- bar, r, (unsigned long long)align);
++ pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
++ r_name, r, (unsigned long long)align);
+ return;
+ }
+
+@@ -6747,8 +6750,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ * devices and we use the second.
+ */
+
+- pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
+- bar, r, (unsigned long long)align);
++ pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
++ r_name, r, (unsigned long long)align);
+
+ if (resize) {
+ r->start = 0;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index ea7db1bd21143..8f99607e0a526 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -181,6 +181,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ u64 l64, sz64, mask64;
+ u16 orig_cmd;
+ struct pci_bus_region region, inverted_region;
++ const char *res_name = pci_resource_name(dev, res - dev->resource);
+
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+
+@@ -255,8 +256,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+
+ sz64 = pci_size(l64, sz64, mask64);
+ if (!sz64) {
+- pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
+- pos);
++ pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
+ goto fail;
+ }
+
+@@ -266,8 +266,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ res->start = 0;
+ res->end = 0;
+- pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+- pos, (unsigned long long)sz64);
++ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
++ res_name, (unsigned long long)sz64);
+ goto out;
+ }
+
+@@ -276,8 +276,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = sz64 - 1;
+- pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
+- pos, (unsigned long long)l64);
++ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
++ res_name, (unsigned long long)l64);
+ goto out;
+ }
+ }
+@@ -303,8 +303,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = region.end - region.start;
+- pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+- pos, (unsigned long long)region.start);
++ pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
++ res_name, (unsigned long long)region.start);
+ }
+
+ goto out;
+@@ -314,7 +314,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags = 0;
+ out:
+ if (res->flags)
+- pci_info(dev, "reg 0x%x: %pR\n", pos, res);
++ pci_info(dev, "%s %pR\n", res_name, res);
+
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+ }
+@@ -1948,14 +1948,14 @@ int pci_setup_device(struct pci_dev *dev)
+ res = &dev->resource[0];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
++ pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
+ res);
+ region.start = 0x3F6;
+ region.end = 0x3F6;
+ res = &dev->resource[1];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
++ pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
+ res);
+ }
+ if ((progif & 4) == 0) {
+@@ -1964,14 +1964,14 @@ int pci_setup_device(struct pci_dev *dev)
+ res = &dev->resource[2];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
++ pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
+ res);
+ region.start = 0x376;
+ region.end = 0x376;
+ res = &dev->resource[3];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
++ pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
+ res);
+ }
+ }
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index ce57d59a047e4..9a325e1128ed6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -475,13 +475,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ struct resource *r = &dev->resource[i];
++ const char *r_name = pci_resource_name(dev, i);
+
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+ r->end = PAGE_SIZE - 1;
+ r->start = 0;
+ r->flags |= IORESOURCE_UNSET;
+- pci_info(dev, "expanded BAR %d to page size: %pR\n",
+- i, r);
++ pci_info(dev, "%s %pR: expanded to page size\n",
++ r_name, r);
+ }
+ }
+ }
+@@ -510,6 +511,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
+ u32 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + pos;
++ const char *res_name = pci_resource_name(dev, pos);
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+@@ -527,8 +529,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+- pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+- name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
++ pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name);
+ }
+
+ /*
+@@ -575,6 +576,12 @@ static void quirk_io_region(struct pci_dev *dev, int port,
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
++ /*
++ * "res" is typically a bridge window resource that's not being
++ * used for a bridge window, so it's just a place to stash this
++ * non-standard resource. Printing "nr" or pci_resource_name() of
++ * it doesn't really make sense.
++ */
+ if (!pci_claim_resource(dev, nr))
+ pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
+ }
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 05cebc39f7642..9c078af9e166b 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ struct list_head *head)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_dev_resource *add_res, *tmp;
+ struct pci_dev_resource *dev_res;
+ resource_size_t add_size, align;
+@@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ bool found_match = false;
+
+ res = add_res->res;
++
+ /* Skip resource that has been reset */
+ if (!res->flags)
+ goto out;
+@@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ continue;
+
+ idx = res - &add_res->dev->resource[0];
++ res_name = pci_resource_name(add_res->dev, idx);
+ add_size = add_res->add_size;
+ align = add_res->min_align;
+ if (!resource_size(res)) {
+@@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(add_res->dev, idx,
+ add_size, align))
+- pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
+- (unsigned long long) add_size, idx,
+- res);
++ pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
++ res_name, res,
++ (unsigned long long) add_size);
+ }
+ out:
+ list_del(&add_res->list);
+@@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus);
+ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ unsigned long io_mask;
+ u8 io_base_lo, io_limit_lo;
+@@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+
+ /* Set up the top and bottom of the PCI I/O segment for this bus */
+ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_IO) {
+ pci_read_config_word(bridge, PCI_IO_BASE, &l);
+@@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ l = ((u16) io_limit_lo << 8) | io_base_lo;
+ /* Set up upper 16 bits of I/O base/limit */
+ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ /* Clear upper 16 bits of I/O base/limit */
+ io_upper16 = 0;
+@@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ u32 l;
+
+ /* Set up the top and bottom of the PCI Memory segment for this bus */
+ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ l = (region.start >> 16) & 0xfff0;
+ l |= region.end & 0xfff00000;
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ l = 0x0000fff0;
+ }
+@@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
+ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ u32 l, bu, lu;
+
+@@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ /* Set up PREF base/limit */
+ bu = lu = 0;
+ res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_PREFETCH) {
+ l = (region.start >> 16) & 0xfff0;
+@@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ }
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ l = 0x0000fff0;
+ }
+@@ -1010,6 +1019,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ int i;
+
+ pci_dev_for_each_resource(dev, r, i) {
++ const char *r_name = pci_resource_name(dev, i);
+ resource_size_t r_size;
+
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+@@ -1040,8 +1050,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ if (order < 0)
+ order = 0;
+ if (order >= ARRAY_SIZE(aligns)) {
+- pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
+- i, r, (unsigned long long) align);
++ pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
++ r_name, r, (unsigned long long) align);
+ r->flags = 0;
+ continue;
+ }
+@@ -2232,6 +2242,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
+ i++) {
+ struct resource *res = &bridge->resource[i];
++ const char *res_name = pci_resource_name(bridge, i);
+
+ if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
+ continue;
+@@ -2244,8 +2255,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+ if (ret)
+ goto cleanup;
+
+- pci_info(bridge, "BAR %d: releasing %pR\n",
+- i, res);
++ pci_info(bridge, "%s %pR: releasing\n", res_name, res);
+
+ if (res->parent)
+ release_resource(res);
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index ceaa69491f5ef..c6d933ddfd464 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ u32 new, check, mask;
+ int reg;
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+@@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_read_config_dword(dev, reg, &check);
+
+ if ((new ^ check) & mask) {
+- pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n",
+- resno, new, check);
++ pci_err(dev, "%s: error updating (%#010x != %#010x)\n",
++ res_name, new, check);
+ }
+
+ if (res->flags & IORESOURCE_MEM_64) {
+@@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_write_config_dword(dev, reg + 4, new);
+ pci_read_config_dword(dev, reg + 4, &check);
+ if (check != new) {
+- pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n",
+- resno, new, check);
++ pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
++ res_name, new, check);
+ }
+ }
+
+@@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ int pci_claim_resource(struct pci_dev *dev, int resource)
+ {
+ struct resource *res = &dev->resource[resource];
++ const char *res_name = pci_resource_name(dev, resource);
+ struct resource *root, *conflict;
+
+ if (res->flags & IORESOURCE_UNSET) {
+- pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
+- resource, res);
++ pci_info(dev, "%s %pR: can't claim; no address assigned\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
+
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+- pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+- resource, res);
++ pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n",
++ res_name, res);
+ res->flags |= IORESOURCE_UNSET;
+ return -EINVAL;
+ }
+
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+- pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+- resource, res, conflict->name, conflict);
++ pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n",
++ res_name, res, conflict->name, conflict);
+ res->flags |= IORESOURCE_UNSET;
+ return -EBUSY;
+ }
+@@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ {
+ struct resource *root, *conflict;
+ resource_size_t fw_addr, start, end;
++ const char *res_name = pci_resource_name(dev, resno);
+
+ fw_addr = pcibios_retrieve_fw_addr(dev, resno);
+ if (!fw_addr)
+@@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ root = &iomem_resource;
+ }
+
+- pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
+- resno, res);
++ pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+- pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
+- resno, res, conflict->name, conflict);
++ pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res,
++ conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ res->flags |= IORESOURCE_UNSET;
+@@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ int pci_assign_resource(struct pci_dev *dev, int resno)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+ resource_size_t align, size;
+ int ret;
+
+@@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ res->flags |= IORESOURCE_UNSET;
+ align = pci_resource_alignment(dev, res);
+ if (!align) {
+- pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
+- resno, res);
++ pci_info(dev, "%s %pR: can't assign; bogus alignment\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0) {
+- pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
++ pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res);
+ ret = pci_revert_fw_address(res, dev, resno, size);
+ }
+
+ if (ret < 0) {
+- pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
++ pci_info(dev, "%s %pR: failed to assign\n", res_name, res);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
++ pci_info(dev, "%s %pR: assigned\n", res_name, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+@@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ }
+ EXPORT_SYMBOL(pci_assign_resource);
+
+-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+- resource_size_t min_align)
++int pci_reassign_resource(struct pci_dev *dev, int resno,
++ resource_size_t addsize, resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+ unsigned long flags;
+ resource_size_t new_size;
+ int ret;
+@@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ flags = res->flags;
+ res->flags |= IORESOURCE_UNSET;
+ if (!res->parent) {
+- pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+- resno, res);
++ pci_info(dev, "%s %pR: can't reassign; unassigned resource\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (ret) {
+ res->flags = flags;
+- pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
+- resno, res, (unsigned long long) addsize);
++ pci_info(dev, "%s %pR: failed to expand by %#llx\n",
++ res_name, res, (unsigned long long) addsize);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
+- resno, res, (unsigned long long) addsize);
++ pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n",
++ res_name, res, (unsigned long long) addsize);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+@@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ void pci_release_resource(struct pci_dev *dev, int resno)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+
+- pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
++ pci_info(dev, "%s %pR: releasing\n", res_name, res);
+
+ if (!res->parent)
+ return;
+@@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ u16 cmd, old_cmd;
+ int i;
+ struct resource *r;
++ const char *r_name;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+@@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ if (!(mask & (1 << i)))
+ continue;
+
++ r_name = pci_resource_name(dev, i);
++
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((i == PCI_ROM_RESOURCE) &&
+@@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ continue;
+
+ if (r->flags & IORESOURCE_UNSET) {
+- pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
+- i, r);
++ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
++ r_name, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+- pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
+- i, r);
++ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
++ r_name, r);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 72be1ccc5cd7ab049de1390354afad19e418247e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Dec 2025 16:56:54 +0200
+Subject: PCI: Use resource_set_range() that correctly sets ->end
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 11721c45a8266a9d0c9684153d20e37159465f96 ]
+
+__pci_read_base() sets resource start and end addresses when resource
+is larger than 4G but pci_bus_addr_t or resource_size_t are not capable
+of representing 64-bit PCI addresses. This creates a problematic
+resource that has non-zero flags but the start and end addresses do not
+yield to resource size of 0 but 1.
+
+Replace custom resource addresses setup with resource_set_range()
+that correctly sets end address as -1 which results in resource_size()
+returning 0.
+
+For consistency, also use resource_set_range() in the other branch that
+does size based resource setup.
+
+Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
+Link: https://lore.kernel.org/all/20251207215359.28895-1-ansuelsmth@gmail.com/T/#m990492684913c5a158ff0e5fc90697d8ad95351b
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Cc: stable@vger.kernel.org
+Cc: Christian Marangi <ansuelsmth@gmail.com>
+Link: https://patch.msgid.link/20251208145654.5294-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/probe.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 8f99607e0a526..02f3fbe78c46f 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -264,8 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
+ && sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+- res->start = 0;
+- res->end = 0;
++ resource_set_range(res, 0, 0);
+ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+ res_name, (unsigned long long)sz64);
+ goto out;
+@@ -274,8 +273,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
+ res->flags |= IORESOURCE_UNSET;
+- res->start = 0;
+- res->end = sz64 - 1;
++ resource_set_range(res, 0, sz64);
+ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+ res_name, (unsigned long long)l64);
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From bdc67f12f4413b65e0a6761a8d6cd41996b22f74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 13:06:03 +0300
+Subject: resource: Add resource set range and size helpers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 9fb6fef0fb49124291837af1da5028f79d53f98e ]
+
+Setting the end address for a resource with a given size lacks a helper and
+is therefore coded manually unlike the getter side which has a helper for
+resource size calculation. Also, almost all callsites that calculate the
+end address for a resource also set the start address right before it like
+this:
+
+ res->start = start_addr;
+ res->end = res->start + size - 1;
+
+Add resource_set_range(res, start_addr, size) that sets the start address
+and calculates the end address to simplify this often repeated fragment.
+
+Also add resource_set_size() for the cases where setting the start address
+of the resource is not necessary but mention in its kerneldoc that
+resource_set_range() is preferred when setting both addresses.
+
+Link: https://lore.kernel.org/r/20240614100606.15830-2-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ioport.h | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 4ae3c541ea6f4..a81579821b673 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -216,6 +216,38 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
++
++/**
++ * resource_set_size - Calculate resource end address from size and start
++ * @res: Resource descriptor
++ * @size: Size of the resource
++ *
++ * Calculate the end address for @res based on @size.
++ *
++ * Note: The start address of @res must be set when calling this function.
++ * Prefer resource_set_range() if setting both the start address and @size.
++ */
++static inline void resource_set_size(struct resource *res, resource_size_t size)
++{
++ res->end = res->start + size - 1;
++}
++
++/**
++ * resource_set_range - Set resource start and end addresses
++ * @res: Resource descriptor
++ * @start: Start address for the resource
++ * @size: Size of the resource
++ *
++ * Set @res start address and calculate the end address based on @size.
++ */
++static inline void resource_set_range(struct resource *res,
++ resource_size_t start,
++ resource_size_t size)
++{
++ res->start = start;
++ resource_set_size(res, size);
++}
++
+ static inline resource_size_t resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+--
+2.51.0
+
btrfs-fix-incorrect-key-offset-in-error-message-in-c.patch
btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
bpf-fix-stack-out-of-bounds-write-in-devmap.patch
+memory-mtk-smi-convert-to-platform-remove-callback-r.patch
+memory-mtk-smi-fix-device-leaks-on-common-probe.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+pci-introduce-pci_dev_for_each_resource.patch
+pci-fix-printk-field-formatting.patch
+pci-update-bar-and-window-messages.patch
+pci-use-resource-names-in-pci-log-messages.patch
+resource-add-resource-set-range-and-size-helpers.patch
+pci-use-resource_set_range-that-correctly-sets-end.patch
+media-hantro-disable-multicore-support.patch
+media-v4l2-mem2mem-add-a-kref-to-the-v4l2_m2m_dev-st.patch
+media-verisilicon-avoid-g2-bus-error-while-decoding-.patch
+kvm-x86-pmu-provide-error-semantics-for-unsupported-.patch
+kvm-x86-fix-kvm_get_msrs-stack-info-leak.patch
+kvm-x86-rename-kvm_msr_ret_invalid-to-kvm_msr_ret_un.patch
+kvm-x86-return-unsupported-instead-of-invalid-on-acc.patch
+media-tegra-video-use-accessors-for-pad-config-try_-.patch
+media-tegra-video-fix-memory-leak-in-__tegra_channel.patch
+media-camss-vfe-480-multiple-outputs-support-for-sm8.patch
+media-qcom-camss-vfe-fix-out-of-bounds-access-in-vfe.patch
+kvm-x86-warn-if-a-vcpu-gets-a-valid-wakeup-that-kvm-.patch
+kvm-x86-ignore-ebusy-when-checking-nested-events-fro.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+bus-omap-ocp2scp-convert-to-platform-remove-callback.patch
+bus-omap-ocp2scp-fix-of-populate-on-driver-rebind.patch
+driver-core-make-state_synced-device-attribute-write.patch
+driver-core-add-a-guard-definition-for-the-device_lo.patch
+driver-core-enforce-device_lock-for-driver_match_dev.patch
+ext4-make-ext4_es_remove_extent-return-void.patch
+ext4-get-rid-of-ppath-in-ext4_find_extent.patch
+ext4-get-rid-of-ppath-in-ext4_ext_create_new_leaf.patch
+ext4-get-rid-of-ppath-in-ext4_ext_insert_extent.patch
+ext4-get-rid-of-ppath-in-ext4_split_extent_at.patch
+ext4-subdivide-ext4_ext_data_valid1.patch
+ext4-don-t-zero-the-entire-extent-if-ext4_ext_data_p.patch
+ext4-drop-extent-cache-after-doing-partial_valid1-ze.patch
+ext4-drop-extent-cache-when-splitting-extent-fails.patch
+ext4-remove-unnecessary-e4b-bd_buddy_page-check-in-e.patch
+ext4-convert-some-bug_on-s-in-mballoc-to-use-warn_ra.patch
+ext4-delete-redundant-calculations-in-ext4_mb_get_bu.patch
+ext4-convert-bd_bitmap_page-to-bd_bitmap_folio.patch
+ext4-convert-bd_buddy_page-to-bd_buddy_folio.patch
+ext4-fix-e4b-bitmap-inconsistency-reports.patch
+mfd-qcom-pm8xxx-convert-to-platform-remove-callback-.patch
+mfd-qcom-pm8xxx-fix-of-populate-on-driver-rebind.patch
+mfd-omap-usb-host-convert-to-platform-remove-callbac.patch
+mfd-omap-usb-host-fix-of-populate-on-driver-rebind.patch
+arm64-dts-rockchip-fix-rk356x-pcie-range-mappings.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+usb-cdns3-remove-redundant-if-branch.patch
+usb-cdns3-call-cdns_power_is_lost-only-once-in-cdns_.patch
+usb-cdns3-fix-role-switching-during-resume.patch
+alsa-hda-conexant-add-quirk-for-hp-zbook-studio-g4.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+alsa-hda-conexant-fix-headphone-jack-handling-on-ace.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
+drm-amd-drop-special-case-for-yellow-carp-without-di.patch
+drm-amdgpu-keep-vga-memory-on-macbooks-with-switchab.patch
--- /dev/null
+From 71644a66fb5f68d5ce6af7ebab2ef9522d97b2a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 18:36:49 +0100
+Subject: usb: cdns3: call cdns_power_is_lost() only once in cdns_resume()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Théo Lebrun <theo.lebrun@bootlin.com>
+
+[ Upstream commit 17c6526b333cfd89a4c888a6f7c876c8c326e5ae ]
+
+cdns_power_is_lost() does a register read.
+Call it only once rather than twice.
+
+Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Link: https://lore.kernel.org/r/20250205-s2r-cdns-v7-4-13658a271c3c@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index d272d7b82bec1..8e46fd36b0e56 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -523,11 +523,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
+
+ int cdns_resume(struct cdns *cdns)
+ {
++ bool power_lost = cdns_power_is_lost(cdns);
+ enum usb_role real_role;
+ bool role_changed = false;
+ int ret = 0;
+
+- if (cdns_power_is_lost(cdns)) {
++ if (power_lost) {
+ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+@@ -550,7 +551,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+
+ if (cdns->roles[cdns->role]->resume)
+- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
++ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From baa4444608811fea997bc82e61a28e58919a80f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 11:05:45 +0100
+Subject: usb: cdns3: fix role switching during resume
+
+From: Thomas Richard (TI) <thomas.richard@bootlin.com>
+
+[ Upstream commit 87e4b043b98a1d269be0b812f383881abee0ca45 ]
+
+If the role change while we are suspended, the cdns3 driver switches to the
+new mode during resume. However, switching to host mode in this context
+causes a NULL pointer dereference.
+
+The host role's start() operation registers a xhci-hcd device, but its
+probe is deferred while we are in the resume path. The host role's resume()
+operation assumes the xhci-hcd device is already probed, which is not the
+case, leading to the dereference. Since the start() operation of the new
+role is already called, the resume operation can be skipped.
+
+So skip the resume operation for the new role if a role switch occurs
+during resume. Once the resume sequence is complete, the xhci-hcd device
+can be probed in case of host mode.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000208
+Mem abort info:
+...
+Data abort info:
+...
+[0000000000000208] pgd=0000000000000000, p4d=0000000000000000
+Internal error: Oops: 0000000096000004 [#1] SMP
+Modules linked in:
+CPU: 0 UID: 0 PID: 146 Comm: sh Not tainted
+6.19.0-rc7-00013-g6e64f4aabfae-dirty #135 PREEMPT
+Hardware name: Texas Instruments J7200 EVM (DT)
+pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : usb_hcd_is_primary_hcd+0x0/0x1c
+lr : cdns_host_resume+0x24/0x5c
+...
+Call trace:
+ usb_hcd_is_primary_hcd+0x0/0x1c (P)
+ cdns_resume+0x6c/0xbc
+ cdns3_controller_resume.isra.0+0xe8/0x17c
+ cdns3_plat_resume+0x18/0x24
+ platform_pm_resume+0x2c/0x68
+ dpm_run_callback+0x90/0x248
+ device_resume+0x100/0x24c
+ dpm_resume+0x190/0x2ec
+ dpm_resume_end+0x18/0x34
+ suspend_devices_and_enter+0x2b0/0xa44
+ pm_suspend+0x16c/0x5fc
+ state_store+0x80/0xec
+ kobj_attr_store+0x18/0x2c
+ sysfs_kf_write+0x7c/0x94
+ kernfs_fop_write_iter+0x130/0x1dc
+ vfs_write+0x240/0x370
+ ksys_write+0x70/0x108
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x10c
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0x108
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+Code: 52800003 f9407ca5 d63f00a0 17ffffe4 (f9410401)
+---[ end trace 0000000000000000 ]---
+
+Cc: stable <stable@kernel.org>
+Fixes: 2cf2581cd229 ("usb: cdns3: add power lost support for system resume")
+Signed-off-by: Thomas Richard (TI) <thomas.richard@bootlin.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://patch.msgid.link/20260130-usb-cdns3-fix-role-switching-during-resume-v1-1-44c456852b52@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 8e46fd36b0e56..93e93bb9a314f 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -550,7 +550,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+ }
+
+- if (cdns->roles[cdns->role]->resume)
++ if (!role_changed && cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From e00697ebb72bee2f624d78c079004dacdfec6a28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Dec 2024 09:36:41 +0800
+Subject: usb: cdns3: remove redundant if branch
+
+From: Hongyu Xie <xiehongyu1@kylinos.cn>
+
+[ Upstream commit dedab674428f8a99468a4864c067128ba9ea83a6 ]
+
+cdns->role_sw->dev->driver_data gets set in routines showing below,
+cdns_init
+ sw_desc.driver_data = cdns;
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ dev_set_drvdata(&sw->dev, desc->driver_data);
+
+In cdns_resume,
+cdns->role = cdns_role_get(cdns->role_sw); //line redundant
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
+ dev_get_drvdata(&sw->dev)
+ return dev->driver_data
+return cdns->role;
+
+"line redundant" equals to,
+ cdns->role = cdns->role;
+
+So fix this if branch.
+
+Signed-off-by: Hongyu Xie <xiehongyu1@kylinos.cn>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/20241231013641.23908-1-xiehongyu1@kylinos.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 7242591b346bc..d272d7b82bec1 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -528,9 +528,7 @@ int cdns_resume(struct cdns *cdns)
+ int ret = 0;
+
+ if (cdns_power_is_lost(cdns)) {
+- if (cdns->role_sw) {
+- cdns->role = cdns_role_get(cdns->role_sw);
+- } else {
++ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+ ret = cdns_hw_role_switch(cdns);
+--
+2.51.0
+
--- /dev/null
+From 42f84f49c153d099f7d63bd04b3cd655fc850985 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 11:14:23 +0100
+Subject: ACPI: APEI: GHES: Add helper for CPER CXL protocol errors checks
+
+From: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
+
+[ Upstream commit 70205869686212eb8e4cddf02bf87fd5fd597bc2 ]
+
+Move the CPER CXL protocol errors validity check out of
+cxl_cper_post_prot_err() to new cxl_cper_sec_prot_err_valid() and limit
+the serial number check only to CXL agents that are CXL devices (UEFI
+v2.10, Appendix N.2.13).
+
+Export the new symbol for reuse by ELOG.
+
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
+Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
+Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
+[ rjw: Subject tweak ]
+Link: https://patch.msgid.link/20260114101543.85926-4-fabio.m.de.francesco@linux.intel.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: b584bfbd7ec4 ("ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing with clang < 18")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/Makefile | 1 +
+ drivers/acpi/apei/ghes.c | 18 +----------------
+ drivers/acpi/apei/ghes_helpers.c | 33 ++++++++++++++++++++++++++++++++
+ include/cxl/event.h | 10 ++++++++++
+ 4 files changed, 45 insertions(+), 17 deletions(-)
+ create mode 100644 drivers/acpi/apei/ghes_helpers.c
+
+diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
+index 2c474e6477e12..5db61dfb46915 100644
+--- a/drivers/acpi/apei/Makefile
++++ b/drivers/acpi/apei/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
++obj-$(CONFIG_ACPI_APEI_PCIEAER) += ghes_helpers.o
+ obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+ einj-y := einj-core.o
+ einj-$(CONFIG_ACPI_APEI_EINJ_CXL) += einj-cxl.o
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 6de64a4b49998..1d0fbfcd81dd9 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -712,24 +712,8 @@ static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+- pr_err_ratelimited("CXL CPER invalid agent type\n");
++ if (cxl_cper_sec_prot_err_valid(prot_err))
+ return;
+- }
+-
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+- pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+- return;
+- }
+-
+- if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+- pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+- prot_err->err_len);
+- return;
+- }
+-
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+- pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ switch (prot_err->agent_type) {
+ case RCD:
+diff --git a/drivers/acpi/apei/ghes_helpers.c b/drivers/acpi/apei/ghes_helpers.c
+new file mode 100644
+index 0000000000000..f3d162139a974
+--- /dev/null
++++ b/drivers/acpi/apei/ghes_helpers.c
+@@ -0,0 +1,33 @@
++// SPDX-License-Identifier: GPL-2.0-only
++// Copyright(c) 2025 Intel Corporation. All rights reserved
++
++#include <linux/printk.h>
++#include <cxl/event.h>
++
++int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
++{
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
++ pr_err_ratelimited("CXL CPER invalid agent type\n");
++ return -EINVAL;
++ }
++
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
++ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
++ return -EINVAL;
++ }
++
++ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
++ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
++ prot_err->err_len);
++ return -EINVAL;
++ }
++
++ if ((prot_err->agent_type == RCD || prot_err->agent_type == DEVICE ||
++ prot_err->agent_type == LD || prot_err->agent_type == FMLD) &&
++ !(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
++ pr_warn_ratelimited(FW_WARN
++ "CXL CPER no device serial number\n");
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cxl_cper_sec_prot_err_valid);
+diff --git a/include/cxl/event.h b/include/cxl/event.h
+index ee1c3dec62fae..95bf546fd3055 100644
+--- a/include/cxl/event.h
++++ b/include/cxl/event.h
+@@ -258,4 +258,14 @@ static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+ }
+ #endif
+
++#ifdef CONFIG_ACPI_APEI_PCIEAER
++int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err);
++#else
++static inline int
++cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
++{
++ return -EOPNOTSUPP;
++}
++#endif
++
+ #endif /* _LINUX_CXL_EVENT_H */
+--
+2.51.0
+
--- /dev/null
+From ccd3cab8095d5c53eaf8a6598ff339ffaad6a696 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:27:11 -0700
+Subject: ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing
+ with clang < 18
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit b584bfbd7ec417f257f651cc00a90c66e31dfbf1 ]
+
+After a recent innocuous change to drivers/acpi/apei/ghes.c, building
+ARCH=arm64 allmodconfig with clang-17 or older (which has both
+CONFIG_KASAN=y and CONFIG_WERROR=y) fails with:
+
+ drivers/acpi/apei/ghes.c:902:13: error: stack frame size (2768) exceeds limit (2048) in 'ghes_do_proc' [-Werror,-Wframe-larger-than]
+ 902 | static void ghes_do_proc(struct ghes *ghes,
+ | ^
+
+A KASAN pass that removes unneeded stack instrumentation, enabled by
+default in clang-18 [1], drastically improves stack usage in this case.
+
+To avoid the warning in the common allmodconfig case when it can break
+the build, disable KASAN for ghes.o when compile testing with clang-17
+and older. Disabling KASAN outright may hide legitimate runtime issues,
+so live with the warning in that case; the user can either increase the
+frame warning limit or disable -Werror, which they should probably do
+when debugging with KASAN anyways.
+
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2148
+Link: https://github.com/llvm/llvm-project/commit/51fbab134560ece663517bf1e8c2a30300d08f1a [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20260114-ghes-avoid-wflt-clang-older-than-18-v1-1-9c8248bfe4f4@kernel.org
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/Makefile | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
+index 5db61dfb46915..1a0b85923cd42 100644
+--- a/drivers/acpi/apei/Makefile
++++ b/drivers/acpi/apei/Makefile
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(CONFIG_COMPILE_TEST)_$(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_y_)
++KASAN_SANITIZE_ghes.o := n
++endif
+ obj-$(CONFIG_ACPI_APEI_PCIEAER) += ghes_helpers.o
+ obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+ einj-y := einj-core.o
+--
+2.51.0
+
--- /dev/null
+From ccc5d9b1a49cf9f8f592ec6c954bd2d49a4a1ab5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2025 08:44:19 +0000
+Subject: acpi/ghes, cper: Recognize and cache CXL Protocol errors
+
+From: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+
+[ Upstream commit 315c2f0b53ba2645062627443a12cea73f3dad9c ]
+
+Add support in GHES to detect and process CXL CPER Protocol errors, as
+defined in UEFI v2.10, section N.2.13.
+
+Define struct cxl_cper_prot_err_work_data to cache CXL protocol error
+information, including RAS capabilities and severity, for further
+handling.
+
+These cached CXL CPER records will later be processed by workqueues
+within the CXL subsystem.
+
+Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Gregory Price <gourry@gourry.net>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/20250123084421.127697-5-Smita.KoralahalliChannabasappa@amd.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Stable-dep-of: b584bfbd7ec4 ("ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing with clang < 18")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/ghes.c | 54 ++++++++++++++++++++++++++++++++++++++++
+ include/cxl/event.h | 6 +++++
+ 2 files changed, 60 insertions(+)
+
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 98901fbe2f7b7..6de64a4b49998 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -705,6 +705,56 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
+ schedule_work(&entry->work);
+ }
+
++static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
++ int severity)
++{
++#ifdef CONFIG_ACPI_APEI_PCIEAER
++ struct cxl_cper_prot_err_work_data wd;
++ u8 *dvsec_start, *cap_start;
++
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
++ pr_err_ratelimited("CXL CPER invalid agent type\n");
++ return;
++ }
++
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
++ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
++ return;
++ }
++
++ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
++ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
++ prot_err->err_len);
++ return;
++ }
++
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
++ pr_warn(FW_WARN "CXL CPER no device serial number\n");
++
++ switch (prot_err->agent_type) {
++ case RCD:
++ case DEVICE:
++ case LD:
++ case FMLD:
++ case RP:
++ case DSP:
++ case USP:
++ memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
++
++ dvsec_start = (u8 *)(prot_err + 1);
++ cap_start = dvsec_start + prot_err->dvsec_len;
++
++ memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
++ wd.severity = cper_severity_to_aer(severity);
++ break;
++ default:
++ pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
++ prot_err->agent_type);
++ return;
++ }
++#endif
++}
++
+ /* Room for 8 entries for each of the 4 event log queues */
+ #define CXL_CPER_FIFO_DEPTH 32
+ DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
+@@ -806,6 +856,10 @@ static bool ghes_do_proc(struct ghes *ghes,
+ ghes_handle_aer(gdata);
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ queued = ghes_handle_arm_hw_error(gdata, sev, sync);
++ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
++ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
++
++ cxl_cper_post_prot_err(prot_err, gdata->error_severity);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
+ struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
+
+diff --git a/include/cxl/event.h b/include/cxl/event.h
+index 66d85fc87701d..ee1c3dec62fae 100644
+--- a/include/cxl/event.h
++++ b/include/cxl/event.h
+@@ -232,6 +232,12 @@ struct cxl_ras_capability_regs {
+ u32 header_log[16];
+ };
+
++struct cxl_cper_prot_err_work_data {
++ struct cxl_cper_sec_prot_err prot_err;
++ struct cxl_ras_capability_regs ras_cap;
++ int severity;
++};
++
+ #ifdef CONFIG_ACPI_APEI_GHES
+ int cxl_cper_register_work(struct work_struct *work);
+ int cxl_cper_unregister_work(struct work_struct *work);
+--
+2.51.0
+
--- /dev/null
+From 8fda1a34f9a3e28e221295d9e12e88108289142e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:13:17 +0100
+Subject: ALSA: hda/conexant: Add quirk for HP ZBook Studio G4
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1585cf83e98db32463e5d54161b06a5f01fe9976 ]
+
+It was reported that we need the same quirk for HP ZBook Studio G4
+(SSID 103c:826b) as other HP models to make the mute-LED working.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/64d78753-b9ff-4c64-8920-64d8d31cd20c@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221002
+Link: https://patch.msgid.link/20260207131324.2428030-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 482e801a496a1..9dc11d922612b 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1081,6 +1081,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+--
+2.51.0
+
--- /dev/null
+From e187ad3b70869f182bb02b8ef7628b9c1be025e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:44:11 +0100
+Subject: ALSA: hda/conexant: Fix headphone jack handling on Acer Swift SF314
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7bc0df86c2384bc1e2012a2c946f82305054da64 ]
+
+Acer Swift SF314 (SSID 1025:136d) needs a bit of tweaks of the pin
+configurations for NID 0x16 and 0x19 to make the headphone / headset
+jack working. NID 0x17 can remain as is for the working speaker, and
+the built-in mic is supported via SOF.
+
+Cc: <stable@vger.kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221086
+Link: https://patch.msgid.link/20260217104414.62911-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 9dc11d922612b..b7c9eba9236d8 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -308,6 +308,7 @@ enum {
+ CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
+ CXT_FIXUP_HP_A_U,
++ CXT_FIXUP_ACER_SWIFT_HP,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -1024,6 +1025,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_a_u,
+ },
++ [CXT_FIXUP_ACER_SWIFT_HP] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0321403f }, /* Headphone */
++ { 0x19, 0x40f001f0 }, /* Mic */
++ { }
++ },
++ },
+ };
+
+ static const struct hda_quirk cxt5045_fixups[] = {
+@@ -1073,6 +1082,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x1025, 0x136d, "Acer Swift SF314", CXT_FIXUP_ACER_SWIFT_HP),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+--
+2.51.0
+
--- /dev/null
+From 4adcc863e183fac660d4567a2d1c32467b62cea3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 17:34:02 +0800
+Subject: ALSA: hda/realtek: Add quirk for Gigabyte G5 KF5 (2023)
+
+From: Eric Naim <dnaim@cachyos.org>
+
+[ Upstream commit 405d59fdd2038a65790eaad8c1013d37a2af6561 ]
+
+Fixes microphone detection when a headset is connected to the audio jack
+using the ALC256.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Naim <dnaim@cachyos.org>
+Link: https://patch.msgid.link/20260210093403.21514-1-dnaim@cachyos.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 85178a0303a57..e321428225f9b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11296,6 +11296,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x144d, 0xc886, "Samsung Galaxy Book3 Pro (NP964XFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ SND_PCI_QUIRK(0x144d, 0xc1cc, "Samsung Galaxy Book3 Ultra (NT960XFH)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
++ SND_PCI_QUIRK(0x1458, 0x900e, "Gigabyte G5 KF5 (2023)", ALC2XX_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+--
+2.51.0
+
--- /dev/null
+From c356efa25cee5d75c3bd12ce7d8df2aa4bb6338a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 23:13:37 +0000
+Subject: ALSA: hda/realtek: Add quirk for Samsung Galaxy Book3 Pro 360
+ (NP965QFG)
+
+From: Lewis Mason <mason8110@gmail.com>
+
+[ Upstream commit 3a6b7dc431aab90744e973254604855e654294ae ]
+
+The Samsung Galaxy Book3 Pro 360 NP965QFG (subsystem ID 0x144d:0xc1cb)
+uses the same Realtek ALC298 codec and amplifier configuration as the
+NP960QFG (0x144d:0xc1ca). Apply the same ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS
+fixup to enable the internal speakers.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Lewis Mason <lewis@ocuru.co.uk>
+Link: https://patch.msgid.link/20260210231337.7265-1-lewis@ocuru.co.uk
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e321428225f9b..c13def0f1e1a4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11295,6 +11295,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x144d, 0xc872, "Samsung Galaxy Book2 Pro (NP950XEE)", ALC298_FIXUP_SAMSUNG_AMP_V2_2_AMPS),
+ SND_PCI_QUIRK(0x144d, 0xc886, "Samsung Galaxy Book3 Pro (NP964XFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
++ SND_PCI_QUIRK(0x144d, 0xc1cb, "Samsung Galaxy Book3 Pro 360 (NP965QFG)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ SND_PCI_QUIRK(0x144d, 0xc1cc, "Samsung Galaxy Book3 Ultra (NT960XFH)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS),
+ SND_PCI_QUIRK(0x1458, 0x900e, "Gigabyte G5 KF5 (2023)", ALC2XX_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+--
+2.51.0
+
--- /dev/null
+From 024dcb8ac6057b393dc447e3a41df4a876b05bde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:28 +0800
+Subject: arm64: dts: rockchip: Fix rk356x PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit f63ea193a404481f080ca2958f73e9f364682db9 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that there is no same address allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix it to
+sync with the vendor BSP.
+
+Fixes: 568a67e742df ("arm64: dts: rockchip: Fix rk356x PCIe register and range mappings")
+Fixes: 66b51ea7d70f ("arm64: dts: rockchip: Add rk3568 PCIe2x1 controller")
+Cc: stable@vger.kernel.org
+Cc: Andrew Powers-Holmes <aholmes@omnom.net>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-1-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3568.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk356x.dtsi | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index 6fd67ae271174..0d16f74949b6a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -97,7 +97,7 @@ pcie3x1: pcie@fe270000 {
+ <0x0 0xf2000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x40000000 0x3 0x40000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X1_POWERUP>;
+ reset-names = "pipe";
+@@ -150,7 +150,7 @@ pcie3x2: pcie@fe280000 {
+ <0x0 0xf0000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x80000000 0x3 0x80000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X2_POWERUP>;
+ reset-names = "pipe";
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index bc0f57a26c2ff..32ccc57555545 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -1045,7 +1045,7 @@ pcie2x1: pcie@fe260000 {
+ power-domains = <&power RK3568_PD_PIPE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x00000000 0x3 0x00000000 0x0 0x40000000>;
+ resets = <&cru SRST_PCIE20_POWERUP>;
+ reset-names = "pipe";
+ #address-cells = <3>;
+--
+2.51.0
+
--- /dev/null
+From 062768214927755926470829aeba2f30d9cddd82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:29 +0800
+Subject: arm64: dts: rockchip: Fix rk3588 PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit 46c56b737161060dfa468f25ae699749047902a2 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that there is no same address allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix it to
+sync with the vendor BSP.
+
+Fixes: 0acf4fa7f187 ("arm64: dts: rockchip: add PCIe3 support for rk3588")
+Fixes: 8d81b77f4c49 ("arm64: dts: rockchip: add rk3588 PCIe2 support")
+Cc: stable@vger.kernel.org
+Cc: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-2-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3588-base.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index ad4331bc07806..68801eb5713d1 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -1650,7 +1650,7 @@ pcie2x1l1: pcie@fe180000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf3100000 0x0 0xf3100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf3200000 0x0 0xf3200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0xc0000000 0x0 0x40000000>;
++ <0x03000000 0x9 0xc0000000 0x9 0xc0000000 0x0 0x40000000>;
+ reg = <0xa 0x40c00000 0x0 0x00400000>,
+ <0x0 0xfe180000 0x0 0x00010000>,
+ <0x0 0xf3000000 0x0 0x00100000>;
+@@ -1701,7 +1701,7 @@ pcie2x1l2: pcie@fe190000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0xa 0x00000000 0x0 0x40000000>;
++ <0x03000000 0xa 0x00000000 0xa 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x41000000 0x0 0x00400000>,
+ <0x0 0xfe190000 0x0 0x00010000>,
+ <0x0 0xf4000000 0x0 0x00100000>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+index 0ce0934ec6b79..8af2e5b59e1ac 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+@@ -168,7 +168,7 @@ pcie3x4: pcie@fe150000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x00000000 0x9 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x40000000 0x0 0x00400000>,
+ <0x0 0xfe150000 0x0 0x00010000>,
+ <0x0 0xf0000000 0x0 0x00100000>;
+@@ -254,7 +254,7 @@ pcie3x2: pcie@fe160000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf1100000 0x0 0xf1100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf1200000 0x0 0xf1200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x40000000 0x9 0x40000000 0x0 0x40000000>;
+ reg = <0xa 0x40400000 0x0 0x00400000>,
+ <0x0 0xfe160000 0x0 0x00010000>,
+ <0x0 0xf1000000 0x0 0x00100000>;
+@@ -303,7 +303,7 @@ pcie2x1l0: pcie@fe170000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x80000000 0x9 0x80000000 0x0 0x40000000>;
+ reg = <0xa 0x40800000 0x0 0x00400000>,
+ <0x0 0xfe170000 0x0 0x00010000>,
+ <0x0 0xf2000000 0x0 0x00100000>;
+--
+2.51.0
+
--- /dev/null
+From c2b22e24adf8112296c2dcc155ad1c674b568df5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Jul 2025 19:46:00 +0900
+Subject: ata: libata: Introduce ata_port_eh_scheduled()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 7aae547bbe442affc4afe176b157fab820a12437 ]
+
+Introduce the inline helper function ata_port_eh_scheduled() to test if
+EH is pending (ATA_PFLAG_EH_PENDING port flag is set) or running
+(ATA_PFLAG_EH_IN_PROGRESS port flag is set) for a port. Use this helper
+in ata_port_wait_eh() and __ata_scsi_queuecmd() to replace the hardcoded
+port flag tests.
+
+No functional changes.
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Link: https://lore.kernel.org/r/20250704104601.310643-1-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-eh.c | 2 +-
+ drivers/ata/libata-scsi.c | 5 +++--
+ drivers/ata/libata.h | 5 +++++
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 205c62cf9e32d..bd910dda8c0b1 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -826,7 +826,7 @@ void ata_port_wait_eh(struct ata_port *ap)
+ retry:
+ spin_lock_irqsave(ap->lock, flags);
+
+- while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
++ while (ata_port_eh_scheduled(ap)) {
+ prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irqrestore(ap->lock, flags);
+ schedule();
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 58070edec7c77..d27bf8e2b69cc 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -4339,9 +4339,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
+ * scsi_queue_rq() will defer commands if scsi_host_in_recovery().
+ * However, this check is done without holding the ap->lock (a libata
+ * specific lock), so we can have received an error irq since then,
+- * therefore we must check if EH is pending, while holding ap->lock.
++ * therefore we must check if EH is pending or running, while holding
++ * ap->lock.
+ */
+- if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
++ if (ata_port_eh_scheduled(ap))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (unlikely(!scmd->cmd_len))
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index e78995833e7e6..2d6f7231dcba5 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -51,6 +51,11 @@ static inline bool ata_dev_is_zac(struct ata_device *dev)
+ ata_id_zoned_cap(dev->id) == 0x01;
+ }
+
++static inline bool ata_port_eh_scheduled(struct ata_port *ap)
++{
++ return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
++}
++
+ #ifdef CONFIG_ATA_FORCE
+ extern void ata_force_cbl(struct ata_port *ap);
+ #else
+--
+2.51.0
+
--- /dev/null
+From 710f934e03a25c02a5c8526fc3d79a5069406989 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jun 2025 09:42:33 +0900
+Subject: ata: libata: Remove ATA_DFLAG_ZAC device flag
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit a0f26fcc383965e0522b81269062a9278bc802fe ]
+
+The ATA device flag ATA_DFLAG_ZAC is used to indicate if a device is a
+host managed or host aware zoned device. However, this flag is not used
+in the hot path and only used during device scanning/revalidation and
+for inquiry and sense SCSI command translation.
+
+Save one bit from struct ata_device flags field by replacing this flag
+with the internal helper function ata_dev_is_zac(). This function
+returns true if the device class is ATA_DEV_ZAC (host managed ZAC device
+case) or if its identify data reports it supports the zoned command set
+(host aware ZAC device case).
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-core.c | 13 +------------
+ drivers/ata/libata-scsi.c | 5 ++---
+ drivers/ata/libata.h | 7 +++++++
+ include/linux/libata.h | 1 -
+ 4 files changed, 10 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 39dcefb1fdd54..2b1cb2998331d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2439,18 +2439,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
+ dev->zac_zones_optimal_nonseq = U32_MAX;
+ dev->zac_zones_max_open = U32_MAX;
+
+- /*
+- * Always set the 'ZAC' flag for Host-managed devices.
+- */
+- if (dev->class == ATA_DEV_ZAC)
+- dev->flags |= ATA_DFLAG_ZAC;
+- else if (ata_id_zoned_cap(dev->id) == 0x01)
+- /*
+- * Check for host-aware devices.
+- */
+- dev->flags |= ATA_DFLAG_ZAC;
+-
+- if (!(dev->flags & ATA_DFLAG_ZAC))
++ if (!ata_dev_is_zac(dev))
+ return;
+
+ if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 4281516a46e0b..58070edec7c77 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1955,8 +1955,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
+ };
+
+ for (i = 0; i < sizeof(pages); i++) {
+- if (pages[i] == 0xb6 &&
+- !(dev->flags & ATA_DFLAG_ZAC))
++ if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
+ continue;
+ rbuf[num_pages + 4] = pages[i];
+ num_pages++;
+@@ -2209,7 +2208,7 @@ static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
+ static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- if (!(dev->flags & ATA_DFLAG_ZAC)) {
++ if (!ata_dev_is_zac(dev)) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 1;
+ }
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index d07693bd054eb..e78995833e7e6 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -44,6 +44,13 @@ static inline bool ata_sstatus_online(u32 sstatus)
+ return (sstatus & 0xf) == 0x3;
+ }
+
++static inline bool ata_dev_is_zac(struct ata_device *dev)
++{
++ /* Host managed device or host aware device */
++ return dev->class == ATA_DEV_ZAC ||
++ ata_id_zoned_cap(dev->id) == 0x01;
++}
++
+ #ifdef CONFIG_ATA_FORCE
+ extern void ata_force_cbl(struct ata_port *ap);
+ #else
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 1983a98e3d677..50cb59402cb17 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -155,7 +155,6 @@ enum {
+ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
+ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
+ ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
+- ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+--
+2.51.0
+
--- /dev/null
+From 62f65edc33e053d9f9a29be305ca7a812738d48d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 16:40:48 +0900
+Subject: ata: libata-scsi: avoid Non-NCQ command starvation
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 0ea84089dbf62a92dc7889c79e6b18fc89260808 ]
+
+When a non-NCQ command is issued while NCQ commands are being executed,
+ata_scsi_qc_issue() indicates to the SCSI layer that the command issuing
+should be deferred by returning SCSI_MLQUEUE_XXX_BUSY. This command
+deferring is correct and as mandated by the ACS specifications since
+NCQ and non-NCQ commands cannot be mixed.
+
+However, in the case of a host adapter using multiple submission queues,
+when the target device is under a constant load of NCQ commands, there
+are no guarantees that requeueing the non-NCQ command will be executed
+later and it may be deferred again repeatedly as other submission queues
+can constantly issue NCQ commands from different CPUs ahead of the
+non-NCQ command. This can lead to very long delays for the execution of
+non-NCQ commands, and even complete starvation for these commands in the
+worst case scenario.
+
+Since the block layer and the SCSI layer do not distinguish between
+queueable (NCQ) and non queueable (non-NCQ) commands, libata-scsi SAT
+implementation must ensure forward progress for non-NCQ commands in the
+presence of NCQ command traffic. This is similar to what SAS HBAs with a
+hardware/firmware based SAT implementation do.
+
+Implement such forward progress guarantee by limiting requeueing of
+non-NCQ commands from ata_scsi_qc_issue(): when a non-NCQ command is
+received and NCQ commands are in-flight, do not force a requeue of the
+non-NCQ command by returning SCSI_MLQUEUE_XXX_BUSY and instead return 0
+to indicate that the command was accepted but hold on to the qc using
+the new deferred_qc field of struct ata_port.
+
+This deferred qc will be issued using the work item deferred_qc_work
+running the function ata_scsi_deferred_qc_work() once all in-flight
+commands complete, which is checked with the port qc_defer() callback
+return value indicating that no further delay is necessary. This check
+is done using the helper function ata_scsi_schedule_deferred_qc() which
+is called from ata_scsi_qc_complete(). This thus excludes this mechanism
+from all internal non-NCQ commands issued by ATA EH.
+
+When a port deferred_qc is non NULL, that is, the port has a command
+waiting for the device queue to drain, the issuing of all incoming
+commands (both NCQ and non-NCQ) is deferred using the regular busy
+mechanism. This simplifies the code and also avoids potential denial of
+service problems if a user issues too many non-NCQ commands.
+
+Finally, whenever ata EH is scheduled, regardless of the reason, a
+deferred qc is always requeued so that it can be retried once EH
+completes. This is done by calling the function
+ata_scsi_requeue_deferred_qc() from ata_eh_set_pending(). This avoids
+the need for any special processing for the deferred qc in case of NCQ
+error, link or device reset, or device timeout.
+
+Reported-by: Xingui Yang <yangxingui@huawei.com>
+Reported-by: Igor Pylypiv <ipylypiv@google.com>
+Fixes: bdb01301f3ea ("scsi: Add host and host template flag 'host_tagset'")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Tested-by: Igor Pylypiv <ipylypiv@google.com>
+Tested-by: Xingui Yang <yangxingui@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-core.c | 5 +++
+ drivers/ata/libata-eh.c | 6 +++
+ drivers/ata/libata-scsi.c | 93 +++++++++++++++++++++++++++++++++++++++
+ drivers/ata/libata.h | 2 +
+ include/linux/libata.h | 3 ++
+ 5 files changed, 109 insertions(+)
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 2b1cb2998331d..42fbacd94a8a1 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5521,6 +5521,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
+ mutex_init(&ap->scsi_scan_mutex);
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
++ INIT_WORK(&ap->deferred_qc_work, ata_scsi_deferred_qc_work);
+ INIT_LIST_HEAD(&ap->eh_done_q);
+ init_waitqueue_head(&ap->eh_wait_q);
+ init_completion(&ap->park_req_pending);
+@@ -6131,6 +6132,10 @@ static void ata_port_detach(struct ata_port *ap)
+ }
+ }
+
++ /* Make sure the deferred qc work finished. */
++ cancel_work_sync(&ap->deferred_qc_work);
++ WARN_ON(ap->deferred_qc);
++
+ /* Tell EH to disable all devices */
+ ap->pflags |= ATA_PFLAG_UNLOADING;
+ ata_port_schedule_eh(ap);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index bd910dda8c0b1..2417bba84cf50 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -920,6 +920,12 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
+
++ /*
++ * If we have a deferred qc, requeue it so that it is retried once EH
++ * completes.
++ */
++ ata_scsi_requeue_deferred_qc(ap);
++
+ if (!fastdrain)
+ return;
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index d27bf8e2b69cc..37fb635f553ef 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1671,8 +1671,77 @@ static void ata_qc_done(struct ata_queued_cmd *qc)
+ done(cmd);
+ }
+
++void ata_scsi_deferred_qc_work(struct work_struct *work)
++{
++ struct ata_port *ap =
++ container_of(work, struct ata_port, deferred_qc_work);
++ struct ata_queued_cmd *qc;
++ unsigned long flags;
++
++ spin_lock_irqsave(ap->lock, flags);
++
++ /*
++ * If we still have a deferred qc and we are not in EH, issue it. In
++ * such case, we should not need any more deferring the qc, so warn if
++ * qc_defer() says otherwise.
++ */
++ qc = ap->deferred_qc;
++ if (qc && !ata_port_eh_scheduled(ap)) {
++ WARN_ON_ONCE(ap->ops->qc_defer(qc));
++ ap->deferred_qc = NULL;
++ ata_qc_issue(qc);
++ }
++
++ spin_unlock_irqrestore(ap->lock, flags);
++}
++
++void ata_scsi_requeue_deferred_qc(struct ata_port *ap)
++{
++ struct ata_queued_cmd *qc = ap->deferred_qc;
++ struct scsi_cmnd *scmd;
++
++ lockdep_assert_held(ap->lock);
++
++ /*
++ * If we have a deferred qc when a reset occurs or NCQ commands fail,
++ * do not try to be smart about what to do with this deferred command
++ * and simply retry it by completing it with DID_SOFT_ERROR.
++ */
++ if (!qc)
++ return;
++
++ scmd = qc->scsicmd;
++ ap->deferred_qc = NULL;
++ ata_qc_free(qc);
++ scmd->result = (DID_SOFT_ERROR << 16);
++ scsi_done(scmd);
++}
++
++static void ata_scsi_schedule_deferred_qc(struct ata_port *ap)
++{
++ struct ata_queued_cmd *qc = ap->deferred_qc;
++
++ lockdep_assert_held(ap->lock);
++
++ /*
++ * If we have a deferred qc, then qc_defer() is defined and we can use
++ * this callback to determine if this qc is good to go, unless EH has
++ * been scheduled.
++ */
++ if (!qc)
++ return;
++
++ if (ata_port_eh_scheduled(ap)) {
++ ata_scsi_requeue_deferred_qc(ap);
++ return;
++ }
++ if (!ap->ops->qc_defer(qc))
++ queue_work(system_highpri_wq, &ap->deferred_qc_work);
++}
++
+ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ {
++ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ u8 *cdb = cmd->cmnd;
+ bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
+@@ -1700,6 +1769,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ }
+
+ ata_qc_done(qc);
++
++ ata_scsi_schedule_deferred_qc(ap);
+ }
+
+ static int ata_scsi_qc_issue(struct ata_port *ap, struct ata_queued_cmd *qc)
+@@ -1709,6 +1780,16 @@ static int ata_scsi_qc_issue(struct ata_port *ap, struct ata_queued_cmd *qc)
+ if (!ap->ops->qc_defer)
+ goto issue;
+
++ /*
++ * If we already have a deferred qc, then rely on the SCSI layer to
++ * requeue and defer all incoming commands until the deferred qc is
++ * processed, once all on-going commands complete.
++ */
++ if (ap->deferred_qc) {
++ ata_qc_free(qc);
++ return SCSI_MLQUEUE_DEVICE_BUSY;
++ }
++
+ /* Check if the command needs to be deferred. */
+ ret = ap->ops->qc_defer(qc);
+ switch (ret) {
+@@ -1727,6 +1808,18 @@ static int ata_scsi_qc_issue(struct ata_port *ap, struct ata_queued_cmd *qc)
+ }
+
+ if (ret) {
++ /*
++ * We must defer this qc: if this is not an NCQ command, keep
++ * this qc as a deferred one and report to the SCSI layer that
++ * we issued it so that it is not requeued. The deferred qc will
++ * be issued with the port deferred_qc_work once all on-going
++ * commands complete.
++ */
++ if (!ata_is_ncq(qc->tf.protocol)) {
++ ap->deferred_qc = qc;
++ return 0;
++ }
++
+ /* Force a requeue of the command to defer its execution. */
+ ata_qc_free(qc);
+ return ret;
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index 2d6f7231dcba5..1a2d0f7115b5f 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -166,6 +166,8 @@ void ata_scsi_sdev_config(struct scsi_device *sdev);
+ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_device *dev);
+ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
++void ata_scsi_deferred_qc_work(struct work_struct *work);
++void ata_scsi_requeue_deferred_qc(struct ata_port *ap);
+
+ /* libata-eh.c */
+ extern unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 50cb59402cb17..14c835f5d661e 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -897,6 +897,9 @@ struct ata_port {
+ u64 qc_active;
+ int nr_active_links; /* #links with active qcs */
+
++ struct work_struct deferred_qc_work;
++ struct ata_queued_cmd *deferred_qc;
++
+ struct ata_link link; /* host default link */
+ struct ata_link *slave_link; /* see ata_slave_link_init() */
+
+--
+2.51.0
+
--- /dev/null
+From 3e430807f7657922f032b865b905dbe152fd1b5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 11:45:35 +0900
+Subject: ata: libata-scsi: Document all VPD page inquiry actors
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 47000e84b3d0630d7d86eeb115894205be68035d ]
+
+Add the missing kdoc comments for the ata_scsiop_inq_XX functions used
+to emulate access to VPD pages.
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20241022024537.251905-5-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 54 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 54 insertions(+)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index c214f0832714c..a38d912a0497b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2086,6 +2086,16 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
++ * Return data for the VPD page B0h (Block Limits).
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
+ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ struct ata_device *dev = args->dev;
+@@ -2126,6 +2136,17 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
++ * Characteristics
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
++ * Return data for the VPD page B1h (Block Device Characteristics).
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
+ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ int form_factor = ata_id_form_factor(args->id);
+@@ -2143,6 +2164,17 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
++ * Provisioning
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
++ * Return data for the VPD page B2h (Logical Block Provisioning).
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
+ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
+@@ -2153,6 +2185,17 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
++ * Characteristics
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
+ * Return data for the VPD page B6h (Zoned Block Device Characteristics).
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
+ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ if (!(args->dev->flags & ATA_DFLAG_ZAC)) {
+@@ -2178,6 +2221,17 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
++ * Ranges
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
++ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
+ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+--
+2.51.0
+
--- /dev/null
+From 4d4126af1ec0bda9bbec1f0b8e731d5801cd1175 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 11:45:32 +0900
+Subject: ata: libata-scsi: Refactor ata_scsi_simulate()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit b055e3be63bebc3c50d0fb1830de9bf4f2be388d ]
+
+Factor out the code handling the INQUIRY command in ata_scsi_simulate()
+using the function ata_scsi_rbuf_fill() with the new actor
+ata_scsiop_inquiry(). This new actor function calls the existing actors
+to handle the standard inquiry as well as extended inquiry (VPD page
+access).
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20241022024537.251905-2-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 106 ++++++++++++++++++++++----------------
+ 1 file changed, 63 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 097080c8b82df..17fb055e48748 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1849,7 +1849,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+ }
+
+ /**
+- * ata_scsiop_inq_std - Simulate INQUIRY command
++ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+@@ -2155,6 +2155,11 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+
+ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ {
++ if (!(args->dev->flags & ATA_DFLAG_ZAC)) {
++ ata_scsi_set_invalid_field(args->dev, args->cmd, 2, 0xff);
++ return 1;
++ }
++
+ /*
+ * zbc-r05 SCSI Zoned Block device characteristics VPD page
+ */
+@@ -2179,6 +2184,11 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+ u8 *desc = &rbuf[64];
+ int i;
+
++ if (!cpr_log) {
++ ata_scsi_set_invalid_field(args->dev, args->cmd, 2, 0xff);
++ return 1;
++ }
++
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
+@@ -2193,6 +2203,57 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+ }
+
++/**
++ * ata_scsiop_inquiry - Simulate INQUIRY command
++ * @args: device IDENTIFY data / SCSI command of interest.
++ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
++ *
++ * Returns data associated with an INQUIRY command output.
++ *
++ * LOCKING:
++ * spin_lock_irqsave(host lock)
++ */
++static unsigned int ata_scsiop_inquiry(struct ata_scsi_args *args, u8 *rbuf)
++{
++ struct ata_device *dev = args->dev;
++ struct scsi_cmnd *cmd = args->cmd;
++ const u8 *scsicmd = cmd->cmnd;
++
++ /* is CmdDt set? */
++ if (scsicmd[1] & 2) {
++ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
++ return 1;
++ }
++
++ /* Is EVPD clear? */
++ if ((scsicmd[1] & 1) == 0)
++ return ata_scsiop_inq_std(args, rbuf);
++
++ switch (scsicmd[2]) {
++ case 0x00:
++ return ata_scsiop_inq_00(args, rbuf);
++ case 0x80:
++ return ata_scsiop_inq_80(args, rbuf);
++ case 0x83:
++ return ata_scsiop_inq_83(args, rbuf);
++ case 0x89:
++ return ata_scsiop_inq_89(args, rbuf);
++ case 0xb0:
++ return ata_scsiop_inq_b0(args, rbuf);
++ case 0xb1:
++ return ata_scsiop_inq_b1(args, rbuf);
++ case 0xb2:
++ return ata_scsiop_inq_b2(args, rbuf);
++ case 0xb6:
++ return ata_scsiop_inq_b6(args, rbuf);
++ case 0xb9:
++ return ata_scsiop_inq_b9(args, rbuf);
++ default:
++ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
++ return 1;
++ }
++}
++
+ /**
+ * modecpy - Prepare response for MODE SENSE
+ * @dest: output buffer
+@@ -4304,48 +4365,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+
+ switch(scsicmd[0]) {
+ case INQUIRY:
+- if (scsicmd[1] & 2) /* is CmdDt set? */
+- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+- else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
+- else switch (scsicmd[2]) {
+- case 0x00:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
+- break;
+- case 0x80:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
+- break;
+- case 0x83:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
+- break;
+- case 0x89:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
+- break;
+- case 0xb0:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
+- break;
+- case 0xb1:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
+- break;
+- case 0xb2:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
+- break;
+- case 0xb6:
+- if (dev->flags & ATA_DFLAG_ZAC)
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
+- else
+- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+- break;
+- case 0xb9:
+- if (dev->cpr_log)
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
+- else
+- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+- break;
+- default:
+- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+- break;
+- }
++ ata_scsi_rbuf_fill(&args, ata_scsiop_inquiry);
+ break;
+
+ case MODE_SENSE:
+--
+2.51.0
+
--- /dev/null
+From 771f6f08049bfc3530c4ef62546b886ee2a2185e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 11:45:34 +0900
+Subject: ata: libata-scsi: Refactor ata_scsiop_maint_in()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 4ab7bb97634351914a18f3c4533992c99eb6edb6 ]
+
+Move the check for MI_REPORT_SUPPORTED_OPERATION_CODES from
+ata_scsi_simulate() into ata_scsiop_maint_in() to simplify
+ata_scsi_simulate() code.
+
+Furthermore, since an rbuf fill actor function returning a non-zero
+value causes no data to be returned for the command, directly return
+an error (return 1) for an invalid command format after setting the
+invalid field in CDB error.
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20241022024537.251905-4-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e5857229f0b7a..c214f0832714c 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3425,12 +3425,16 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+ struct ata_device *dev = args->dev;
+ u8 *cdb = args->cmd->cmnd;
+ u8 supported = 0, cdlp = 0, rwcdlp = 0;
+- unsigned int err = 0;
++
++ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
++ ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ return 1;
++ }
+
+ if (cdb[2] != 1 && cdb[2] != 3) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+- err = 2;
+- goto out;
++ ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ return 1;
+ }
+
+ switch (cdb[3]) {
+@@ -3498,11 +3502,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+ default:
+ break;
+ }
+-out:
++
+ /* One command format */
+ rbuf[0] = rwcdlp;
+ rbuf[1] = cdlp | supported;
+- return err;
++
++ return 0;
+ }
+
+ /**
+@@ -4418,10 +4423,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ break;
+
+ case MAINTENANCE_IN:
+- if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
+- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+- else
+- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
++ ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ break;
+
+ /* all other commands */
+--
+2.51.0
+
--- /dev/null
+From daa0886985bd082e804418783d4f249cd30ca336 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 11:45:33 +0900
+Subject: ata: libata-scsi: Refactor ata_scsiop_read_cap()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 44bdde151a6f5b34993c570a8f6508e2e00b56e1 ]
+
+Move the check for the scsi command service action being
+SAI_READ_CAPACITY_16 from ata_scsi_simulate() into ata_scsiop_read_cap()
+to simplify ata_scsi_simulate() for processing capacity reading commands
+(READ_CAPACITY and SERVICE_ACTION_IN_16).
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20241022024537.251905-3-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 87 +++++++++++++++++++++------------------
+ 1 file changed, 46 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 17fb055e48748..e5857229f0b7a 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2613,6 +2613,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ {
+ struct ata_device *dev = args->dev;
++ u8 *scsicmd = args->cmd->cmnd;
+ u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
+ u32 sector_size; /* physical sector size in bytes */
+ u8 log2_per_phys;
+@@ -2622,7 +2623,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
+ lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
+
+- if (args->cmd->cmnd[0] == READ_CAPACITY) {
++ if (scsicmd[0] == READ_CAPACITY) {
+ if (last_lba >= 0xffffffffULL)
+ last_lba = 0xffffffff;
+
+@@ -2637,42 +2638,52 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ rbuf[5] = sector_size >> (8 * 2);
+ rbuf[6] = sector_size >> (8 * 1);
+ rbuf[7] = sector_size;
+- } else {
+- /* sector count, 64-bit */
+- rbuf[0] = last_lba >> (8 * 7);
+- rbuf[1] = last_lba >> (8 * 6);
+- rbuf[2] = last_lba >> (8 * 5);
+- rbuf[3] = last_lba >> (8 * 4);
+- rbuf[4] = last_lba >> (8 * 3);
+- rbuf[5] = last_lba >> (8 * 2);
+- rbuf[6] = last_lba >> (8 * 1);
+- rbuf[7] = last_lba;
+
+- /* sector size */
+- rbuf[ 8] = sector_size >> (8 * 3);
+- rbuf[ 9] = sector_size >> (8 * 2);
+- rbuf[10] = sector_size >> (8 * 1);
+- rbuf[11] = sector_size;
+-
+- rbuf[12] = 0;
+- rbuf[13] = log2_per_phys;
+- rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+- rbuf[15] = lowest_aligned;
+-
+- if (ata_id_has_trim(args->id) &&
+- !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+- rbuf[14] |= 0x80; /* LBPME */
+-
+- if (ata_id_has_zero_after_trim(args->id) &&
+- dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+- ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+- rbuf[14] |= 0x40; /* LBPRZ */
+- }
++ return 0;
++ }
++
++ /*
++ * READ CAPACITY 16 command is defined as a service action
++ * (SERVICE_ACTION_IN_16 command).
++ */
++ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
++ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
++ ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ return 1;
++ }
++
++ /* sector count, 64-bit */
++ rbuf[0] = last_lba >> (8 * 7);
++ rbuf[1] = last_lba >> (8 * 6);
++ rbuf[2] = last_lba >> (8 * 5);
++ rbuf[3] = last_lba >> (8 * 4);
++ rbuf[4] = last_lba >> (8 * 3);
++ rbuf[5] = last_lba >> (8 * 2);
++ rbuf[6] = last_lba >> (8 * 1);
++ rbuf[7] = last_lba;
++
++ /* sector size */
++ rbuf[ 8] = sector_size >> (8 * 3);
++ rbuf[ 9] = sector_size >> (8 * 2);
++ rbuf[10] = sector_size >> (8 * 1);
++ rbuf[11] = sector_size;
++
++ if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
++ rbuf[12] = (1 << 4); /* RC_BASIS */
++ rbuf[13] = log2_per_phys;
++ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
++ rbuf[15] = lowest_aligned;
++
++ if (ata_id_has_trim(args->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
++ rbuf[14] |= 0x80; /* LBPME */
++
++ if (ata_id_has_zero_after_trim(args->id) &&
++ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
++ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
++ rbuf[14] |= 0x40; /* LBPRZ */
+ }
+- if (ata_id_zoned_cap(args->id) ||
+- args->dev->class == ATA_DEV_ZAC)
+- rbuf[12] = (1 << 4); /* RC_BASIS */
+ }
++
+ return 0;
+ }
+
+@@ -4374,14 +4385,8 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ break;
+
+ case READ_CAPACITY:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+- break;
+-
+ case SERVICE_ACTION_IN_16:
+- if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
+- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+- else
+- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
++ ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+ break;
+
+ case REPORT_LUNS:
+--
+2.51.0
+
--- /dev/null
+From f941903afc03cb5bde4408288832cadc4a51cd9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Oct 2024 11:45:36 +0900
+Subject: ata: libata-scsi: Remove struct ata_scsi_args
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit 2365278e03916b6b9a65df91e9f7c7afe5a6cf2e ]
+
+The data structure struct ata_scsi_args is used to pass the target ATA
+device, the SCSI command to simulate and the device identification data
+to ata_scsi_rbuf_fill() and to its actor function. This method of
+passing information does not improve the code in any way and in fact
+increases the number of pointer dereferences for no gains.
+
+Drop this data structure by modifying the interface of
+ata_scsi_rbuf_fill() and its actor function to take an ATA device and a
+SCSI command as argument.
+
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20241022024537.251905-6-dlemoal@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-scsi.c | 241 ++++++++++++++++++++------------------
+ 1 file changed, 127 insertions(+), 114 deletions(-)
+
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index a38d912a0497b..4281516a46e0b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1806,15 +1806,10 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ return 0;
+ }
+
+-struct ata_scsi_args {
+- struct ata_device *dev;
+- u16 *id;
+- struct scsi_cmnd *cmd;
+-};
+-
+ /**
+ * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @actor: Callback hook for desired SCSI command simulator
+ *
+ * Takes care of the hard work of simulating a SCSI command...
+@@ -1827,30 +1822,30 @@ struct ata_scsi_args {
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+- unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
++static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
++ unsigned int (*actor)(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf))
+ {
+ unsigned int rc;
+- struct scsi_cmnd *cmd = args->cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+
+ memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
+- rc = actor(args, ata_scsi_rbuf);
+- if (rc == 0)
++ rc = actor(dev, cmd, ata_scsi_rbuf);
++ if (rc == 0) {
+ sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+ ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
++ cmd->result = SAM_STAT_GOOD;
++ }
+
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+-
+- if (rc == 0)
+- cmd->result = SAM_STAT_GOOD;
+ }
+
+ /**
+ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns standard device identification data associated
+@@ -1859,7 +1854,8 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ static const u8 versions[] = {
+ 0x00,
+@@ -1900,30 +1896,30 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
+ */
+- if (ata_id_removable(args->id))
++ if (ata_id_removable(dev->id))
+ hdr[1] |= (1 << 7);
+
+- if (args->dev->class == ATA_DEV_ZAC) {
++ if (dev->class == ATA_DEV_ZAC) {
+ hdr[0] = TYPE_ZBC;
+ hdr[2] = 0x7; /* claim SPC-5 version compatibility */
+ }
+
+- if (args->dev->flags & ATA_DFLAG_CDL)
++ if (dev->flags & ATA_DFLAG_CDL)
+ hdr[2] = 0xd; /* claim SPC-6 version compatibility */
+
+ memcpy(rbuf, hdr, sizeof(hdr));
+ memcpy(&rbuf[8], "ATA ", 8);
+- ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
++ ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);
+
+ /* From SAT, use last 2 words from fw rev unless they are spaces */
+- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
++ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ if (strncmp(&rbuf[32], " ", 4) == 0)
+- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
++ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);
+
+ if (rbuf[32] == 0 || rbuf[32] == ' ')
+ memcpy(&rbuf[32], "n/a ", 4);
+
+- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
++ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
+ else
+ memcpy(rbuf + 58, versions, sizeof(versions));
+@@ -1933,7 +1929,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+
+ /**
+ * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns list of inquiry VPD pages available.
+@@ -1941,7 +1938,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ int i, num_pages = 0;
+ static const u8 pages[] = {
+@@ -1958,7 +1956,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+
+ for (i = 0; i < sizeof(pages); i++) {
+ if (pages[i] == 0xb6 &&
+- !(args->dev->flags & ATA_DFLAG_ZAC))
++ !(dev->flags & ATA_DFLAG_ZAC))
+ continue;
+ rbuf[num_pages + 4] = pages[i];
+ num_pages++;
+@@ -1969,7 +1967,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+
+ /**
+ * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns ATA device serial number.
+@@ -1977,7 +1976,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ static const u8 hdr[] = {
+ 0,
+@@ -1987,14 +1987,15 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+ };
+
+ memcpy(rbuf, hdr, sizeof(hdr));
+- ata_id_string(args->id, (unsigned char *) &rbuf[4],
++ ata_id_string(dev->id, (unsigned char *) &rbuf[4],
+ ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+ return 0;
+ }
+
+ /**
+ * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields two logical unit device identification designators:
+@@ -2005,7 +2006,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ const int sat_model_serial_desc_len = 68;
+ int num;
+@@ -2017,7 +2019,7 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+ rbuf[num + 0] = 2;
+ rbuf[num + 3] = ATA_ID_SERNO_LEN;
+ num += 4;
+- ata_id_string(args->id, (unsigned char *) rbuf + num,
++ ata_id_string(dev->id, (unsigned char *) rbuf + num,
+ ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+ num += ATA_ID_SERNO_LEN;
+
+@@ -2029,21 +2031,21 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+ num += 4;
+ memcpy(rbuf + num, "ATA ", 8);
+ num += 8;
+- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
++ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ATA_ID_PROD_LEN);
+ num += ATA_ID_PROD_LEN;
+- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
++ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ATA_ID_SERNO_LEN);
+ num += ATA_ID_SERNO_LEN;
+
+- if (ata_id_has_wwn(args->id)) {
++ if (ata_id_has_wwn(dev->id)) {
+ /* SAT defined lu world wide name */
+ /* piv=0, assoc=lu, code_set=binary, designator=NAA */
+ rbuf[num + 0] = 1;
+ rbuf[num + 1] = 3;
+ rbuf[num + 3] = ATA_ID_WWN_LEN;
+ num += 4;
+- ata_id_string(args->id, (unsigned char *) rbuf + num,
++ ata_id_string(dev->id, (unsigned char *) rbuf + num,
+ ATA_ID_WWN, ATA_ID_WWN_LEN);
+ num += ATA_ID_WWN_LEN;
+ }
+@@ -2053,7 +2055,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+
+ /**
+ * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields SAT-specified ATA VPD page.
+@@ -2061,7 +2064,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ rbuf[1] = 0x89; /* our page code */
+ rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
+@@ -2082,13 +2086,14 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+
+ rbuf[56] = ATA_CMD_ID_ATA;
+
+- memcpy(&rbuf[60], &args->id[0], 512);
++ memcpy(&rbuf[60], &dev->id[0], 512);
+ return 0;
+ }
+
+ /**
+ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B0h (Block Limits).
+@@ -2096,9 +2101,9 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_device *dev = args->dev;
+ u16 min_io_sectors;
+
+ rbuf[1] = 0xb0;
+@@ -2111,7 +2116,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ * logical than physical sector size we need to figure out what the
+ * latter is.
+ */
+- min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
++ min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
+ put_unaligned_be16(min_io_sectors, &rbuf[6]);
+
+ /*
+@@ -2123,7 +2128,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ * that we support some form of unmap - in thise case via WRITE SAME
+ * with the unmap bit set.
+ */
+- if (ata_id_has_trim(args->id)) {
++ if (ata_id_has_trim(dev->id)) {
+ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
+
+ if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
+@@ -2139,7 +2144,8 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ /**
+ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
+ * Characteristics
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B1h (Block Device Characteristics).
+@@ -2147,11 +2153,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- int form_factor = ata_id_form_factor(args->id);
+- int media_rotation_rate = ata_id_rotation_rate(args->id);
+- u8 zoned = ata_id_zoned_cap(args->id);
++ int form_factor = ata_id_form_factor(dev->id);
++ int media_rotation_rate = ata_id_rotation_rate(dev->id);
++ u8 zoned = ata_id_zoned_cap(dev->id);
+
+ rbuf[1] = 0xb1;
+ rbuf[3] = 0x3c;
+@@ -2167,7 +2174,8 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+ /**
+ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
+ * Provisioning
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Logical Block Provisioning).
+@@ -2175,7 +2183,8 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
+ rbuf[1] = 0xb2;
+@@ -2188,7 +2197,8 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+ /**
+ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
+ * Characteristics
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Zoned Block Device Characteristics).
+@@ -2196,10 +2206,11 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- if (!(args->dev->flags & ATA_DFLAG_ZAC)) {
+- ata_scsi_set_invalid_field(args->dev, args->cmd, 2, 0xff);
++ if (!(dev->flags & ATA_DFLAG_ZAC)) {
++ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 1;
+ }
+
+@@ -2212,11 +2223,11 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ /*
+ * URSWRZ bit is only meaningful for host-managed ZAC drives
+ */
+- if (args->dev->zac_zoned_cap & 1)
++ if (dev->zac_zoned_cap & 1)
+ rbuf[4] |= 1;
+- put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
+- put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
+- put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
++ put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
++ put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
++ put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
+
+ return 0;
+ }
+@@ -2224,7 +2235,8 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ /**
+ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
+ * Ranges
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
+@@ -2232,14 +2244,15 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_cpr_log *cpr_log = args->dev->cpr_log;
++ struct ata_cpr_log *cpr_log = dev->cpr_log;
+ u8 *desc = &rbuf[64];
+ int i;
+
+ if (!cpr_log) {
+- ata_scsi_set_invalid_field(args->dev, args->cmd, 2, 0xff);
++ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 1;
+ }
+
+@@ -2259,7 +2272,8 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+
+ /**
+ * ata_scsiop_inquiry - Simulate INQUIRY command
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns data associated with an INQUIRY command output.
+@@ -2267,10 +2281,9 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_inquiry(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_device *dev = args->dev;
+- struct scsi_cmnd *cmd = args->cmd;
+ const u8 *scsicmd = cmd->cmnd;
+
+ /* is CmdDt set? */
+@@ -2281,27 +2294,27 @@ static unsigned int ata_scsiop_inquiry(struct ata_scsi_args *args, u8 *rbuf)
+
+ /* Is EVPD clear? */
+ if ((scsicmd[1] & 1) == 0)
+- return ata_scsiop_inq_std(args, rbuf);
++ return ata_scsiop_inq_std(dev, cmd, rbuf);
+
+ switch (scsicmd[2]) {
+ case 0x00:
+- return ata_scsiop_inq_00(args, rbuf);
++ return ata_scsiop_inq_00(dev, cmd, rbuf);
+ case 0x80:
+- return ata_scsiop_inq_80(args, rbuf);
++ return ata_scsiop_inq_80(dev, cmd, rbuf);
+ case 0x83:
+- return ata_scsiop_inq_83(args, rbuf);
++ return ata_scsiop_inq_83(dev, cmd, rbuf);
+ case 0x89:
+- return ata_scsiop_inq_89(args, rbuf);
++ return ata_scsiop_inq_89(dev, cmd, rbuf);
+ case 0xb0:
+- return ata_scsiop_inq_b0(args, rbuf);
++ return ata_scsiop_inq_b0(dev, cmd, rbuf);
+ case 0xb1:
+- return ata_scsiop_inq_b1(args, rbuf);
++ return ata_scsiop_inq_b1(dev, cmd, rbuf);
+ case 0xb2:
+- return ata_scsiop_inq_b2(args, rbuf);
++ return ata_scsiop_inq_b2(dev, cmd, rbuf);
+ case 0xb6:
+- return ata_scsiop_inq_b6(args, rbuf);
++ return ata_scsiop_inq_b6(dev, cmd, rbuf);
+ case 0xb9:
+- return ata_scsiop_inq_b9(args, rbuf);
++ return ata_scsiop_inq_b9(dev, cmd, rbuf);
+ default:
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 1;
+@@ -2528,7 +2541,8 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
+
+ /**
+ * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate MODE SENSE commands. Assume this is invoked for direct
+@@ -2538,10 +2552,10 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_device *dev = args->dev;
+- u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
++ u8 *scsicmd = cmd->cmnd, *p = rbuf;
+ static const u8 sat_blk_desc[] = {
+ 0, 0, 0, 0, /* number of blocks: sat unspecified */
+ 0,
+@@ -2606,17 +2620,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+ break;
+
+ case CACHE_MPAGE:
+- p += ata_msense_caching(args->id, p, page_control == 1);
++ p += ata_msense_caching(dev->id, p, page_control == 1);
+ break;
+
+ case CONTROL_MPAGE:
+- p += ata_msense_control(args->dev, p, spg, page_control == 1);
++ p += ata_msense_control(dev, p, spg, page_control == 1);
+ break;
+
+ case ALL_MPAGES:
+ p += ata_msense_rw_recovery(p, page_control == 1);
+- p += ata_msense_caching(args->id, p, page_control == 1);
+- p += ata_msense_control(args->dev, p, spg, page_control == 1);
++ p += ata_msense_caching(dev->id, p, page_control == 1);
++ p += ata_msense_control(dev, p, spg, page_control == 1);
+ break;
+
+ default: /* invalid page code */
+@@ -2645,18 +2659,19 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+ return 0;
+
+ invalid_fld:
+- ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
++ ata_scsi_set_invalid_field(dev, cmd, fp, bp);
+ return 1;
+
+ saving_not_supp:
+- ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
++ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ /* "Saving parameters not supported" */
+ return 1;
+ }
+
+ /**
+ * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate READ CAPACITY commands.
+@@ -2664,10 +2679,10 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * None.
+ */
+-static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_device *dev = args->dev;
+- u8 *scsicmd = args->cmd->cmnd;
++ u8 *scsicmd = cmd->cmnd;
+ u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
+ u32 sector_size; /* physical sector size in bytes */
+ u8 log2_per_phys;
+@@ -2702,7 +2717,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ */
+ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
+ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
+- ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 1;
+ }
+
+@@ -2722,16 +2737,16 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ rbuf[10] = sector_size >> (8 * 1);
+ rbuf[11] = sector_size;
+
+- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
++ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
+ rbuf[13] = log2_per_phys;
+ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ rbuf[15] = lowest_aligned;
+
+- if (ata_id_has_trim(args->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
++ if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+ rbuf[14] |= 0x80; /* LBPME */
+
+- if (ata_id_has_zero_after_trim(args->id) &&
++ if (ata_id_has_zero_after_trim(dev->id) &&
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
+@@ -2743,7 +2758,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+
+ /**
+ * ata_scsiop_report_luns - Simulate REPORT LUNS command
+- * @args: device IDENTIFY data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate REPORT LUNS command.
+@@ -2751,7 +2767,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+ rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
+
+@@ -3466,7 +3483,8 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
+
+ /**
+ * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+- * @args: device MAINTENANCE_IN data / SCSI command of interest.
++ * @dev: Target device.
++ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields a subset to satisfy scsi_report_opcode()
+@@ -3474,20 +3492,20 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+-static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
++static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
++ struct scsi_cmnd *cmd, u8 *rbuf)
+ {
+- struct ata_device *dev = args->dev;
+- u8 *cdb = args->cmd->cmnd;
++ u8 *cdb = cmd->cmnd;
+ u8 supported = 0, cdlp = 0, rwcdlp = 0;
+
+ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
+- ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 1;
+ }
+
+ if (cdb[2] != 1 && cdb[2] != 3) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+- ata_scsi_set_invalid_field(dev, args->cmd, 1, 0xff);
++ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 1;
+ }
+
+@@ -4425,31 +4443,26 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+
+ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ {
+- struct ata_scsi_args args;
+ const u8 *scsicmd = cmd->cmnd;
+ u8 tmp8;
+
+- args.dev = dev;
+- args.id = dev->id;
+- args.cmd = cmd;
+-
+ switch(scsicmd[0]) {
+ case INQUIRY:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_inquiry);
++ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
++ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
+ break;
+
+ case READ_CAPACITY:
+ case SERVICE_ACTION_IN_16:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
++ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
+ break;
+
+ case REPORT_LUNS:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
++ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
+ break;
+
+ case REQUEST_SENSE:
+@@ -4477,7 +4490,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
+ break;
+
+ case MAINTENANCE_IN:
+- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
++ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
+ break;
+
+ /* all other commands */
+--
+2.51.0
+
--- /dev/null
+From edf51f080b258bf6eeb71693c4aab90b58036f75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Oct 2025 12:21:41 +0200
+Subject: btrfs: define the AUTO_KFREE/AUTO_KVFREE helper macros
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Sabaté Solà <mssola@mssola.com>
+
+[ Upstream commit d00cbce0a7d5de5fc31bf60abd59b44d36806b6e ]
+
+These are two simple macros which ensure that a pointer is initialized
+to NULL and with the proper cleanup attribute for it.
+
+Signed-off-by: Miquel Sabaté Solà <mssola@mssola.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 52ee9965d09b ("btrfs: zoned: fixup last alloc pointer after extent removal for RAID0/10")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/misc.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
+index 0d599fd847c9b..1212674d7a1b4 100644
+--- a/fs/btrfs/misc.h
++++ b/fs/btrfs/misc.h
+@@ -10,6 +10,13 @@
+ #include <linux/math64.h>
+ #include <linux/rbtree.h>
+
++/*
++ * Convenience macros to define a pointer with the __free(kfree) and
++ * __free(kvfree) cleanup attributes and initialized to NULL.
++ */
++#define AUTO_KFREE(name) *name __free(kfree) = NULL
++#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
++
+ /*
+ * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
+ */
+--
+2.51.0
+
--- /dev/null
+From 56cf155f0f5c22eca76692767bd1f95b55403737 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2024 16:31:04 +0200
+Subject: btrfs: drop unused parameter fs_info from do_reclaim_sweep()
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit 343a63594bb6a49d094860705817aad6663b1f8f ]
+
+The parameter is unused and we can get it from space info if needed.
+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 19eff93dc738 ("btrfs: fix periodic reclaim condition")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/space-info.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index cae4ec21bab47..0470e041aba16 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -2031,8 +2031,7 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
+ return unalloc < data_chunk_size;
+ }
+
+-static void do_reclaim_sweep(const struct btrfs_fs_info *fs_info,
+- struct btrfs_space_info *space_info, int raid)
++static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+ {
+ struct btrfs_block_group *bg;
+ int thresh_pct;
+@@ -2128,6 +2127,6 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
+ if (!btrfs_should_periodic_reclaim(space_info))
+ continue;
+ for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+- do_reclaim_sweep(fs_info, space_info, raid);
++ do_reclaim_sweep(space_info, raid);
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 52bccfbc7c18d1dad00576f21d8ad2171df5c0d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 11:47:02 +0800
+Subject: btrfs: fix periodic reclaim condition
+
+From: Sun YangKai <sunk67188@gmail.com>
+
+[ Upstream commit 19eff93dc738e8afaa59cb374b44bb5a162e6c2d ]
+
+Problems with current implementation:
+
+1. reclaimable_bytes is signed while chunk_sz is unsigned, causing
+ negative reclaimable_bytes to trigger reclaim unexpectedly
+
+2. The "space must be freed between scans" assumption breaks the
+ two-scan requirement: first scan marks block groups, second scan
+ reclaims them. Without the second scan, no reclamation occurs.
+
+Instead, track actual reclaim progress: pause reclaim when block groups
+will be reclaimed, and resume only when progress is made. This ensures
+reclaim continues until no further progress can be made, and that
+periodic reclaim resumes when there's enough free space.
+
+Also, since we now track whether reclaim is making any progress, it is
+unnecessary to set periodic_reclaim_ready to false when we fail to
+reclaim a block group.
+
+Fixes: 813d4c6422516 ("btrfs: prevent pathological periodic reclaim loops")
+CC: stable@vger.kernel.org # 6.12+
+Suggested-by: Boris Burkov <boris@bur.io>
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: Sun YangKai <sunk67188@gmail.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 6 ++++--
+ fs/btrfs/space-info.c | 21 ++++++++++++---------
+ 2 files changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a29dc0a15d128..c579713e9899c 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1879,6 +1879,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ u64 zone_unusable;
+ u64 used;
+ u64 reserved;
++ u64 old_total;
+ int ret = 0;
+
+ bg = list_first_entry(&fs_info->reclaim_bgs,
+@@ -1954,6 +1955,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ zone_unusable = bg->zone_unusable;
+
+ spin_unlock(&bg->lock);
++ old_total = space_info->total_bytes;
+ spin_unlock(&space_info->lock);
+
+ /*
+@@ -2012,14 +2014,14 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ reserved = 0;
+ spin_lock(&space_info->lock);
+ space_info->reclaim_errors++;
+- if (READ_ONCE(space_info->periodic_reclaim))
+- space_info->periodic_reclaim_ready = false;
+ spin_unlock(&space_info->lock);
+ }
+ spin_lock(&space_info->lock);
+ space_info->reclaim_count++;
+ space_info->reclaim_bytes += used;
+ space_info->reclaim_bytes += reserved;
++ if (space_info->total_bytes < old_total)
++ btrfs_set_periodic_reclaim_ready(space_info, true);
+ spin_unlock(&space_info->lock);
+
+ next:
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 0470e041aba16..af19f7a3e74a4 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -2031,11 +2031,11 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
+ return unalloc < data_chunk_size;
+ }
+
+-static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
++static bool do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+ {
+ struct btrfs_block_group *bg;
+ int thresh_pct;
+- bool try_again = true;
++ bool will_reclaim = false;
+ bool urgent;
+
+ spin_lock(&space_info->lock);
+@@ -2053,7 +2053,7 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+ spin_lock(&bg->lock);
+ thresh = mult_perc(bg->length, thresh_pct);
+ if (bg->used < thresh && bg->reclaim_mark) {
+- try_again = false;
++ will_reclaim = true;
+ reclaim = true;
+ }
+ bg->reclaim_mark++;
+@@ -2070,12 +2070,13 @@ static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid)
+ * If we have any staler groups, we don't touch the fresher ones, but if we
+ * really need a block group, do take a fresh one.
+ */
+- if (try_again && urgent) {
+- try_again = false;
++ if (!will_reclaim && urgent) {
++ urgent = false;
+ goto again;
+ }
+
+ up_read(&space_info->groups_sem);
++ return will_reclaim;
+ }
+
+ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
+@@ -2085,7 +2086,8 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s6
+ lockdep_assert_held(&space_info->lock);
+ space_info->reclaimable_bytes += bytes;
+
+- if (space_info->reclaimable_bytes >= chunk_sz)
++ if (space_info->reclaimable_bytes > 0 &&
++ space_info->reclaimable_bytes >= chunk_sz)
+ btrfs_set_periodic_reclaim_ready(space_info, true);
+ }
+
+@@ -2112,7 +2114,6 @@ bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
+
+ spin_lock(&space_info->lock);
+ ret = space_info->periodic_reclaim_ready;
+- btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+
+ return ret;
+@@ -2126,7 +2127,9 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
+ list_for_each_entry(space_info, &fs_info->space_info, list) {
+ if (!btrfs_should_periodic_reclaim(space_info))
+ continue;
+- for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+- do_reclaim_sweep(space_info, raid);
++ for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
++ if (do_reclaim_sweep(space_info, raid))
++ btrfs_set_periodic_reclaim_ready(space_info, false);
++ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From afcaaaba611739a1ec42c644d79708b208dbf393 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 16:22:22 +0000
+Subject: btrfs: fix reclaimed bytes accounting after automatic block group
+ reclaim
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 620768704326c9a71ea9c8324ffda8748d8d4f10 ]
+
+We are considering the used bytes counter of a block group as the amount
+to update the space info's reclaim bytes counter after relocating the
+block group, but this value alone is often not enough. This is because we
+may have a reserved extent (or more) and in that case its size is
+reflected in the reserved counter of the block group - the size of the
+extent is only transferred from the reserved counter to the used counter
+of the block group when the delayed ref for the extent is run - typically
+when committing the transaction (or when flushing delayed refs due to
+ENOSPC on space reservation). Such call chain for data extents is:
+
+ btrfs_run_delayed_refs_for_head()
+ run_one_delayed_ref()
+ run_delayed_data_ref()
+ alloc_reserved_file_extent()
+ alloc_reserved_extent()
+ btrfs_update_block_group()
+ -> transfers the extent size from the reserved
+ counter to the used counter
+
+For metadata extents:
+
+ btrfs_run_delayed_refs_for_head()
+ run_one_delayed_ref()
+ run_delayed_tree_ref()
+ alloc_reserved_tree_block()
+ alloc_reserved_extent()
+ btrfs_update_block_group()
+ -> transfers the extent size from the reserved
+ counter to the used counter
+
+Since relocation flushes delalloc, waits for ordered extent completion
+and commits the current transaction before doing the actual relocation
+work, the correct amount of reclaimed space is therefore the sum of the
+"used" and "reserved" counters of the block group before we call
+btrfs_relocate_chunk() at btrfs_reclaim_bgs_work().
+
+So fix this by taking the "reserved" counter into consideration.
+
+Fixes: 243192b67649 ("btrfs: report reclaim stats in sysfs")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 19eff93dc738 ("btrfs: fix periodic reclaim condition")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 28 +++++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 2316be6ee41db..a29dc0a15d128 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1878,6 +1878,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ while (!list_empty(&fs_info->reclaim_bgs)) {
+ u64 zone_unusable;
+ u64 used;
++ u64 reserved;
+ int ret = 0;
+
+ bg = list_first_entry(&fs_info->reclaim_bgs,
+@@ -1974,21 +1975,32 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ goto next;
+
+ /*
+- * Grab the used bytes counter while holding the block group's
+- * spinlock to prevent races with tasks concurrently updating it
+- * due to extent allocation and deallocation (running
+- * btrfs_update_block_group()) - we have set the block group to
+- * RO but that only prevents extent reservation, allocation
+- * happens after reservation.
++ * The amount of bytes reclaimed corresponds to the sum of the
++ * "used" and "reserved" counters. We have set the block group
++ * to RO above, which prevents reservations from happening but
++ * we may have existing reservations for which allocation has
++ * not yet been done - btrfs_update_block_group() was not yet
++ * called, which is where we will transfer a reserved extent's
++ * size from the "reserved" counter to the "used" counter - this
++ * happens when running delayed references. When we relocate the
++ * chunk below, relocation first flushes delalloc, waits for
++ * ordered extent completion (which is where we create delayed
++ * references for data extents) and commits the current
++ * transaction (which runs delayed references), and only after
++ * it does the actual work to move extents out of the block
++ * group. So the reported amount of reclaimed bytes is
++ * effectively the sum of the 'used' and 'reserved' counters.
+ */
+ spin_lock(&bg->lock);
+ used = bg->used;
++ reserved = bg->reserved;
+ spin_unlock(&bg->lock);
+
+ btrfs_info(fs_info,
+- "reclaiming chunk %llu with %llu%% used %llu%% unusable",
++ "reclaiming chunk %llu with %llu%% used %llu%% reserved %llu%% unusable",
+ bg->start,
+ div64_u64(used * 100, bg->length),
++ div64_u64(reserved * 100, bg->length),
+ div64_u64(zone_unusable * 100, bg->length));
+ trace_btrfs_reclaim_block_group(bg);
+ ret = btrfs_relocate_chunk(fs_info, bg->start);
+@@ -1997,6 +2009,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ btrfs_err(fs_info, "error relocating chunk %llu",
+ bg->start);
+ used = 0;
++ reserved = 0;
+ spin_lock(&space_info->lock);
+ space_info->reclaim_errors++;
+ if (READ_ONCE(space_info->periodic_reclaim))
+@@ -2006,6 +2019,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ spin_lock(&space_info->lock);
+ space_info->reclaim_count++;
+ space_info->reclaim_bytes += used;
++ space_info->reclaim_bytes += reserved;
+ spin_unlock(&space_info->lock);
+
+ next:
+--
+2.51.0
+
--- /dev/null
+From 03330b145b60419861b0a1c134b0ae18b6bc2ed4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 15:40:26 +0000
+Subject: btrfs: get used bytes while holding lock at btrfs_reclaim_bgs_work()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ba5d06440cae63edc4f49465baf78f1f43e55c77 ]
+
+At btrfs_reclaim_bgs_work(), we are grabbing twice the used bytes counter
+of the block group while not holding the block group's spinlock. This can
+result in races, reported by KCSAN and similar tools, since a concurrent
+task can be updating that counter while at btrfs_update_block_group().
+
+So avoid these races by grabbing the counter in a critical section
+delimited by the block group's spinlock after setting the block group to
+RO mode. This also avoids using two different values of the counter in
+case it changes in between each read. This silences KCSAN and is required
+for the next patch in the series too.
+
+Fixes: 243192b67649 ("btrfs: report reclaim stats in sysfs")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 19eff93dc738 ("btrfs: fix periodic reclaim condition")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/block-group.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 79daf6fac58f3..2316be6ee41db 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1877,7 +1877,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
+ while (!list_empty(&fs_info->reclaim_bgs)) {
+ u64 zone_unusable;
+- u64 reclaimed;
++ u64 used;
+ int ret = 0;
+
+ bg = list_first_entry(&fs_info->reclaim_bgs,
+@@ -1973,19 +1973,30 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ if (ret < 0)
+ goto next;
+
++ /*
++ * Grab the used bytes counter while holding the block group's
++ * spinlock to prevent races with tasks concurrently updating it
++ * due to extent allocation and deallocation (running
++ * btrfs_update_block_group()) - we have set the block group to
++ * RO but that only prevents extent reservation, allocation
++ * happens after reservation.
++ */
++ spin_lock(&bg->lock);
++ used = bg->used;
++ spin_unlock(&bg->lock);
++
+ btrfs_info(fs_info,
+ "reclaiming chunk %llu with %llu%% used %llu%% unusable",
+ bg->start,
+- div64_u64(bg->used * 100, bg->length),
++ div64_u64(used * 100, bg->length),
+ div64_u64(zone_unusable * 100, bg->length));
+ trace_btrfs_reclaim_block_group(bg);
+- reclaimed = bg->used;
+ ret = btrfs_relocate_chunk(fs_info, bg->start);
+ if (ret) {
+ btrfs_dec_block_group_ro(bg);
+ btrfs_err(fs_info, "error relocating chunk %llu",
+ bg->start);
+- reclaimed = 0;
++ used = 0;
+ spin_lock(&space_info->lock);
+ space_info->reclaim_errors++;
+ if (READ_ONCE(space_info->periodic_reclaim))
+@@ -1994,7 +2005,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ }
+ spin_lock(&space_info->lock);
+ space_info->reclaim_count++;
+- space_info->reclaim_bytes += reclaimed;
++ space_info->reclaim_bytes += used;
+ spin_unlock(&space_info->lock);
+
+ next:
+--
+2.51.0
+
--- /dev/null
+From 80938a138990c8640087f00960bdafd227c95f6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jun 2025 09:17:41 +0200
+Subject: btrfs: zoned: fix alloc_offset calculation for partly conventional
+ block groups
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit c0d90a79e8e65b89037508276b2b31f41a1b3783 ]
+
+When one of two zones composing a DUP block group is a conventional zone,
+we have the zone_info[i]->alloc_offset = WP_CONVENTIONAL. That will, of
+course, not match the write pointer of the other zone, and fails that
+block group.
+
+This commit solves that issue by properly recovering the emulated write
+pointer from the last allocated extent. The offset for the SINGLE, DUP,
+and RAID1 are straight-forward: it is same as the end of last allocated
+extent. The RAID0 and RAID10 are a bit tricky that we need to do the math
+of striping.
+
+This is the kernel equivalent of Naohiro's user-space commit:
+"btrfs-progs: zoned: fix alloc_offset calculation for partly
+conventional block groups".
+
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: dda3ec9ee6b3 ("btrfs: zoned: fixup last alloc pointer after extent removal for RAID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 86 ++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 72 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 181cb3f56ab41..b757377d9331e 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1386,7 +1386,8 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
+ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+- unsigned long *active)
++ unsigned long *active,
++ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+@@ -1409,6 +1410,13 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ zone_info[1].physical);
+ return -EIO;
+ }
++
++ if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
++ zone_info[0].alloc_offset = last_alloc;
++
++ if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
++ zone_info[1].alloc_offset = last_alloc;
++
+ if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
+ btrfs_err(bg->fs_info,
+ "zoned: write pointer offset mismatch of zones in DUP profile");
+@@ -1429,7 +1437,8 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+- unsigned long *active)
++ unsigned long *active,
++ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ int i;
+@@ -1444,10 +1453,12 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
+
+ for (i = 0; i < map->num_stripes; i++) {
+- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+- zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+
++ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ zone_info[i].alloc_offset = last_alloc;
++
+ if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
+ !btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+@@ -1477,7 +1488,8 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+- unsigned long *active)
++ unsigned long *active,
++ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+@@ -1488,10 +1500,29 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+- zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+
++ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
++ u64 stripe_nr, full_stripe_nr;
++ u64 stripe_offset;
++ int stripe_index;
++
++ stripe_nr = div64_u64(last_alloc, map->stripe_size);
++ stripe_offset = stripe_nr * map->stripe_size;
++ full_stripe_nr = div_u64(stripe_nr, map->num_stripes);
++ div_u64_rem(stripe_nr, map->num_stripes, &stripe_index);
++
++ zone_info[i].alloc_offset =
++ full_stripe_nr * map->stripe_size;
++
++ if (stripe_index > i)
++ zone_info[i].alloc_offset += map->stripe_size;
++ else if (stripe_index == i)
++ zone_info[i].alloc_offset +=
++ (last_alloc - stripe_offset);
++ }
++
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (!btrfs_zone_activate(bg))
+ return -EIO;
+@@ -1509,7 +1540,8 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ struct btrfs_chunk_map *map,
+ struct zone_info *zone_info,
+- unsigned long *active)
++ unsigned long *active,
++ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+@@ -1520,8 +1552,7 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+- if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+- zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+
+ if (test_bit(0, active) != test_bit(i, active)) {
+@@ -1532,6 +1563,29 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+
++ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
++ u64 stripe_nr, full_stripe_nr;
++ u64 stripe_offset;
++ int stripe_index;
++
++ stripe_nr = div64_u64(last_alloc, map->stripe_size);
++ stripe_offset = stripe_nr * map->stripe_size;
++ full_stripe_nr = div_u64(stripe_nr,
++ map->num_stripes / map->sub_stripes);
++ div_u64_rem(stripe_nr,
++ (map->num_stripes / map->sub_stripes),
++ &stripe_index);
++
++ zone_info[i].alloc_offset =
++ full_stripe_nr * map->stripe_size;
++
++ if (stripe_index > (i / map->sub_stripes))
++ zone_info[i].alloc_offset += map->stripe_size;
++ else if (stripe_index == (i / map->sub_stripes))
++ zone_info[i].alloc_offset +=
++ (last_alloc - stripe_offset);
++ }
++
+ if ((i % map->sub_stripes) == 0) {
+ bg->zone_capacity += zone_info[i].capacity;
+ bg->alloc_offset += zone_info[i].alloc_offset;
+@@ -1619,18 +1673,22 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+- ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
++ ret = btrfs_load_block_group_dup(cache, map, zone_info, active,
++ last_alloc);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID1C3:
+ case BTRFS_BLOCK_GROUP_RAID1C4:
+- ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
++ ret = btrfs_load_block_group_raid1(cache, map, zone_info,
++ active, last_alloc);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID0:
+- ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
++ ret = btrfs_load_block_group_raid0(cache, map, zone_info,
++ active, last_alloc);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID10:
+- ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
++ ret = btrfs_load_block_group_raid10(cache, map, zone_info,
++ active, last_alloc);
+ break;
+ case BTRFS_BLOCK_GROUP_RAID5:
+ case BTRFS_BLOCK_GROUP_RAID6:
+--
+2.51.0
+
--- /dev/null
+From 35e298d0116bc0dc957d15d64a47a60baada300c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 11:46:11 +0900
+Subject: btrfs: zoned: fix stripe width calculation
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 6a1ab50135ce829b834b448ce49867b5210a1641 ]
+
+The stripe offset calculation in the zoned code for raid0 and raid10
+wrongly uses map->stripe_size to calculate it. In fact, map->stripe_size is
+the size of the device extent composing the block group, which always is
+the zone_size on the zoned setup.
+
+Fix it by using BTRFS_STRIPE_LEN and BTRFS_STRIPE_LEN_SHIFT. Also, optimize
+the calculation a bit by doing the common calculation only once.
+
+Fixes: c0d90a79e8e6 ("btrfs: zoned: fix alloc_offset calculation for partly conventional block groups")
+CC: stable@vger.kernel.org # 6.17+
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 52ee9965d09b ("btrfs: zoned: fixup last alloc pointer after extent removal for RAID0/10")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 56 ++++++++++++++++++++++--------------------------
+ 1 file changed, 26 insertions(+), 30 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index bf41d9b641f58..f63885c7bedfe 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1521,6 +1521,8 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
++ u64 stripe_nr = 0, stripe_offset = 0;
++ u32 stripe_index = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1528,28 +1530,26 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ if (last_alloc) {
++ u32 factor = map->num_stripes;
++
++ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
++ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
++ }
++
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+- u64 stripe_nr, full_stripe_nr;
+- u64 stripe_offset;
+- int stripe_index;
+
+- stripe_nr = div64_u64(last_alloc, map->stripe_size);
+- stripe_offset = stripe_nr * map->stripe_size;
+- full_stripe_nr = div_u64(stripe_nr, map->num_stripes);
+- div_u64_rem(stripe_nr, map->num_stripes, &stripe_index);
+-
+- zone_info[i].alloc_offset =
+- full_stripe_nr * map->stripe_size;
++ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > i)
+- zone_info[i].alloc_offset += map->stripe_size;
++ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == i)
+- zone_info[i].alloc_offset +=
+- (last_alloc - stripe_offset);
++ zone_info[i].alloc_offset += stripe_offset;
+ }
+
+ if (test_bit(0, active) != test_bit(i, active)) {
+@@ -1573,6 +1573,8 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
++ u64 stripe_nr = 0, stripe_offset = 0;
++ u32 stripe_index = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1580,6 +1582,14 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ if (last_alloc) {
++ u32 factor = map->num_stripes / map->sub_stripes;
++
++ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
++ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
++ }
++
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+@@ -1593,26 +1603,12 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+- u64 stripe_nr, full_stripe_nr;
+- u64 stripe_offset;
+- int stripe_index;
+-
+- stripe_nr = div64_u64(last_alloc, map->stripe_size);
+- stripe_offset = stripe_nr * map->stripe_size;
+- full_stripe_nr = div_u64(stripe_nr,
+- map->num_stripes / map->sub_stripes);
+- div_u64_rem(stripe_nr,
+- (map->num_stripes / map->sub_stripes),
+- &stripe_index);
+-
+- zone_info[i].alloc_offset =
+- full_stripe_nr * map->stripe_size;
++ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > (i / map->sub_stripes))
+- zone_info[i].alloc_offset += map->stripe_size;
++ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+ else if (stripe_index == (i / map->sub_stripes))
+- zone_info[i].alloc_offset +=
+- (last_alloc - stripe_offset);
++ zone_info[i].alloc_offset += stripe_offset;
+ }
+
+ if ((i % map->sub_stripes) == 0) {
+--
+2.51.0
+
--- /dev/null
+From 02f8f2a5f5ea0d2c29b40b06d3fcb20355b3d8a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Dec 2025 20:14:04 +0900
+Subject: btrfs: zoned: fixup last alloc pointer after extent removal for RAID1
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit dda3ec9ee6b3e120603bff1b798f25b51e54ac5d ]
+
+When a block group is composed of a sequential write zone and a
+conventional zone, we recover the (pseudo) write pointer of the
+conventional zone using the end of the last allocated position.
+
+However, if the last extent in a block group is removed, the last extent
+position will be smaller than the other real write pointer position.
+Then, that will cause an error due to mismatch of the write pointers.
+
+We can fixup this case by moving the alloc_offset to the corresponding
+write pointer position.
+
+Fixes: 568220fa9657 ("btrfs: zoned: support RAID0/1/10 on top of raid stripe tree")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index b757377d9331e..5deddb89c6197 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1452,6 +1452,21 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ /* In case a device is missing we have a cap of 0, so don't use it. */
+ bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
+
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (i = 0; i < map->num_stripes; i++) {
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
++ zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ continue;
++ if (last_alloc <= zone_info[i].alloc_offset) {
++ last_alloc = zone_info[i].alloc_offset;
++ break;
++ }
++ }
++
+ for (i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+ continue;
+--
+2.51.0
+
--- /dev/null
+From 3b8634e7af2a9d87816d17a78d76516aaa4dee3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 21:41:36 +0900
+Subject: btrfs: zoned: fixup last alloc pointer after extent removal for
+ RAID0/10
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 52ee9965d09b2c56a027613db30d1fb20d623861 ]
+
+When a block group is composed of a sequential write zone and a
+conventional zone, we recover the (pseudo) write pointer of the
+conventional zone using the end of the last allocated position.
+
+However, if the last extent in a block group is removed, the last extent
+position will be smaller than the other real write pointer position.
+Then, that will cause an error due to mismatch of the write pointers.
+
+We can fixup this case by moving the alloc_offset to the corresponding
+write pointer position.
+
+Fixes: 568220fa9657 ("btrfs: zoned: support RAID0/1/10 on top of raid stripe tree")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 194 +++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 179 insertions(+), 15 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index f63885c7bedfe..e0c5ff2e08c1f 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1522,7 +1522,9 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
++ u64 prev_offset = 0;
+ u32 stripe_index = 0;
++ bool has_partial = false, has_conventional = false;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1530,6 +1532,35 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (int i = 0; i < map->num_stripes; i++) {
++ u64 alloc;
++
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
++ zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ continue;
++
++ stripe_nr = zone_info[i].alloc_offset >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK;
++ if (stripe_offset == 0 && stripe_nr > 0) {
++ stripe_nr--;
++ stripe_offset = BTRFS_STRIPE_LEN;
++ }
++ alloc = ((stripe_nr * map->num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
++ stripe_offset;
++ last_alloc = max(last_alloc, alloc);
++
++ /* Partially written stripe found. It should be last. */
++ if (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK)
++ break;
++ }
++ stripe_nr = 0;
++ stripe_offset = 0;
++
+ if (last_alloc) {
+ u32 factor = map->num_stripes;
+
+@@ -1543,7 +1574,7 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ continue;
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+-
++ has_conventional = true;
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > i)
+@@ -1552,6 +1583,28 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ zone_info[i].alloc_offset += stripe_offset;
+ }
+
++ /* Verification */
++ if (i != 0) {
++ if (unlikely(prev_offset < zone_info[i].alloc_offset)) {
++ btrfs_err(fs_info,
++ "zoned: stripe position disorder found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_partial &&
++ (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK))) {
++ btrfs_err(fs_info,
++ "zoned: multiple partial written stripe found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++ prev_offset = zone_info[i].alloc_offset;
++
++ if ((zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK) != 0)
++ has_partial = true;
++
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (!btrfs_zone_activate(bg))
+ return -EIO;
+@@ -1563,6 +1616,19 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
+
++ /* Check if all devices stay in the same stripe row. */
++ if (unlikely(zone_info[0].alloc_offset -
++ zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
++ btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu", bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
++ btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
++ bg->alloc_offset, last_alloc);
++ return -EIO;
++ }
++
+ return 0;
+ }
+
+@@ -1573,8 +1639,11 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
++ u64 AUTO_KFREE(raid0_allocs);
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
++ bool has_partial = false, has_conventional = false;
++ u64 prev_offset = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1582,6 +1651,60 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ raid0_allocs = kcalloc(map->num_stripes / map->sub_stripes, sizeof(*raid0_allocs),
++ GFP_NOFS);
++ if (!raid0_allocs)
++ return -ENOMEM;
++
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (int i = 0; i < map->num_stripes; i += map->sub_stripes) {
++ u64 alloc = zone_info[i].alloc_offset;
++
++ for (int j = 1; j < map->sub_stripes; j++) {
++ int idx = i + j;
++
++ if (zone_info[idx].alloc_offset == WP_MISSING_DEV ||
++ zone_info[idx].alloc_offset == WP_CONVENTIONAL)
++ continue;
++ if (alloc == WP_MISSING_DEV || alloc == WP_CONVENTIONAL) {
++ alloc = zone_info[idx].alloc_offset;
++ } else if (unlikely(zone_info[idx].alloc_offset != alloc)) {
++ btrfs_err(fs_info,
++ "zoned: write pointer mismatch found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++
++ raid0_allocs[i / map->sub_stripes] = alloc;
++ if (alloc == WP_CONVENTIONAL)
++ continue;
++ if (unlikely(alloc == WP_MISSING_DEV)) {
++ btrfs_err(fs_info,
++ "zoned: cannot recover write pointer of block group %llu due to missing device",
++ bg->start);
++ return -EIO;
++ }
++
++ stripe_nr = alloc >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = alloc & BTRFS_STRIPE_LEN_MASK;
++ if (stripe_offset == 0 && stripe_nr > 0) {
++ stripe_nr--;
++ stripe_offset = BTRFS_STRIPE_LEN;
++ }
++
++ alloc = ((stripe_nr * (map->num_stripes / map->sub_stripes) +
++ (i / map->sub_stripes)) <<
++ BTRFS_STRIPE_LEN_SHIFT) + stripe_offset;
++ last_alloc = max(last_alloc, alloc);
++ }
++ stripe_nr = 0;
++ stripe_offset = 0;
++
+ if (last_alloc) {
+ u32 factor = map->num_stripes / map->sub_stripes;
+
+@@ -1591,24 +1714,51 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+- if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+- continue;
++ int idx = i / map->sub_stripes;
+
+- if (test_bit(0, active) != test_bit(i, active)) {
+- if (!btrfs_zone_activate(bg))
+- return -EIO;
+- } else {
+- if (test_bit(0, active))
+- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++ if (raid0_allocs[idx] == WP_CONVENTIONAL) {
++ has_conventional = true;
++ raid0_allocs[idx] = btrfs_stripe_nr_to_offset(stripe_nr);
++
++ if (stripe_index > idx)
++ raid0_allocs[idx] += BTRFS_STRIPE_LEN;
++ else if (stripe_index == idx)
++ raid0_allocs[idx] += stripe_offset;
+ }
+
+- if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+- zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
++ if ((i % map->sub_stripes) == 0) {
++ /* Verification */
++ if (i != 0) {
++ if (unlikely(prev_offset < raid0_allocs[idx])) {
++ btrfs_err(fs_info,
++ "zoned: stripe position disorder found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
+
+- if (stripe_index > (i / map->sub_stripes))
+- zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+- else if (stripe_index == (i / map->sub_stripes))
+- zone_info[i].alloc_offset += stripe_offset;
++ if (unlikely(has_partial &&
++ (raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK))) {
++ btrfs_err(fs_info,
++ "zoned: multiple partial written stripe found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++ prev_offset = raid0_allocs[idx];
++
++ if ((raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK) != 0)
++ has_partial = true;
++ }
++
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
++ zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ zone_info[i].alloc_offset = raid0_allocs[idx];
++
++ if (test_bit(0, active) != test_bit(i, active)) {
++ if (!btrfs_zone_activate(bg))
++ return -EIO;
++ } else if (test_bit(0, active)) {
++ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+
+ if ((i % map->sub_stripes) == 0) {
+@@ -1617,6 +1767,20 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+ }
+
++ /* Check if all devices stay in the same stripe row. */
++ if (unlikely(zone_info[0].alloc_offset -
++ zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
++ btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
++ btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
++ bg->alloc_offset, last_alloc);
++ return -EIO;
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3b03f3c872a1c2527f6e1a0c62d3902bc6e488da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 21:41:35 +0900
+Subject: btrfs: zoned: fixup last alloc pointer after extent removal for DUP
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit e2d848649e64de39fc1b9c64002629b4daa1105d ]
+
+When a block group is composed of a sequential write zone and a
+conventional zone, we recover the (pseudo) write pointer of the
+conventional zone using the end of the last allocated position.
+
+However, if the last extent in a block group is removed, the last extent
+position will be smaller than the other real write pointer position.
+Then, that will cause an error due to mismatch of the write pointers.
+
+We can fixup this case by moving the alloc_offset to the corresponding
+write pointer position.
+
+Fixes: c0d90a79e8e6 ("btrfs: zoned: fix alloc_offset calculation for partly conventional block groups")
+CC: stable@vger.kernel.org # 6.16+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 5deddb89c6197..bf41d9b641f58 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1411,6 +1411,20 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ return -EIO;
+ }
+
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (int i = 0; i < map->num_stripes; i++) {
++ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ continue;
++ if (last_alloc <= zone_info[i].alloc_offset) {
++ last_alloc = zone_info[i].alloc_offset;
++ break;
++ }
++ }
++
+ if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
+ zone_info[0].alloc_offset = last_alloc;
+
+--
+2.51.0
+
--- /dev/null
+From cac0c376fb8bed178d31c0a886e01034b2203a91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 0f6fb776b2298..5f1af6dfe7154 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -197,8 +197,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 3c58f9c7e5012865df7d919f377a4a76e687895a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 10:42:54 -0600
+Subject: drm/amd: Fix hang on amdgpu unload by using pci_dev_is_disconnected()
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit f7afda7fcd169a9168695247d07ad94cf7b9798f ]
+
+The commit 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise
+disconnect") introduced early KFD cleanup when drm_dev_is_unplugged()
+returns true. However, this causes hangs during normal module unload
+(rmmod amdgpu).
+
+The issue occurs because drm_dev_unplug() is called in amdgpu_pci_remove()
+for all removal scenarios, not just surprise disconnects. This was done
+intentionally in commit 39934d3ed572 ("Revert "drm/amdgpu: TA unload
+messages are not actually sent to psp when amdgpu is uninstalled"") to
+fix IGT PCI software unplug test failures. As a result,
+drm_dev_is_unplugged() returns true even during normal module unload,
+triggering the early KFD cleanup inappropriately.
+
+The correct check should distinguish between:
+- Actual surprise disconnect (eGPU unplugged): pci_dev_is_disconnected()
+ returns true
+- Normal module unload (rmmod): pci_dev_is_disconnected() returns false
+
+Replace drm_dev_is_unplugged() with pci_dev_is_disconnected() to ensure
+the early cleanup only happens during true hardware disconnect events.
+
+Cc: stable@vger.kernel.org
+Reported-by: Cal Peake <cp@absolutedigital.net>
+Closes: https://lore.kernel.org/all/b0c22deb-c0fa-3343-33cf-fd9a77d7db99@absolutedigital.net/
+Fixes: 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise disconnect")
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cab75f5c9f2fd..361184355e232 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4648,7 +4648,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ * before ip_fini_early to prevent kfd locking refcount issues by calling
+ * amdgpu_amdkfd_suspend()
+ */
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
+ amdgpu_device_ip_fini_early(adev);
+@@ -4660,7 +4660,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_device_unmap_mmio(adev);
+
+ }
+--
+2.51.0
+
--- /dev/null
+From c9c5816d837adb825ea940b0898805c67513b73d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Jan 2026 17:25:52 +0900
+Subject: drm/exynos: vidi: fix to avoid directly dereferencing user pointer
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+[ Upstream commit d4c98c077c7fb2dfdece7d605e694b5ea2665085 ]
+
+In vidi_connection_ioctl(), vidi->edid(user pointer) is directly
+dereferenced in the kernel.
+
+This allows arbitrary kernel memory access from the user space, so instead
+of directly accessing the user pointer in the kernel, we should modify it
+to copy edid to kernel memory using copy_from_user() and use it.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_vidi.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index 6de0cced6c9d2..007fd8dad3559 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -246,13 +246,27 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+
+ if (vidi->connection) {
+ const struct drm_edid *drm_edid;
+- const struct edid *raw_edid;
++ const void __user *edid_userptr = u64_to_user_ptr(vidi->edid);
++ void *edid_buf;
++ struct edid hdr;
+ size_t size;
+
+- raw_edid = (const struct edid *)(unsigned long)vidi->edid;
+- size = (raw_edid->extensions + 1) * EDID_LENGTH;
++ if (copy_from_user(&hdr, edid_userptr, sizeof(hdr)))
++ return -EFAULT;
+
+- drm_edid = drm_edid_alloc(raw_edid, size);
++ size = (hdr.extensions + 1) * EDID_LENGTH;
++
++ edid_buf = kmalloc(size, GFP_KERNEL);
++ if (!edid_buf)
++ return -ENOMEM;
++
++ if (copy_from_user(edid_buf, edid_userptr, size)) {
++ kfree(edid_buf);
++ return -EFAULT;
++ }
++
++ drm_edid = drm_edid_alloc(edid_buf, size);
++ kfree(edid_buf);
+ if (!drm_edid)
+ return -ENOMEM;
+
+--
+2.51.0
+
--- /dev/null
+From 55d088c8c2c804918aa5eeffc9e096c3bed9eece Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Mar 2025 12:27:20 +0800
+Subject: drm/exynos/vidi: Remove redundant error handling in vidi_get_modes()
+
+From: Wentao Liang <vulab@iscas.ac.cn>
+
+[ Upstream commit 0253dadc772e83aaa67aea8bf24a71e7ffe13cb0 ]
+
+In the vidi_get_modes() function, if either drm_edid_dup() or
+drm_edid_alloc() fails, the function will immediately return 0,
+indicating that no display modes can be retrieved. However, in
+the event of failure in these two functions, it is still necessary
+to call the subsequent drm_edid_connector_update() function with
+a NULL drm_edid as an argument. This ensures that operations such
+as connector settings are performed in its callee function,
+_drm_edid_connector_property_update. To maintain the integrity of
+the operation, redundant error handling needs to be removed.
+
+Signed-off-by: Wentao Liang <vulab@iscas.ac.cn>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Stable-dep-of: 52b330799e2d ("drm/exynos: vidi: use ctx->lock to protect struct vidi_context member variables related to memory alloc/free")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_vidi.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index 007fd8dad3559..4c0d536cb57d4 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -326,9 +326,6 @@ static int vidi_get_modes(struct drm_connector *connector)
+ else
+ drm_edid = drm_edid_alloc(fake_edid_info, sizeof(fake_edid_info));
+
+- if (!drm_edid)
+- return 0;
+-
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+--
+2.51.0
+
--- /dev/null
+From 2ace2a8905badef2d4a372d69959a4b98084e7b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Jan 2026 17:25:53 +0900
+Subject: drm/exynos: vidi: use ctx->lock to protect struct vidi_context member
+ variables related to memory alloc/free
+
+From: Jeongjun Park <aha310510@gmail.com>
+
+[ Upstream commit 52b330799e2d6f825ae2bb74662ec1b10eb954bb ]
+
+Exynos Virtual Display driver performs memory alloc/free operations
+without lock protection, which easily causes concurrency problem.
+
+For example, use-after-free can occur in race scenario like this:
+```
+ CPU0 CPU1 CPU2
+ ---- ---- ----
+ vidi_connection_ioctl()
+ if (vidi->connection) // true
+ drm_edid = drm_edid_alloc(); // alloc drm_edid
+ ...
+ ctx->raw_edid = drm_edid;
+ ...
+ drm_mode_getconnector()
+ drm_helper_probe_single_connector_modes()
+ vidi_get_modes()
+ if (ctx->raw_edid) // true
+ drm_edid_dup(ctx->raw_edid);
+ if (!drm_edid) // false
+ ...
+ vidi_connection_ioctl()
+ if (vidi->connection) // false
+ drm_edid_free(ctx->raw_edid); // free drm_edid
+ ...
+ drm_edid_alloc(drm_edid->edid)
+ kmemdup(edid); // UAF!!
+ ...
+```
+
+To prevent these vulns, at least in vidi_context, member variables related
+to memory alloc/free should be protected with ctx->lock.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jeongjun Park <aha310510@gmail.com>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/exynos/exynos_drm_vidi.c | 38 ++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+index 4c0d536cb57d4..8400330dfe3eb 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -186,29 +186,37 @@ static ssize_t vidi_store_connection(struct device *dev,
+ const char *buf, size_t len)
+ {
+ struct vidi_context *ctx = dev_get_drvdata(dev);
+- int ret;
++ int ret, new_connected;
+
+- ret = kstrtoint(buf, 0, &ctx->connected);
++ ret = kstrtoint(buf, 0, &new_connected);
+ if (ret)
+ return ret;
+-
+- if (ctx->connected > 1)
++ if (new_connected > 1)
+ return -EINVAL;
+
++ mutex_lock(&ctx->lock);
++
+ /*
+ * Use fake edid data for test. If raw_edid is set then it can't be
+ * tested.
+ */
+ if (ctx->raw_edid) {
+ DRM_DEV_DEBUG_KMS(dev, "edid data is not fake data.\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto fail;
+ }
+
++ ctx->connected = new_connected;
++ mutex_unlock(&ctx->lock);
++
+ DRM_DEV_DEBUG_KMS(dev, "requested connection.\n");
+
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ return len;
++fail:
++ mutex_unlock(&ctx->lock);
++ return ret;
+ }
+
+ static DEVICE_ATTR(connection, 0644, vidi_show_connection,
+@@ -238,11 +246,14 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ return -EINVAL;
+ }
+
++ mutex_lock(&ctx->lock);
+ if (ctx->connected == vidi->connection) {
++ mutex_unlock(&ctx->lock);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "same connection request.\n");
+ return -EINVAL;
+ }
++ mutex_unlock(&ctx->lock);
+
+ if (vidi->connection) {
+ const struct drm_edid *drm_edid;
+@@ -276,14 +287,21 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+ "edid data is invalid.\n");
+ return -EINVAL;
+ }
++ mutex_lock(&ctx->lock);
+ ctx->raw_edid = drm_edid;
++ mutex_unlock(&ctx->lock);
+ } else {
+ /* with connection = 0, free raw_edid */
++ mutex_lock(&ctx->lock);
+ drm_edid_free(ctx->raw_edid);
+ ctx->raw_edid = NULL;
++ mutex_unlock(&ctx->lock);
+ }
+
++ mutex_lock(&ctx->lock);
+ ctx->connected = vidi->connection;
++ mutex_unlock(&ctx->lock);
++
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ return 0;
+@@ -298,7 +316,7 @@ static enum drm_connector_status vidi_detect(struct drm_connector *connector,
+ * connection request would come from user side
+ * to do hotplug through specific ioctl.
+ */
+- return ctx->connected ? connector_status_connected :
++ return READ_ONCE(ctx->connected) ? connector_status_connected :
+ connector_status_disconnected;
+ }
+
+@@ -321,11 +339,15 @@ static int vidi_get_modes(struct drm_connector *connector)
+ const struct drm_edid *drm_edid;
+ int count;
+
++ mutex_lock(&ctx->lock);
++
+ if (ctx->raw_edid)
+ drm_edid = drm_edid_dup(ctx->raw_edid);
+ else
+ drm_edid = drm_edid_alloc(fake_edid_info, sizeof(fake_edid_info));
+
++ mutex_unlock(&ctx->lock);
++
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+@@ -470,9 +492,13 @@ static void vidi_remove(struct platform_device *pdev)
+ {
+ struct vidi_context *ctx = platform_get_drvdata(pdev);
+
++ mutex_lock(&ctx->lock);
++
+ drm_edid_free(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+
++ mutex_unlock(&ctx->lock);
++
+ component_del(&pdev->dev, &vidi_component_ops);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8aedb436f8ec02619aac932e9efa5d8ec5b50eeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 532a8f4bee7fc..a796dc6742373 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1540,11 +1540,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 4b609d4da1841e3a1af4a4576a6244e8f8830402 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2025 08:44:17 +0000
+Subject: efi/cper, cxl: Make definitions and structures global
+
+From: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+
+[ Upstream commit 958c3a6706863f9f77b21c90d2428473441cd8a1 ]
+
+In preparation to add tracepoint support, move protocol error UUID
+definition to a common location, Also, make struct CXL RAS capability,
+cxl_cper_sec_prot_err and CPER validation flags global for use across
+different modules.
+
+Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Fan Ni <fan.ni@samsung.com>
+Reviewed-by: Gregory Price <gourry@gourry.net>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/20250123084421.127697-3-Smita.KoralahalliChannabasappa@amd.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Stable-dep-of: b584bfbd7ec4 ("ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing with clang < 18")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/cper.c | 1 +
+ drivers/firmware/efi/cper_cxl.c | 35 +--------------
+ drivers/firmware/efi/cper_cxl.h | 51 ---------------------
+ include/cxl/event.h | 80 +++++++++++++++++++++++++++++++++
+ include/linux/cper.h | 4 ++
+ 5 files changed, 86 insertions(+), 85 deletions(-)
+
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index e0adbd6df9b53..7fe10a06e68c3 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -25,6 +25,7 @@
+ #include <linux/bcd.h>
+ #include <acpi/ghes.h>
+ #include <ras/ras_event.h>
++#include <cxl/event.h>
+ #include "cper_cxl.h"
+
+ /*
+diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
+index cbaabcb7382d9..64c0dd27be6ed 100644
+--- a/drivers/firmware/efi/cper_cxl.c
++++ b/drivers/firmware/efi/cper_cxl.c
+@@ -8,27 +8,9 @@
+ */
+
+ #include <linux/cper.h>
++#include <cxl/event.h>
+ #include "cper_cxl.h"
+
+-#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+-#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+-#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+-#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+-#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+-#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+-#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+-
+-/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+-struct cxl_ras_capability_regs {
+- u32 uncor_status;
+- u32 uncor_mask;
+- u32 uncor_severity;
+- u32 cor_status;
+- u32 cor_mask;
+- u32 cap_control;
+- u32 header_log[16];
+-};
+-
+ static const char * const prot_err_agent_type_strs[] = {
+ "Restricted CXL Device",
+ "Restricted CXL Host Downstream Port",
+@@ -40,21 +22,6 @@ static const char * const prot_err_agent_type_strs[] = {
+ "CXL Upstream Switch Port",
+ };
+
+-/*
+- * The layout of the enumeration and the values matches CXL Agent Type
+- * field in the UEFI 2.10 Section N.2.13,
+- */
+-enum {
+- RCD, /* Restricted CXL Device */
+- RCH_DP, /* Restricted CXL Host Downstream Port */
+- DEVICE, /* CXL Device */
+- LD, /* CXL Logical Device */
+- FMLD, /* CXL Fabric Manager managed Logical Device */
+- RP, /* CXL Root Port */
+- DSP, /* CXL Downstream Switch Port */
+- USP, /* CXL Upstream Switch Port */
+-};
+-
+ void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err)
+ {
+diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
+index 0e3ab0ba17c36..5ce1401ee17af 100644
+--- a/drivers/firmware/efi/cper_cxl.h
++++ b/drivers/firmware/efi/cper_cxl.h
+@@ -10,57 +10,6 @@
+ #ifndef LINUX_CPER_CXL_H
+ #define LINUX_CPER_CXL_H
+
+-/* CXL Protocol Error Section */
+-#define CPER_SEC_CXL_PROT_ERR \
+- GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+- 0x4B, 0x77, 0x10, 0x48)
+-
+-#pragma pack(1)
+-
+-/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+-struct cxl_cper_sec_prot_err {
+- u64 valid_bits;
+- u8 agent_type;
+- u8 reserved[7];
+-
+- /*
+- * Except for RCH Downstream Port, all the remaining CXL Agent
+- * types are uniquely identified by the PCIe compatible SBDF number.
+- */
+- union {
+- u64 rcrb_base_addr;
+- struct {
+- u8 function;
+- u8 device;
+- u8 bus;
+- u16 segment;
+- u8 reserved_1[3];
+- };
+- } agent_addr;
+-
+- struct {
+- u16 vendor_id;
+- u16 device_id;
+- u16 subsystem_vendor_id;
+- u16 subsystem_id;
+- u8 class_code[2];
+- u16 slot;
+- u8 reserved_1[4];
+- } device_id;
+-
+- struct {
+- u32 lower_dw;
+- u32 upper_dw;
+- } dev_serial_num;
+-
+- u8 capability[60];
+- u16 dvsec_len;
+- u16 err_len;
+- u8 reserved_2[4];
+-};
+-
+-#pragma pack()
+-
+ void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
+
+diff --git a/include/cxl/event.h b/include/cxl/event.h
+index 0bea1afbd747c..66d85fc87701d 100644
+--- a/include/cxl/event.h
++++ b/include/cxl/event.h
+@@ -152,6 +152,86 @@ struct cxl_cper_work_data {
+ struct cxl_cper_event_rec rec;
+ };
+
++#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
++#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
++#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
++#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
++#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
++#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
++#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
++
++/*
++ * The layout of the enumeration and the values matches CXL Agent Type
++ * field in the UEFI 2.10 Section N.2.13,
++ */
++enum {
++ RCD, /* Restricted CXL Device */
++ RCH_DP, /* Restricted CXL Host Downstream Port */
++ DEVICE, /* CXL Device */
++ LD, /* CXL Logical Device */
++ FMLD, /* CXL Fabric Manager managed Logical Device */
++ RP, /* CXL Root Port */
++ DSP, /* CXL Downstream Switch Port */
++ USP, /* CXL Upstream Switch Port */
++};
++
++#pragma pack(1)
++
++/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
++struct cxl_cper_sec_prot_err {
++ u64 valid_bits;
++ u8 agent_type;
++ u8 reserved[7];
++
++ /*
++ * Except for RCH Downstream Port, all the remaining CXL Agent
++ * types are uniquely identified by the PCIe compatible SBDF number.
++ */
++ union {
++ u64 rcrb_base_addr;
++ struct {
++ u8 function;
++ u8 device;
++ u8 bus;
++ u16 segment;
++ u8 reserved_1[3];
++ };
++ } agent_addr;
++
++ struct {
++ u16 vendor_id;
++ u16 device_id;
++ u16 subsystem_vendor_id;
++ u16 subsystem_id;
++ u8 class_code[2];
++ u16 slot;
++ u8 reserved_1[4];
++ } device_id;
++
++ struct {
++ u32 lower_dw;
++ u32 upper_dw;
++ } dev_serial_num;
++
++ u8 capability[60];
++ u16 dvsec_len;
++ u16 err_len;
++ u8 reserved_2[4];
++};
++
++#pragma pack()
++
++/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
++struct cxl_ras_capability_regs {
++ u32 uncor_status;
++ u32 uncor_mask;
++ u32 uncor_severity;
++ u32 cor_status;
++ u32 cor_mask;
++ u32 cap_control;
++ u32 header_log[16];
++};
++
+ #ifdef CONFIG_ACPI_APEI_GHES
+ int cxl_cper_register_work(struct work_struct *work);
+ int cxl_cper_unregister_work(struct work_struct *work);
+diff --git a/include/linux/cper.h b/include/linux/cper.h
+index 951291fa50d4f..e9c0331fe204a 100644
+--- a/include/linux/cper.h
++++ b/include/linux/cper.h
+@@ -89,6 +89,10 @@ enum {
+ #define CPER_NOTIFY_DMAR \
+ GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
++/* CXL Protocol Error Section */
++#define CPER_SEC_CXL_PROT_ERR \
++ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
++ 0x4B, 0x77, 0x10, 0x48)
+
+ /* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+ /*
+--
+2.51.0
+
--- /dev/null
+From 359440e90906fcc3deb97c427092d424594cf730 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2025 08:44:16 +0000
+Subject: efi/cper, cxl: Prefix protocol error struct and function names with
+ cxl_
+
+From: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+
+[ Upstream commit 84973331442a57bac31b40eaef6f264496c6f3fd ]
+
+Rename the protocol error struct from struct cper_sec_prot_err to
+struct cxl_cper_sec_prot_err and cper_print_prot_err() to
+cxl_cper_print_prot_err() to maintain naming consistency. No
+functional changes.
+
+Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Fan Ni <fan.ni@samsung.com>
+Reviewed-by: Gregory Price <gourry@gourry.net>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/20250123084421.127697-2-Smita.KoralahalliChannabasappa@amd.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Stable-dep-of: b584bfbd7ec4 ("ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing with clang < 18")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/cper.c | 4 ++--
+ drivers/firmware/efi/cper_cxl.c | 3 ++-
+ drivers/firmware/efi/cper_cxl.h | 5 +++--
+ 3 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index 3587295cd0206..e0adbd6df9b53 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -690,11 +690,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+- struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
++ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: CXL Protocol Error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*prot_err))
+- cper_print_prot_err(newpfx, prot_err);
++ cxl_cper_print_prot_err(newpfx, prot_err);
+ else
+ goto err_section_too_small;
+ } else {
+diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
+index a55771b99a97a..cbaabcb7382d9 100644
+--- a/drivers/firmware/efi/cper_cxl.c
++++ b/drivers/firmware/efi/cper_cxl.c
+@@ -55,7 +55,8 @@ enum {
+ USP, /* CXL Upstream Switch Port */
+ };
+
+-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
++void cxl_cper_print_prot_err(const char *pfx,
++ const struct cxl_cper_sec_prot_err *prot_err)
+ {
+ if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
+ pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
+diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
+index 86bfcf7909eca..0e3ab0ba17c36 100644
+--- a/drivers/firmware/efi/cper_cxl.h
++++ b/drivers/firmware/efi/cper_cxl.h
+@@ -18,7 +18,7 @@
+ #pragma pack(1)
+
+ /* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+-struct cper_sec_prot_err {
++struct cxl_cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+@@ -61,6 +61,7 @@ struct cper_sec_prot_err {
+
+ #pragma pack()
+
+-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
++void cxl_cper_print_prot_err(const char *pfx,
++ const struct cxl_cper_sec_prot_err *prot_err);
+
+ #endif //__CPER_CXL_
+--
+2.51.0
+
--- /dev/null
+From f82a3ece8f71192fab27975e00d5653fc4d1422e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:11 +0800
+Subject: ext4: add ext4_try_lock_group() to skip busy groups
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit e9eec6f33971fbfcdd32fd1c7dd515ff4d2954c0 ]
+
+When ext4 allocates blocks, we used to just go through the block groups
+one by one to find a good one. But when there are tons of block groups
+(like hundreds of thousands or even millions) and not many have free space
+(meaning they're mostly full), it takes a really long time to check them
+all, and performance gets bad. So, we added the "mb_optimize_scan" mount
+option (which is on by default now). It keeps track of some group lists,
+so when we need a free block, we can just grab a likely group from the
+right list. This saves time and makes block allocation much faster.
+
+But when multiple processes or containers are doing similar things, like
+constantly allocating 8k blocks, they all try to use the same block group
+in the same list. Even just two processes doing this can cut the IOPS in
+half. For example, one container might do 300,000 IOPS, but if you run two
+at the same time, the total is only 150,000.
+
+Since we can already look at block groups in a non-linear way, the first
+and last groups in the same list are basically the same for finding a block
+right now. Therefore, add an ext4_try_lock_group() helper function to skip
+the current group when it is locked by another process, thereby avoiding
+contention with other processes. This helps ext4 make better use of having
+multiple block groups.
+
+Also, to make sure we don't skip all the groups that have free space
+when allocating blocks, we won't try to skip busy groups anymore when
+ac_criteria is CR_ANY_FREE.
+
+Performance test data follows:
+
+Test: Running will-it-scale/fallocate2 on CPU-bound containers.
+Observation: Average fallocate operations per container per second.
+
+|CPU: Kunpeng 920 | P80 |
+|Memory: 512GB |-------------------------|
+|960GB SSD (0.5GB/s)| base | patched |
+|-------------------|-------|-----------------|
+|mb_optimize_scan=0 | 2667 | 4821 (+80.7%) |
+|mb_optimize_scan=1 | 2643 | 4784 (+81.0%) |
+
+|CPU: AMD 9654 * 2 | P96 |
+|Memory: 1536GB |-------------------------|
+|960GB SSD (1GB/s) | base | patched |
+|-------------------|-------|-----------------|
+|mb_optimize_scan=0 | 3450 | 15371 (+345%) |
+|mb_optimize_scan=1 | 3209 | 6101 (+90.0%) |
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-2-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 23 ++++++++++++++---------
+ fs/ext4/mballoc.c | 19 ++++++++++++++++---
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index d8a059ec1ad62..822b18996a434 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3507,23 +3507,28 @@ static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
+ return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
+ }
+
++static inline bool ext4_try_lock_group(struct super_block *sb, ext4_group_t group)
++{
++ if (!spin_trylock(ext4_group_lock_ptr(sb, group)))
++ return false;
++ /*
++ * We're able to grab the lock right away, so drop the lock
++ * contention counter.
++ */
++ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
++ return true;
++}
++
+ static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+ {
+- spinlock_t *lock = ext4_group_lock_ptr(sb, group);
+- if (spin_trylock(lock))
+- /*
+- * We're able to grab the lock right away, so drop the
+- * lock contention counter.
+- */
+- atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
+- else {
++ if (!ext4_try_lock_group(sb, group)) {
+ /*
+ * The lock is busy, so bump the contention counter,
+ * and then wait on the spin lock.
+ */
+ atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
+ EXT4_MAX_CONTENTION);
+- spin_lock(lock);
++ spin_lock(ext4_group_lock_ptr(sb, group));
+ }
+ }
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index edfffd15b2952..329fe83cbe814 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -913,7 +913,8 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
+ bb_largest_free_order_node) {
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
+- if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
++ if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
++ likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
+ *group = iter->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+ read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+@@ -949,7 +950,8 @@ ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int o
+ list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
+- if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
++ if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
++ likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
+ grp = iter;
+ break;
+ }
+@@ -2910,6 +2912,11 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ nr, &prefetch_ios);
+ }
+
++ /* prevent unnecessary buddy loading. */
++ if (cr < CR_ANY_FREE &&
++ spin_is_locked(ext4_group_lock_ptr(sb, group)))
++ continue;
++
+ /* This now checks without needing the buddy page */
+ ret = ext4_mb_good_group_nolock(ac, group, cr);
+ if (ret <= 0) {
+@@ -2922,7 +2929,13 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ if (err)
+ goto out;
+
+- ext4_lock_group(sb, group);
++ /* skip busy group */
++ if (cr >= CR_ANY_FREE) {
++ ext4_lock_group(sb, group);
++ } else if (!ext4_try_lock_group(sb, group)) {
++ ext4_mb_unload_buddy(&e4b);
++ continue;
++ }
+
+ /*
+ * We need to check again after locking the
+--
+2.51.0
+
--- /dev/null
+From 7a6bcf563af00b7cad7b66aa321bba8458a04538 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 19:28:18 +0100
+Subject: ext4: always allocate blocks only from groups inode can use
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 4865c768b563deff1b6a6384e74a62f143427b42 ]
+
+For filesystems with more than 2^32 blocks inodes using indirect block
+based format cannot use blocks beyond the 32-bit limit.
+ext4_mb_scan_groups_linear() takes care to not select these unsupported
+groups for such inodes however other functions selecting groups for
+allocation don't. So far this is harmless because the other selection
+functions are used only with mb_optimize_scan and this is currently
+disabled for inodes with indirect blocks however in the following patch
+we want to enable mb_optimize_scan regardless of inode format.
+
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Acked-by: Pedro Falcato <pfalcato@suse.de>
+Cc: stable@kernel.org
+Link: https://patch.msgid.link/20260114182836.14120-3-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 29 ++++++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 9 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1e180c55ebd4f..aa1627db56c5a 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -892,6 +892,21 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ }
+ }
+
++static ext4_group_t ext4_get_allocation_groups_count(
++ struct ext4_allocation_context *ac)
++{
++ ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);
++
++ /* non-extent files are limited to low blocks/groups */
++ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
++ ngroups = EXT4_SB(ac->ac_sb)->s_blockfile_groups;
++
++ /* Pairs with smp_wmb() in ext4_update_super() */
++ smp_rmb();
++
++ return ngroups;
++}
++
+ static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
+ struct xarray *xa,
+ ext4_group_t start, ext4_group_t end)
+@@ -899,7 +914,7 @@ static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ enum criteria cr = ac->ac_criteria;
+- ext4_group_t ngroups = ext4_get_groups_count(sb);
++ ext4_group_t ngroups = ext4_get_allocation_groups_count(ac);
+ unsigned long group = start;
+ struct ext4_group_info *grp;
+
+@@ -951,7 +966,7 @@ static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
+ ext4_group_t start, end;
+
+ start = group;
+- end = ext4_get_groups_count(ac->ac_sb);
++ end = ext4_get_allocation_groups_count(ac);
+ wrap_around:
+ for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+ ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
+@@ -1001,7 +1016,7 @@ static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
+ ext4_group_t start, end;
+
+ start = group;
+- end = ext4_get_groups_count(ac->ac_sb);
++ end = ext4_get_allocation_groups_count(ac);
+ wrap_around:
+ i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+@@ -1083,7 +1098,7 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ min_order = fls(ac->ac_o_ex.fe_len);
+
+ start = group;
+- end = ext4_get_groups_count(ac->ac_sb);
++ end = ext4_get_allocation_groups_count(ac);
+ wrap_around:
+ for (i = order; i >= min_order; i--) {
+ int frag_order;
+@@ -1180,11 +1195,7 @@ static int ext4_mb_scan_groups(struct ext4_allocation_context *ac)
+ int ret = 0;
+ ext4_group_t start;
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);
+-
+- /* non-extent files are limited to low blocks/groups */
+- if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
+- ngroups = sbi->s_blockfile_groups;
++ ext4_group_t ngroups = ext4_get_allocation_groups_count(ac);
+
+ /* searching for the right group start from the goal value specified */
+ start = ac->ac_g_ex.fe_group;
+--
+2.51.0
+
--- /dev/null
+From 1ef134c5a63e806d4a9b09f8fce9c128a0e67d59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:25 +0800
+Subject: ext4: convert free groups order lists to xarrays
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit f7eaacbb4e54f8a6c6674c16eff54f703ea63d5e ]
+
+While traversing the list, holding a spin_lock prevents load_buddy, making
+direct use of ext4_try_lock_group impossible. This can lead to a bouncing
+scenario where spin_is_locked(grp_A) succeeds, but ext4_try_lock_group()
+fails, forcing the list traversal to repeatedly restart from grp_A.
+
+In contrast, linear traversal directly uses ext4_try_lock_group(),
+avoiding this bouncing. Therefore, we need a lockless, ordered traversal
+to achieve linear-like efficiency.
+
+Therefore, this commit converts both average fragment size lists and
+largest free order lists into ordered xarrays.
+
+In an xarray, the index represents the block group number and the value
+holds the block group information; a non-empty value indicates the block
+group's presence.
+
+While insertion and deletion complexity remain O(1), lookup complexity
+changes from O(1) to O(nlogn), which may slightly reduce single-threaded
+performance.
+
+Additionally, xarray insertions might fail, potentially due to memory
+allocation issues. However, since we have linear traversal as a fallback,
+this isn't a major problem. Therefore, we've only added a warning message
+for insertion failures here.
+
+A helper function ext4_mb_find_good_group_xarray() is added to find good
+groups in the specified xarray starting at the specified position start,
+and when it reaches ngroups-1, it wraps around to 0 and then to start-1.
+This ensures an ordered traversal within the xarray.
+
+Performance test results are as follows: Single-process operations
+on an empty disk show negligible impact, while multi-process workloads
+demonstrate a noticeable performance gain.
+
+|CPU: Kunpeng 920 | P80 | P1 |
+|Memory: 512GB |------------------------|-------------------------|
+|960GB SSD (0.5GB/s)| base | patched | base | patched |
+|-------------------|-------|----------------|--------|----------------|
+|mb_optimize_scan=0 | 20097 | 19555 (-2.6%) | 316141 | 315636 (-0.2%) |
+|mb_optimize_scan=1 | 13318 | 15496 (+16.3%) | 325273 | 323569 (-0.5%) |
+
+|CPU: AMD 9654 * 2 | P96 | P1 |
+|Memory: 1536GB |------------------------|-------------------------|
+|960GB SSD (1GB/s) | base | patched | base | patched |
+|-------------------|-------|----------------|--------|----------------|
+|mb_optimize_scan=0 | 53603 | 53192 (-0.7%) | 214243 | 212678 (-0.7%) |
+|mb_optimize_scan=1 | 20887 | 37636 (+80.1%) | 213632 | 214189 (+0.2%) |
+
+[ Applied spelling fixes per discussion on the ext4-list see thread
+ referenced in the Link tag. --tytso]
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-16-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 8 +-
+ fs/ext4/mballoc-test.c | 4 -
+ fs/ext4/mballoc.c | 254 ++++++++++++++++++++++-------------------
+ 3 files changed, 140 insertions(+), 126 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 822b18996a434..7cfe38fdb9950 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1588,10 +1588,8 @@ struct ext4_sb_info {
+ struct list_head s_discard_list;
+ struct work_struct s_discard_work;
+ atomic_t s_retry_alloc_pending;
+- struct list_head *s_mb_avg_fragment_size;
+- rwlock_t *s_mb_avg_fragment_size_locks;
+- struct list_head *s_mb_largest_free_orders;
+- rwlock_t *s_mb_largest_free_orders_locks;
++ struct xarray *s_mb_avg_fragment_size;
++ struct xarray *s_mb_largest_free_orders;
+
+ /* tunables */
+ unsigned long s_stripe;
+@@ -3455,8 +3453,6 @@ struct ext4_group_info {
+ void *bb_bitmap;
+ #endif
+ struct rw_semaphore alloc_sem;
+- struct list_head bb_avg_fragment_size_node;
+- struct list_head bb_largest_free_order_node;
+ ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
+ * regions, index is order.
+ * bb_counters[3] = 5 means
+diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
+index 8eacba6e780ad..0f81094fc0db1 100644
+--- a/fs/ext4/mballoc-test.c
++++ b/fs/ext4/mballoc-test.c
+@@ -804,8 +804,6 @@ static void test_mb_mark_used(struct kunit *test)
+ grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
+- INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
+- INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ for (i = 0; i < TEST_RANGE_COUNT; i++)
+ test_mb_mark_used_range(test, &e4b, ranges[i].start,
+@@ -880,8 +878,6 @@ static void test_mb_free_blocks(struct kunit *test)
+ grp->bb_free = 0;
+ grp->bb_largest_free_order = -1;
+ grp->bb_avg_fragment_size_order = -1;
+- INIT_LIST_HEAD(&grp->bb_largest_free_order_node);
+- INIT_LIST_HEAD(&grp->bb_avg_fragment_size_node);
+ memset(bitmap, 0xff, sb->s_blocksize);
+
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 03c0886da0571..719a8cb53ae4c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -132,25 +132,30 @@
+ * If "mb_optimize_scan" mount option is set, we maintain in memory group info
+ * structures in two data structures:
+ *
+- * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
++ * 1) Array of largest free order xarrays (sbi->s_mb_largest_free_orders)
+ *
+- * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
++ * Locking: Writers use xa_lock, readers use rcu_read_lock.
+ *
+- * This is an array of lists where the index in the array represents the
++ * This is an array of xarrays where the index in the array represents the
+ * largest free order in the buddy bitmap of the participating group infos of
+- * that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
+- * number of buddy bitmap orders possible) number of lists. Group-infos are
+- * placed in appropriate lists.
++ * that xarray. So, there are exactly MB_NUM_ORDERS(sb) (which means total
++ * number of buddy bitmap orders possible) number of xarrays. Group-infos are
++ * placed in appropriate xarrays.
+ *
+- * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
++ * 2) Average fragment size xarrays (sbi->s_mb_avg_fragment_size)
+ *
+- * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
++ * Locking: Writers use xa_lock, readers use rcu_read_lock.
+ *
+- * This is an array of lists where in the i-th list there are groups with
++ * This is an array of xarrays where in the i-th xarray there are groups with
+ * average fragment size >= 2^i and < 2^(i+1). The average fragment size
+ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
+- * Note that we don't bother with a special list for completely empty groups
+- * so we only have MB_NUM_ORDERS(sb) lists.
++ * Note that we don't bother with a special xarray for completely empty
++ * groups so we only have MB_NUM_ORDERS(sb) xarrays. Group-infos are placed
++ * in appropriate xarrays.
++ *
++ * In xarray, the index is the block group number, the value is the block group
++ * information, and a non-empty value indicates the block group is present in
++ * the current xarray.
+ *
+ * When "mb_optimize_scan" mount option is set, mballoc consults the above data
+ * structures to decide the order in which groups are to be traversed for
+@@ -869,19 +874,73 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ if (new == old)
+ return;
+
+- if (old >= 0) {
+- write_lock(&sbi->s_mb_avg_fragment_size_locks[old]);
+- list_del(&grp->bb_avg_fragment_size_node);
+- write_unlock(&sbi->s_mb_avg_fragment_size_locks[old]);
+- }
++ if (old >= 0)
++ xa_erase(&sbi->s_mb_avg_fragment_size[old], grp->bb_group);
+
+ grp->bb_avg_fragment_size_order = new;
+ if (new >= 0) {
+- write_lock(&sbi->s_mb_avg_fragment_size_locks[new]);
+- list_add_tail(&grp->bb_avg_fragment_size_node,
+- &sbi->s_mb_avg_fragment_size[new]);
+- write_unlock(&sbi->s_mb_avg_fragment_size_locks[new]);
++ /*
++ * Cannot use __GFP_NOFAIL because we hold the group lock.
++ * Although allocation for insertion may fails, it's not fatal
++ * as we have linear traversal to fall back on.
++ */
++ int err = xa_insert(&sbi->s_mb_avg_fragment_size[new],
++ grp->bb_group, grp, GFP_ATOMIC);
++ if (err)
++ mb_debug(sb, "insert group: %u to s_mb_avg_fragment_size[%d] failed, err %d",
++ grp->bb_group, new, err);
++ }
++}
++
++static struct ext4_group_info *
++ext4_mb_find_good_group_xarray(struct ext4_allocation_context *ac,
++ struct xarray *xa, ext4_group_t start)
++{
++ struct super_block *sb = ac->ac_sb;
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ enum criteria cr = ac->ac_criteria;
++ ext4_group_t ngroups = ext4_get_groups_count(sb);
++ unsigned long group = start;
++ ext4_group_t end = ngroups;
++ struct ext4_group_info *grp;
++
++ if (WARN_ON_ONCE(start >= end))
++ return NULL;
++
++wrap_around:
++ xa_for_each_range(xa, group, grp, start, end - 1) {
++ if (sbi->s_mb_stats)
++ atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
++
++ if (!spin_is_locked(ext4_group_lock_ptr(sb, group)) &&
++ likely(ext4_mb_good_group(ac, group, cr)))
++ return grp;
++
++ cond_resched();
+ }
++
++ if (start) {
++ end = start;
++ start = 0;
++ goto wrap_around;
++ }
++
++ return NULL;
++}
++
++/*
++ * Find a suitable group of given order from the largest free orders xarray.
++ */
++static struct ext4_group_info *
++ext4_mb_find_good_group_largest_free_order(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start)
++{
++ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
++
++ if (xa_empty(xa))
++ return NULL;
++
++ return ext4_mb_find_good_group_xarray(ac, xa, start);
+ }
+
+ /*
+@@ -892,7 +951,7 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
+ enum criteria *new_cr, ext4_group_t *group)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- struct ext4_group_info *iter;
++ struct ext4_group_info *grp;
+ int i;
+
+ if (ac->ac_status == AC_STATUS_FOUND)
+@@ -902,26 +961,12 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
+ atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
+
+ for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- if (list_empty(&sbi->s_mb_largest_free_orders[i]))
+- continue;
+- read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
+- if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
+- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+- continue;
+- }
+- list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
+- bb_largest_free_order_node) {
+- if (sbi->s_mb_stats)
+- atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
+- if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
+- likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
+- *group = iter->bb_group;
+- ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+- return;
+- }
++ grp = ext4_mb_find_good_group_largest_free_order(ac, i, *group);
++ if (grp) {
++ *group = grp->bb_group;
++ ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
++ return;
+ }
+- read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
+ }
+
+ /* Increment cr and search again if no group is found */
+@@ -929,35 +974,18 @@ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context
+ }
+
+ /*
+- * Find a suitable group of given order from the average fragments list.
++ * Find a suitable group of given order from the average fragments xarray.
+ */
+ static struct ext4_group_info *
+-ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
++ext4_mb_find_good_group_avg_frag_xarray(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start)
+ {
+- struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
+- rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
+- struct ext4_group_info *grp = NULL, *iter;
+- enum criteria cr = ac->ac_criteria;
++ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
+
+- if (list_empty(frag_list))
+- return NULL;
+- read_lock(frag_list_lock);
+- if (list_empty(frag_list)) {
+- read_unlock(frag_list_lock);
++ if (xa_empty(xa))
+ return NULL;
+- }
+- list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
+- if (sbi->s_mb_stats)
+- atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
+- if (!spin_is_locked(ext4_group_lock_ptr(ac->ac_sb, iter->bb_group)) &&
+- likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
+- grp = iter;
+- break;
+- }
+- }
+- read_unlock(frag_list_lock);
+- return grp;
++
++ return ext4_mb_find_good_group_xarray(ac, xa, start);
+ }
+
+ /*
+@@ -978,7 +1006,7 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
+
+ for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
++ grp = ext4_mb_find_good_group_avg_frag_xarray(ac, i, *group);
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
+@@ -1074,7 +1102,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+ frag_order = mb_avg_fragment_size_order(ac->ac_sb,
+ ac->ac_g_ex.fe_len);
+
+- grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
++ grp = ext4_mb_find_good_group_avg_frag_xarray(ac, frag_order,
++ *group);
+ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
+@@ -1177,18 +1206,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ if (new == old)
+ return;
+
+- if (old >= 0 && !list_empty(&grp->bb_largest_free_order_node)) {
+- write_lock(&sbi->s_mb_largest_free_orders_locks[old]);
+- list_del_init(&grp->bb_largest_free_order_node);
+- write_unlock(&sbi->s_mb_largest_free_orders_locks[old]);
++ if (old >= 0) {
++ struct xarray *xa = &sbi->s_mb_largest_free_orders[old];
++
++ if (!xa_empty(xa) && xa_load(xa, grp->bb_group))
++ xa_erase(xa, grp->bb_group);
+ }
+
+ grp->bb_largest_free_order = new;
+ if (test_opt2(sb, MB_OPTIMIZE_SCAN) && new >= 0 && grp->bb_free) {
+- write_lock(&sbi->s_mb_largest_free_orders_locks[new]);
+- list_add_tail(&grp->bb_largest_free_order_node,
+- &sbi->s_mb_largest_free_orders[new]);
+- write_unlock(&sbi->s_mb_largest_free_orders_locks[new]);
++ /*
++ * Cannot use __GFP_NOFAIL because we hold the group lock.
++ * Although allocation for insertion may fails, it's not fatal
++ * as we have linear traversal to fall back on.
++ */
++ int err = xa_insert(&sbi->s_mb_largest_free_orders[new],
++ grp->bb_group, grp, GFP_ATOMIC);
++ if (err)
++ mb_debug(sb, "insert group: %u to s_mb_largest_free_orders[%d] failed, err %d",
++ grp->bb_group, new, err);
+ }
+ }
+
+@@ -3281,6 +3317,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ unsigned long position = ((unsigned long) v);
+ struct ext4_group_info *grp;
+ unsigned int count;
++ unsigned long idx;
+
+ position--;
+ if (position >= MB_NUM_ORDERS(sb)) {
+@@ -3289,11 +3326,8 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ seq_puts(seq, "avg_fragment_size_lists:\n");
+
+ count = 0;
+- read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
+- list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
+- bb_avg_fragment_size_node)
++ xa_for_each(&sbi->s_mb_avg_fragment_size[position], idx, grp)
+ count++;
+- read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
+ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+ (unsigned int)position, count);
+ return 0;
+@@ -3305,11 +3339,8 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ seq_puts(seq, "max_free_order_lists:\n");
+ }
+ count = 0;
+- read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
+- list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
+- bb_largest_free_order_node)
++ xa_for_each(&sbi->s_mb_largest_free_orders[position], idx, grp)
+ count++;
+- read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
+ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+ (unsigned int)position, count);
+
+@@ -3429,8 +3460,6 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
+ init_rwsem(&meta_group_info[i]->alloc_sem);
+ meta_group_info[i]->bb_free_root = RB_ROOT;
+- INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
+- INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
+ meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
+ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
+ meta_group_info[i]->bb_group = group;
+@@ -3640,6 +3669,20 @@ static void ext4_discard_work(struct work_struct *work)
+ ext4_mb_unload_buddy(&e4b);
+ }
+
++static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi)
++{
++ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
++ xa_destroy(&sbi->s_mb_avg_fragment_size[i]);
++ kfree(sbi->s_mb_avg_fragment_size);
++}
++
++static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi)
++{
++ for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++)
++ xa_destroy(&sbi->s_mb_largest_free_orders[i]);
++ kfree(sbi->s_mb_largest_free_orders);
++}
++
+ int ext4_mb_init(struct super_block *sb)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -3685,41 +3728,24 @@ int ext4_mb_init(struct super_block *sb)
+ } while (i < MB_NUM_ORDERS(sb));
+
+ sbi->s_mb_avg_fragment_size =
+- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
+ GFP_KERNEL);
+ if (!sbi->s_mb_avg_fragment_size) {
+ ret = -ENOMEM;
+ goto out;
+ }
+- sbi->s_mb_avg_fragment_size_locks =
+- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
+- GFP_KERNEL);
+- if (!sbi->s_mb_avg_fragment_size_locks) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
+- INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
+- rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
+- }
++ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
++ xa_init(&sbi->s_mb_avg_fragment_size[i]);
++
+ sbi->s_mb_largest_free_orders =
+- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct xarray),
+ GFP_KERNEL);
+ if (!sbi->s_mb_largest_free_orders) {
+ ret = -ENOMEM;
+ goto out;
+ }
+- sbi->s_mb_largest_free_orders_locks =
+- kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
+- GFP_KERNEL);
+- if (!sbi->s_mb_largest_free_orders_locks) {
+- ret = -ENOMEM;
+- goto out;
+- }
+- for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
+- INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
+- rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
+- }
++ for (i = 0; i < MB_NUM_ORDERS(sb); i++)
++ xa_init(&sbi->s_mb_largest_free_orders[i]);
+
+ spin_lock_init(&sbi->s_md_lock);
+ sbi->s_mb_free_pending = 0;
+@@ -3792,10 +3818,8 @@ int ext4_mb_init(struct super_block *sb)
+ free_percpu(sbi->s_locality_groups);
+ sbi->s_locality_groups = NULL;
+ out:
+- kfree(sbi->s_mb_avg_fragment_size);
+- kfree(sbi->s_mb_avg_fragment_size_locks);
+- kfree(sbi->s_mb_largest_free_orders);
+- kfree(sbi->s_mb_largest_free_orders_locks);
++ ext4_mb_avg_fragment_size_destroy(sbi);
++ ext4_mb_largest_free_orders_destroy(sbi);
+ kfree(sbi->s_mb_offsets);
+ sbi->s_mb_offsets = NULL;
+ kfree(sbi->s_mb_maxs);
+@@ -3862,10 +3886,8 @@ void ext4_mb_release(struct super_block *sb)
+ kvfree(group_info);
+ rcu_read_unlock();
+ }
+- kfree(sbi->s_mb_avg_fragment_size);
+- kfree(sbi->s_mb_avg_fragment_size_locks);
+- kfree(sbi->s_mb_largest_free_orders);
+- kfree(sbi->s_mb_largest_free_orders_locks);
++ ext4_mb_avg_fragment_size_destroy(sbi);
++ ext4_mb_largest_free_orders_destroy(sbi);
+ kfree(sbi->s_mb_offsets);
+ kfree(sbi->s_mb_maxs);
+ iput(sbi->s_buddy_cache);
+--
+2.51.0
+
--- /dev/null
+From 0d907112fb6aaaa6024296f95072a1af1e0d53fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Nov 2025 16:45:38 +0800
+Subject: ext4: correct the comments place for EXT4_EXT_MAY_ZEROOUT
+
+From: Yang Erkun <yangerkun@huawei.com>
+
+[ Upstream commit cc742fd1d184bb2a11bacf50587d2c85290622e4 ]
+
+Move the comments just before we set EXT4_EXT_MAY_ZEROOUT in
+ext4_split_convert_extents.
+
+Signed-off-by: Yang Erkun <yangerkun@huawei.com>
+Message-ID: <20251112084538.1658232-4-yangerkun@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 05d4a63300867..7301cf1726903 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3754,10 +3754,6 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ >> inode->i_sb->s_blocksize_bits;
+ if (eof_block < map->m_lblk + map->m_len)
+ eof_block = map->m_lblk + map->m_len;
+- /*
+- * It is safe to convert extent to initialized via explicit
+- * zeroout only if extent is fully inside i_size or new_size.
+- */
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+@@ -3768,6 +3764,10 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /*
++ * It is safe to convert extent to initialized via explicit
++ * zeroout only if extent is fully inside i_size or new_size.
++ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+--
+2.51.0
+
--- /dev/null
+From 19ee8831ebccca9e28372fa94a899dd995015144 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:35 +0800
+Subject: ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before
+ submitting I/O
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit feaf2a80e78f89ee8a3464126077ba8683b62791 ]
+
+When allocating blocks during within-EOF DIO and writeback with
+dioread_nolock enabled, EXT4_GET_BLOCKS_PRE_IO was set to split an
+existing large unwritten extent. However, EXT4_GET_BLOCKS_CONVERT was
+set when calling ext4_split_convert_extents(), which may potentially
+result in stale data issues.
+
+Assume we have an unwritten extent, and then DIO writes the second half.
+
+ [UUUUUUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUUUUUU] extent status tree
+ |<- ->| ----> dio write this range
+
+First, ext4_iomap_alloc() call ext4_map_blocks() with
+EXT4_GET_BLOCKS_PRE_IO, EXT4_GET_BLOCKS_UNWRIT_EXT and
+EXT4_GET_BLOCKS_CREATE flags set. ext4_map_blocks() find this extent and
+call ext4_split_convert_extents() with EXT4_GET_BLOCKS_CONVERT and the
+above flags set.
+
+Then, ext4_split_convert_extents() calls ext4_split_extent() with
+EXT4_EXT_MAY_ZEROOUT, EXT4_EXT_MARK_UNWRIT2 and EXT4_EXT_DATA_VALID2
+flags set, and it calls ext4_split_extent_at() to split the second half
+with EXT4_EXT_DATA_VALID2, EXT4_EXT_MARK_UNWRIT1, EXT4_EXT_MAY_ZEROOUT
+and EXT4_EXT_MARK_UNWRIT2 flags set. However, ext4_split_extent_at()
+failed to insert extent since a temporary lack -ENOSPC. It zeroes out
+the first half but convert the entire on-disk extent to written since
+the EXT4_EXT_DATA_VALID2 flag set, but left the second half as unwritten
+in the extent status tree.
+
+ [0000000000SSSSSS] data S: stale data, 0: zeroed
+ [WWWWWWWWWWWWWWWW] on-disk extent W: written extent
+ [WWWWWWWWWWUUUUUU] extent status tree
+
+Finally, if the DIO failed to write data to the disk, the stale data in
+the second half will be exposed once the cached extent entry is gone.
+
+Fix this issue by not passing EXT4_GET_BLOCKS_CONVERT when splitting
+an unwritten extent before submitting I/O, and make
+ext4_split_convert_extents() to zero out the entire extent range
+to zero for this case, and also mark the extent in the extent status
+tree for consistency.
+
+Fixes: b8a8684502a0 ("ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-4-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 7301cf1726903..bd556a3eac198 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3762,15 +3762,19 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+- /* Convert to initialized */
+- } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /* Split the existing unwritten extent */
++ } else if (flags & (EXT4_GET_BLOCKS_UNWRIT_EXT |
++ EXT4_GET_BLOCKS_CONVERT)) {
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+- split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
++ split_flag |= EXT4_EXT_MARK_UNWRIT2;
++ /* Convert to initialized */
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+@@ -3949,7 +3953,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+ path = ext4_split_convert_extents(handle, inode, map, path,
+- flags | EXT4_GET_BLOCKS_CONVERT, allocated);
++ flags, allocated);
+ if (IS_ERR(path))
+ return path;
+ /*
+--
+2.51.0
+
--- /dev/null
+From 65659c0df7acf8195927e76daf3f6cfe8dc73ed9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:22 +0800
+Subject: ext4: factor out __ext4_mb_scan_group()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 45704f92e55853fe287760e019feb45eeb9c988e ]
+
+Extract __ext4_mb_scan_group() to make the code clearer and to
+prepare for the later conversion of 'choose group' to 'scan groups'.
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-13-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 45 +++++++++++++++++++++++++++------------------
+ fs/ext4/mballoc.h | 2 ++
+ 2 files changed, 29 insertions(+), 18 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 329fe83cbe814..a32d84e3031da 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2584,6 +2584,30 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
+ }
+ }
+
++static void __ext4_mb_scan_group(struct ext4_allocation_context *ac)
++{
++ bool is_stripe_aligned;
++ struct ext4_sb_info *sbi;
++ enum criteria cr = ac->ac_criteria;
++
++ ac->ac_groups_scanned++;
++ if (cr == CR_POWER2_ALIGNED)
++ return ext4_mb_simple_scan_group(ac, ac->ac_e4b);
++
++ sbi = EXT4_SB(ac->ac_sb);
++ is_stripe_aligned = false;
++ if ((sbi->s_stripe >= sbi->s_cluster_ratio) &&
++ !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe)))
++ is_stripe_aligned = true;
++
++ if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) &&
++ is_stripe_aligned)
++ ext4_mb_scan_aligned(ac, ac->ac_e4b);
++
++ if (ac->ac_status == AC_STATUS_CONTINUE)
++ ext4_mb_complex_scan_group(ac, ac->ac_e4b);
++}
++
+ /*
+ * This is also called BEFORE we load the buddy bitmap.
+ * Returns either 1 or 0 indicating that the group is either suitable
+@@ -2871,6 +2895,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ */
+ if (ac->ac_2order)
+ cr = CR_POWER2_ALIGNED;
++
++ ac->ac_e4b = &e4b;
+ repeat:
+ for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+ ac->ac_criteria = cr;
+@@ -2948,24 +2974,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ continue;
+ }
+
+- ac->ac_groups_scanned++;
+- if (cr == CR_POWER2_ALIGNED)
+- ext4_mb_simple_scan_group(ac, &e4b);
+- else {
+- bool is_stripe_aligned =
+- (sbi->s_stripe >=
+- sbi->s_cluster_ratio) &&
+- !(ac->ac_g_ex.fe_len %
+- EXT4_NUM_B2C(sbi, sbi->s_stripe));
+-
+- if ((cr == CR_GOAL_LEN_FAST ||
+- cr == CR_BEST_AVAIL_LEN) &&
+- is_stripe_aligned)
+- ext4_mb_scan_aligned(ac, &e4b);
+-
+- if (ac->ac_status == AC_STATUS_CONTINUE)
+- ext4_mb_complex_scan_group(ac, &e4b);
+- }
++ __ext4_mb_scan_group(ac);
+
+ ext4_unlock_group(sb, group);
+ ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index f8280de3e8820..7a60b0103e649 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -204,6 +204,8 @@ struct ext4_allocation_context {
+ __u8 ac_2order; /* if request is to allocate 2^N blocks and
+ * N > 0, the field stores N, otherwise 0 */
+ __u8 ac_op; /* operation, for history only */
++
++ struct ext4_buddy *ac_e4b;
+ struct folio *ac_bitmap_folio;
+ struct folio *ac_buddy_folio;
+ struct ext4_prealloc_space *ac_pa;
+--
+2.51.0
+
--- /dev/null
+From db307e2cd6342a4ae3cc24ddd9f6b274ef99a4c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:23 +0800
+Subject: ext4: factor out ext4_mb_might_prefetch()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 5abd85f667a19ef7d880ed00c201fc22de6fa707 ]
+
+Extract ext4_mb_might_prefetch() to make the code clearer and to
+prepare for the later conversion of 'choose group' to 'scan groups'.
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-14-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 62 +++++++++++++++++++++++++++++------------------
+ fs/ext4/mballoc.h | 4 +++
+ 2 files changed, 42 insertions(+), 24 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a32d84e3031da..af014b43d0b3f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2797,6 +2797,37 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
+ return group;
+ }
+
++/*
++ * Batch reads of the block allocation bitmaps to get
++ * multiple READs in flight; limit prefetching at inexpensive
++ * CR, otherwise mballoc can spend a lot of time loading
++ * imperfect groups
++ */
++static void ext4_mb_might_prefetch(struct ext4_allocation_context *ac,
++ ext4_group_t group)
++{
++ struct ext4_sb_info *sbi;
++
++ if (ac->ac_prefetch_grp != group)
++ return;
++
++ sbi = EXT4_SB(ac->ac_sb);
++ if (ext4_mb_cr_expensive(ac->ac_criteria) ||
++ ac->ac_prefetch_ios < sbi->s_mb_prefetch_limit) {
++ unsigned int nr = sbi->s_mb_prefetch;
++
++ if (ext4_has_feature_flex_bg(ac->ac_sb)) {
++ nr = 1 << sbi->s_log_groups_per_flex;
++ nr -= group & (nr - 1);
++ nr = umin(nr, sbi->s_mb_prefetch);
++ }
++
++ ac->ac_prefetch_nr = nr;
++ ac->ac_prefetch_grp = ext4_mb_prefetch(ac->ac_sb, group, nr,
++ &ac->ac_prefetch_ios);
++ }
++}
++
+ /*
+ * Prefetching reads the block bitmap into the buffer cache; but we
+ * need to make sure that the buddy bitmap in the page cache has been
+@@ -2833,10 +2864,9 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+- ext4_group_t prefetch_grp = 0, ngroups, group, i;
++ ext4_group_t ngroups, group, i;
+ enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
+ int err = 0, first_err = 0;
+- unsigned int nr = 0, prefetch_ios = 0;
+ struct ext4_sb_info *sbi;
+ struct super_block *sb;
+ struct ext4_buddy e4b;
+@@ -2897,6 +2927,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ cr = CR_POWER2_ALIGNED;
+
+ ac->ac_e4b = &e4b;
++ ac->ac_prefetch_ios = 0;
+ repeat:
+ for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+ ac->ac_criteria = cr;
+@@ -2906,8 +2937,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ */
+ group = ac->ac_g_ex.fe_group;
+ ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
+- prefetch_grp = group;
+- nr = 0;
++ ac->ac_prefetch_grp = group;
++ ac->ac_prefetch_nr = 0;
+
+ for (i = 0, new_cr = cr; i < ngroups; i++,
+ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
+@@ -2919,24 +2950,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ goto repeat;
+ }
+
+- /*
+- * Batch reads of the block allocation bitmaps
+- * to get multiple READs in flight; limit
+- * prefetching at inexpensive CR, otherwise mballoc
+- * can spend a lot of time loading imperfect groups
+- */
+- if ((prefetch_grp == group) &&
+- (ext4_mb_cr_expensive(cr) ||
+- prefetch_ios < sbi->s_mb_prefetch_limit)) {
+- nr = sbi->s_mb_prefetch;
+- if (ext4_has_feature_flex_bg(sb)) {
+- nr = 1 << sbi->s_log_groups_per_flex;
+- nr -= group & (nr - 1);
+- nr = min(nr, sbi->s_mb_prefetch);
+- }
+- prefetch_grp = ext4_mb_prefetch(sb, group,
+- nr, &prefetch_ios);
+- }
++ ext4_mb_might_prefetch(ac, group);
+
+ /* prevent unnecessary buddy loading. */
+ if (cr < CR_ANY_FREE &&
+@@ -3030,8 +3044,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
+ ac->ac_flags, cr, err);
+
+- if (nr)
+- ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
++ if (ac->ac_prefetch_nr)
++ ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr);
+
+ return err;
+ }
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 7a60b0103e649..9f66b1d5db67a 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -192,6 +192,10 @@ struct ext4_allocation_context {
+ */
+ ext4_grpblk_t ac_orig_goal_len;
+
++ ext4_group_t ac_prefetch_grp;
++ unsigned int ac_prefetch_ios;
++ unsigned int ac_prefetch_nr;
++
+ __u32 ac_flags; /* allocation hints */
+ __u32 ac_groups_linear_remaining;
+ __u16 ac_groups_scanned;
+--
+2.51.0
+
--- /dev/null
+From 4ff7112eb570dd37ad224f6141c1bab7dada3c10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:24 +0800
+Subject: ext4: factor out ext4_mb_scan_group()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 9c08e42db9056d423dcef5e7998c73182180ff83 ]
+
+Extract ext4_mb_scan_group() to make the code clearer and to
+prepare for the later conversion of 'choose group' to 'scan groups'.
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-15-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 93 +++++++++++++++++++++++++----------------------
+ fs/ext4/mballoc.h | 2 +
+ 2 files changed, 51 insertions(+), 44 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index af014b43d0b3f..03c0886da0571 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2861,12 +2861,56 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+ }
+ }
+
++static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
++ ext4_group_t group)
++{
++ int ret;
++ struct super_block *sb = ac->ac_sb;
++ enum criteria cr = ac->ac_criteria;
++
++ ext4_mb_might_prefetch(ac, group);
++
++ /* prevent unnecessary buddy loading. */
++ if (cr < CR_ANY_FREE && spin_is_locked(ext4_group_lock_ptr(sb, group)))
++ return 0;
++
++ /* This now checks without needing the buddy page */
++ ret = ext4_mb_good_group_nolock(ac, group, cr);
++ if (ret <= 0) {
++ if (!ac->ac_first_err)
++ ac->ac_first_err = ret;
++ return 0;
++ }
++
++ ret = ext4_mb_load_buddy(sb, group, ac->ac_e4b);
++ if (ret)
++ return ret;
++
++ /* skip busy group */
++ if (cr >= CR_ANY_FREE)
++ ext4_lock_group(sb, group);
++ else if (!ext4_try_lock_group(sb, group))
++ goto out_unload;
++
++ /* We need to check again after locking the block group. */
++ if (unlikely(!ext4_mb_good_group(ac, group, cr)))
++ goto out_unlock;
++
++ __ext4_mb_scan_group(ac);
++
++out_unlock:
++ ext4_unlock_group(sb, group);
++out_unload:
++ ext4_mb_unload_buddy(ac->ac_e4b);
++ return ret;
++}
++
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+ ext4_group_t ngroups, group, i;
+ enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
+- int err = 0, first_err = 0;
++ int err = 0;
+ struct ext4_sb_info *sbi;
+ struct super_block *sb;
+ struct ext4_buddy e4b;
+@@ -2928,6 +2972,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+
+ ac->ac_e4b = &e4b;
+ ac->ac_prefetch_ios = 0;
++ ac->ac_first_err = 0;
+ repeat:
+ for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+ ac->ac_criteria = cr;
+@@ -2942,7 +2987,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+
+ for (i = 0, new_cr = cr; i < ngroups; i++,
+ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
+- int ret = 0;
+
+ cond_resched();
+ if (new_cr != cr) {
+@@ -2950,49 +2994,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ goto repeat;
+ }
+
+- ext4_mb_might_prefetch(ac, group);
+-
+- /* prevent unnecessary buddy loading. */
+- if (cr < CR_ANY_FREE &&
+- spin_is_locked(ext4_group_lock_ptr(sb, group)))
+- continue;
+-
+- /* This now checks without needing the buddy page */
+- ret = ext4_mb_good_group_nolock(ac, group, cr);
+- if (ret <= 0) {
+- if (!first_err)
+- first_err = ret;
+- continue;
+- }
+-
+- err = ext4_mb_load_buddy(sb, group, &e4b);
++ err = ext4_mb_scan_group(ac, group);
+ if (err)
+ goto out;
+
+- /* skip busy group */
+- if (cr >= CR_ANY_FREE) {
+- ext4_lock_group(sb, group);
+- } else if (!ext4_try_lock_group(sb, group)) {
+- ext4_mb_unload_buddy(&e4b);
+- continue;
+- }
+-
+- /*
+- * We need to check again after locking the
+- * block group
+- */
+- ret = ext4_mb_good_group(ac, group, cr);
+- if (ret == 0) {
+- ext4_unlock_group(sb, group);
+- ext4_mb_unload_buddy(&e4b);
+- continue;
+- }
+-
+- __ext4_mb_scan_group(ac);
+-
+- ext4_unlock_group(sb, group);
+- ext4_mb_unload_buddy(&e4b);
+-
+ if (ac->ac_status != AC_STATUS_CONTINUE)
+ break;
+ }
+@@ -3037,8 +3042,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
+ atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
+ out:
+- if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
+- err = first_err;
++ if (!err && ac->ac_status != AC_STATUS_FOUND && ac->ac_first_err)
++ err = ac->ac_first_err;
+
+ mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
+ ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 9f66b1d5db67a..83886fc9521b7 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -196,6 +196,8 @@ struct ext4_allocation_context {
+ unsigned int ac_prefetch_ios;
+ unsigned int ac_prefetch_nr;
+
++ int ac_first_err;
++
+ __u32 ac_flags; /* allocation hints */
+ __u32 ac_groups_linear_remaining;
+ __u16 ac_groups_scanned;
+--
+2.51.0
+
--- /dev/null
+From a00dbf6c5716f7a1356e017dfeecd1a48e06f54d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:27 +0800
+Subject: ext4: implement linear-like traversal across order xarrays
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit a3ce570a5d6a70df616ae9a78635a188e6b5fd2f ]
+
+Although we now perform ordered traversal within an xarray, this is
+currently limited to a single xarray. However, we have multiple such
+xarrays, which prevents us from guaranteeing a linear-like traversal
+where all groups on the right are visited before all groups on the left.
+
+For example, suppose we have 128 block groups, with a target group of 64,
+a target length corresponding to an order of 1, and available free groups
+of 16 (order 1) and group 65 (order 8):
+
+For linear traversal, when no suitable free block is found in group 64, it
+will search in the next block group until group 127, then start searching
+from 0 up to block group 63. It ensures continuous forward traversal, which
+is consistent with the unidirectional rotation behavior of HDD platters.
+
+Additionally, the block group lock contention during freeing block is
+unavoidable. The goal increasing from 0 to 64 indicates that previously
+scanned groups (which had no suitable free space and are likely to free
+blocks later) and skipped groups (which are currently in use) have newly
+freed some used blocks. If we allocate blocks in these groups, the
+probability of competing with other processes increases.
+
+For non-linear traversal, we first traverse all groups in order_1. If only
+group 16 has free space in this list, we first traverse [63, 128), then
+traverse [0, 64) to find the available group 16, and then allocate blocks
+in group 16. Therefore, it cannot guarantee continuous traversal in one
+direction, thus increasing the probability of contention.
+
+So refactor ext4_mb_scan_groups_xarray() to ext4_mb_scan_groups_xa_range()
+to only traverse a fixed range of groups, and move the logic for handling
+wrap around to the caller. The caller first iterates through all xarrays
+in the range [start, ngroups) and then through the range [0, start). This
+approach simulates a linear scan, which reduces contention between freeing
+blocks and allocating blocks.
+
+Assume we have the following groups, where "|" denotes the xarray traversal
+start position:
+
+order_1_groups: AB | CD
+order_2_groups: EF | GH
+
+Traversal order:
+Before: C > D > A > B > G > H > E > F
+After: C > D > G > H > A > B > E > F
+
+Performance test data follows:
+
+|CPU: Kunpeng 920 | P80 | P1 |
+|Memory: 512GB |------------------------|-------------------------|
+|960GB SSD (0.5GB/s)| base | patched | base | patched |
+|-------------------|-------|----------------|--------|----------------|
+|mb_optimize_scan=0 | 19555 | 20049 (+2.5%) | 315636 | 316724 (-0.3%) |
+|mb_optimize_scan=1 | 15496 | 19342 (+24.8%) | 323569 | 328324 (+1.4%) |
+
+|CPU: AMD 9654 * 2 | P96 | P1 |
+|Memory: 1536GB |------------------------|-------------------------|
+|960GB SSD (1GB/s) | base | patched | base | patched |
+|-------------------|-------|----------------|--------|----------------|
+|mb_optimize_scan=0 | 53192 | 52125 (-2.0%) | 212678 | 215136 (+1.1%) |
+|mb_optimize_scan=1 | 37636 | 50331 (+33.7%) | 214189 | 209431 (-2.2%) |
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-18-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 68 ++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 47 insertions(+), 21 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 6c72eddcd6c1f..1e180c55ebd4f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -892,21 +892,20 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ }
+ }
+
+-static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac,
+- struct xarray *xa, ext4_group_t start)
++static int ext4_mb_scan_groups_xa_range(struct ext4_allocation_context *ac,
++ struct xarray *xa,
++ ext4_group_t start, ext4_group_t end)
+ {
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ enum criteria cr = ac->ac_criteria;
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ unsigned long group = start;
+- ext4_group_t end = ngroups;
+ struct ext4_group_info *grp;
+
+- if (WARN_ON_ONCE(start >= end))
++ if (WARN_ON_ONCE(end > ngroups || start >= end))
+ return 0;
+
+-wrap_around:
+ xa_for_each_range(xa, group, grp, start, end - 1) {
+ int err;
+
+@@ -920,28 +919,23 @@ static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac,
+ cond_resched();
+ }
+
+- if (start) {
+- end = start;
+- start = 0;
+- goto wrap_around;
+- }
+-
+ return 0;
+ }
+
+ /*
+ * Find a suitable group of given order from the largest free orders xarray.
+ */
+-static int
+-ext4_mb_scan_groups_largest_free_order(struct ext4_allocation_context *ac,
+- int order, ext4_group_t start)
++static inline int
++ext4_mb_scan_groups_largest_free_order_range(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start,
++ ext4_group_t end)
+ {
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
+
+ if (xa_empty(xa))
+ return 0;
+
+- return ext4_mb_scan_groups_xarray(ac, xa, start);
++ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
+ }
+
+ /*
+@@ -954,12 +948,22 @@ static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ int i;
+ int ret = 0;
++ ext4_group_t start, end;
+
++ start = group;
++ end = ext4_get_groups_count(ac->ac_sb);
++wrap_around:
+ for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- ret = ext4_mb_scan_groups_largest_free_order(ac, i, group);
++ ret = ext4_mb_scan_groups_largest_free_order_range(ac, i,
++ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
++ if (start) {
++ end = start;
++ start = 0;
++ goto wrap_around;
++ }
+
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+@@ -972,15 +976,17 @@ static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
+ /*
+ * Find a suitable group of given order from the average fragments xarray.
+ */
+-static int ext4_mb_scan_groups_avg_frag_order(struct ext4_allocation_context *ac,
+- int order, ext4_group_t start)
++static int
++ext4_mb_scan_groups_avg_frag_order_range(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start,
++ ext4_group_t end)
+ {
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
+
+ if (xa_empty(xa))
+ return 0;
+
+- return ext4_mb_scan_groups_xarray(ac, xa, start);
++ return ext4_mb_scan_groups_xa_range(ac, xa, start, end);
+ }
+
+ /*
+@@ -992,13 +998,23 @@ static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ int i, ret = 0;
++ ext4_group_t start, end;
+
++ start = group;
++ end = ext4_get_groups_count(ac->ac_sb);
++wrap_around:
+ i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+ for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- ret = ext4_mb_scan_groups_avg_frag_order(ac, i, group);
++ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, i,
++ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
++ if (start) {
++ end = start;
++ start = 0;
++ goto wrap_around;
++ }
+
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+@@ -1034,6 +1050,7 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ int i, order, min_order;
+ unsigned long num_stripe_clusters = 0;
++ ext4_group_t start, end;
+
+ /*
+ * mb_avg_fragment_size_order() returns order in a way that makes
+@@ -1065,6 +1082,9 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ if (1 << min_order < ac->ac_o_ex.fe_len)
+ min_order = fls(ac->ac_o_ex.fe_len);
+
++ start = group;
++ end = ext4_get_groups_count(ac->ac_sb);
++wrap_around:
+ for (i = order; i >= min_order; i--) {
+ int frag_order;
+ /*
+@@ -1087,10 +1107,16 @@ static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
+ frag_order = mb_avg_fragment_size_order(ac->ac_sb,
+ ac->ac_g_ex.fe_len);
+
+- ret = ext4_mb_scan_groups_avg_frag_order(ac, frag_order, group);
++ ret = ext4_mb_scan_groups_avg_frag_order_range(ac, frag_order,
++ start, end);
+ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
+ return ret;
+ }
++ if (start) {
++ end = start;
++ start = 0;
++ goto wrap_around;
++ }
+
+ /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
+ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+--
+2.51.0
+
--- /dev/null
+From 71dbb5850e1dc3db7399348a81941a9df47bd856 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 21:03:26 +0800
+Subject: ext4: refactor choose group to scan group
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 6347558764911f88acac06ab996e162f0c8a212d ]
+
+This commit converts the `choose group` logic to `scan group` using
+previously prepared helper functions. This allows us to leverage xarrays
+for ordered non-linear traversal, thereby mitigating the "bouncing" issue
+inherent in the `choose group` mechanism.
+
+This also decouples linear and non-linear traversals, leading to cleaner
+and more readable code.
+
+Key changes:
+
+ * ext4_mb_choose_next_group() is refactored to ext4_mb_scan_groups().
+
+ * Replaced ext4_mb_good_group() with ext4_mb_scan_group() in non-linear
+ traversals, and related functions now return error codes instead of
+ group info.
+
+ * Added ext4_mb_scan_groups_linear() for performing linear scans starting
+ from a specific group for a set number of times.
+
+ * Linear scans now execute up to sbi->s_mb_max_linear_groups times,
+ so ac_groups_linear_remaining is removed as it's no longer used.
+
+ * ac->ac_criteria is now used directly instead of passing cr around.
+ Also, ac->ac_criteria is incremented directly after groups scan fails
+ for the corresponding criteria.
+
+ * Since we're now directly scanning groups instead of finding a good group
+ then scanning, the following variables and flags are no longer needed,
+ s_bal_cX_groups_considered is sufficient.
+
+ s_bal_p2_aligned_bad_suggestions
+ s_bal_goal_fast_bad_suggestions
+ s_bal_best_avail_bad_suggestions
+ EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED
+ EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED
+ EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://patch.msgid.link/20250714130327.1830534-17-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 12 --
+ fs/ext4/mballoc.c | 292 +++++++++++++++++++++-------------------------
+ fs/ext4/mballoc.h | 1 -
+ 3 files changed, 131 insertions(+), 174 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7cfe38fdb9950..bcdd8f3818696 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -213,15 +213,6 @@ enum criteria {
+ #define EXT4_MB_USE_RESERVED 0x2000
+ /* Do strict check for free blocks while retrying block allocation */
+ #define EXT4_MB_STRICT_CHECK 0x4000
+-/* Large fragment size list lookup succeeded at least once for
+- * CR_POWER2_ALIGNED */
+-#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000
+-/* Avg fragment size rb tree lookup succeeded at least once for
+- * CR_GOAL_LEN_FAST */
+-#define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000
+-/* Avg fragment size rb tree lookup succeeded at least once for
+- * CR_BEST_AVAIL_LEN */
+-#define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000
+
+ struct ext4_allocation_request {
+ /* target inode for block we're allocating */
+@@ -1619,9 +1610,6 @@ struct ext4_sb_info {
+ atomic_t s_bal_len_goals; /* len goal hits */
+ atomic_t s_bal_breaks; /* too long searches */
+ atomic_t s_bal_2orders; /* 2^order hits */
+- atomic_t s_bal_p2_aligned_bad_suggestions;
+- atomic_t s_bal_goal_fast_bad_suggestions;
+- atomic_t s_bal_best_avail_bad_suggestions;
+ atomic64_t s_bal_cX_groups_considered[EXT4_MB_NUM_CRS];
+ atomic64_t s_bal_cX_hits[EXT4_MB_NUM_CRS];
+ atomic64_t s_bal_cX_failed[EXT4_MB_NUM_CRS]; /* cX loop didn't find blocks */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 719a8cb53ae4c..6c72eddcd6c1f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -425,8 +425,8 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+ ext4_group_t group);
+ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
+
+-static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
+- ext4_group_t group, enum criteria cr);
++static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
++ ext4_group_t group);
+
+ static int ext4_try_to_trim_range(struct super_block *sb,
+ struct ext4_buddy *e4b, ext4_grpblk_t start,
+@@ -892,9 +892,8 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ }
+ }
+
+-static struct ext4_group_info *
+-ext4_mb_find_good_group_xarray(struct ext4_allocation_context *ac,
+- struct xarray *xa, ext4_group_t start)
++static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac,
++ struct xarray *xa, ext4_group_t start)
+ {
+ struct super_block *sb = ac->ac_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -905,16 +904,18 @@ ext4_mb_find_good_group_xarray(struct ext4_allocation_context *ac,
+ struct ext4_group_info *grp;
+
+ if (WARN_ON_ONCE(start >= end))
+- return NULL;
++ return 0;
+
+ wrap_around:
+ xa_for_each_range(xa, group, grp, start, end - 1) {
++ int err;
++
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
+
+- if (!spin_is_locked(ext4_group_lock_ptr(sb, group)) &&
+- likely(ext4_mb_good_group(ac, group, cr)))
+- return grp;
++ err = ext4_mb_scan_group(ac, grp->bb_group);
++ if (err || ac->ac_status != AC_STATUS_CONTINUE)
++ return err;
+
+ cond_resched();
+ }
+@@ -925,95 +926,82 @@ ext4_mb_find_good_group_xarray(struct ext4_allocation_context *ac,
+ goto wrap_around;
+ }
+
+- return NULL;
++ return 0;
+ }
+
+ /*
+ * Find a suitable group of given order from the largest free orders xarray.
+ */
+-static struct ext4_group_info *
+-ext4_mb_find_good_group_largest_free_order(struct ext4_allocation_context *ac,
+- int order, ext4_group_t start)
++static int
++ext4_mb_scan_groups_largest_free_order(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start)
+ {
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order];
+
+ if (xa_empty(xa))
+- return NULL;
++ return 0;
+
+- return ext4_mb_find_good_group_xarray(ac, xa, start);
++ return ext4_mb_scan_groups_xarray(ac, xa, start);
+ }
+
+ /*
+ * Choose next group by traversing largest_free_order lists. Updates *new_cr if
+ * cr level needs an update.
+ */
+-static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
+- enum criteria *new_cr, ext4_group_t *group)
++static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac,
++ ext4_group_t group)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- struct ext4_group_info *grp;
+ int i;
+-
+- if (ac->ac_status == AC_STATUS_FOUND)
+- return;
+-
+- if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
+- atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);
++ int ret = 0;
+
+ for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- grp = ext4_mb_find_good_group_largest_free_order(ac, i, *group);
+- if (grp) {
+- *group = grp->bb_group;
+- ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
+- return;
+- }
++ ret = ext4_mb_scan_groups_largest_free_order(ac, i, group);
++ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
++ return ret;
+ }
+
++ if (sbi->s_mb_stats)
++ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
++
+ /* Increment cr and search again if no group is found */
+- *new_cr = CR_GOAL_LEN_FAST;
++ ac->ac_criteria = CR_GOAL_LEN_FAST;
++ return ret;
+ }
+
+ /*
+ * Find a suitable group of given order from the average fragments xarray.
+ */
+-static struct ext4_group_info *
+-ext4_mb_find_good_group_avg_frag_xarray(struct ext4_allocation_context *ac,
+- int order, ext4_group_t start)
++static int ext4_mb_scan_groups_avg_frag_order(struct ext4_allocation_context *ac,
++ int order, ext4_group_t start)
+ {
+ struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order];
+
+ if (xa_empty(xa))
+- return NULL;
++ return 0;
+
+- return ext4_mb_find_good_group_xarray(ac, xa, start);
++ return ext4_mb_scan_groups_xarray(ac, xa, start);
+ }
+
+ /*
+ * Choose next group by traversing average fragment size list of suitable
+ * order. Updates *new_cr if cr level needs an update.
+ */
+-static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
+- enum criteria *new_cr, ext4_group_t *group)
++static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac,
++ ext4_group_t group)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- struct ext4_group_info *grp = NULL;
+- int i;
++ int i, ret = 0;
+
+- if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
+- if (sbi->s_mb_stats)
+- atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
+- }
+-
+- for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+- i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+- grp = ext4_mb_find_good_group_avg_frag_xarray(ac, i, *group);
+- if (grp) {
+- *group = grp->bb_group;
+- ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
+- return;
+- }
++ i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
++ for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
++ ret = ext4_mb_scan_groups_avg_frag_order(ac, i, group);
++ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
++ return ret;
+ }
+
++ if (sbi->s_mb_stats)
++ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
+ /*
+ * CR_BEST_AVAIL_LEN works based on the concept that we have
+ * a larger normalized goal len request which can be trimmed to
+@@ -1023,9 +1011,11 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
+ * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
+ */
+ if (ac->ac_flags & EXT4_MB_HINT_DATA)
+- *new_cr = CR_BEST_AVAIL_LEN;
++ ac->ac_criteria = CR_BEST_AVAIL_LEN;
+ else
+- *new_cr = CR_GOAL_LEN_SLOW;
++ ac->ac_criteria = CR_GOAL_LEN_SLOW;
++
++ return ret;
+ }
+
+ /*
+@@ -1037,19 +1027,14 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
+ * preallocations. However, we make sure that we don't trim the request too
+ * much and fall to CR_GOAL_LEN_SLOW in that case.
+ */
+-static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
+- enum criteria *new_cr, ext4_group_t *group)
++static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac,
++ ext4_group_t group)
+ {
++ int ret = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- struct ext4_group_info *grp = NULL;
+ int i, order, min_order;
+ unsigned long num_stripe_clusters = 0;
+
+- if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
+- if (sbi->s_mb_stats)
+- atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
+- }
+-
+ /*
+ * mb_avg_fragment_size_order() returns order in a way that makes
+ * retrieving back the length using (1 << order) inaccurate. Hence, use
+@@ -1102,18 +1087,18 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
+ frag_order = mb_avg_fragment_size_order(ac->ac_sb,
+ ac->ac_g_ex.fe_len);
+
+- grp = ext4_mb_find_good_group_avg_frag_xarray(ac, frag_order,
+- *group);
+- if (grp) {
+- *group = grp->bb_group;
+- ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
+- return;
+- }
++ ret = ext4_mb_scan_groups_avg_frag_order(ac, frag_order, group);
++ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
++ return ret;
+ }
+
+ /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
+ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
+- *new_cr = CR_GOAL_LEN_SLOW;
++ if (sbi->s_mb_stats)
++ atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]);
++ ac->ac_criteria = CR_GOAL_LEN_SLOW;
++
++ return ret;
+ }
+
+ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
+@@ -1126,59 +1111,82 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
+ }
+
+ /*
+- * Return next linear group for allocation.
++ * next linear group for allocation.
+ */
+-static ext4_group_t
+-next_linear_group(ext4_group_t group, ext4_group_t ngroups)
++static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups)
+ {
+ /*
+ * Artificially restricted ngroups for non-extent
+ * files makes group > ngroups possible on first loop.
+ */
+- return group + 1 >= ngroups ? 0 : group + 1;
++ *group = *group + 1 >= ngroups ? 0 : *group + 1;
+ }
+
+-/*
+- * ext4_mb_choose_next_group: choose next group for allocation.
+- *
+- * @ac Allocation Context
+- * @new_cr This is an output parameter. If the there is no good group
+- * available at current CR level, this field is updated to indicate
+- * the new cr level that should be used.
+- * @group This is an input / output parameter. As an input it indicates the
+- * next group that the allocator intends to use for allocation. As
+- * output, this field indicates the next group that should be used as
+- * determined by the optimization functions.
+- * @ngroups Total number of groups
+- */
+-static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
+- enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
++static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac,
++ ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count)
+ {
+- *new_cr = ac->ac_criteria;
++ int ret, i;
++ enum criteria cr = ac->ac_criteria;
++ struct super_block *sb = ac->ac_sb;
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ ext4_group_t group = *start;
+
+- if (!should_optimize_scan(ac)) {
+- *group = next_linear_group(*group, ngroups);
+- return;
++ for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) {
++ ret = ext4_mb_scan_group(ac, group);
++ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
++ return ret;
++ cond_resched();
+ }
+
++ *start = group;
++ if (count == ngroups)
++ ac->ac_criteria++;
++
++ /* Processed all groups and haven't found blocks */
++ if (sbi->s_mb_stats && i == ngroups)
++ atomic64_inc(&sbi->s_bal_cX_failed[cr]);
++
++ return 0;
++}
++
++static int ext4_mb_scan_groups(struct ext4_allocation_context *ac)
++{
++ int ret = 0;
++ ext4_group_t start;
++ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
++ ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb);
++
++ /* non-extent files are limited to low blocks/groups */
++ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
++ ngroups = sbi->s_blockfile_groups;
++
++ /* searching for the right group start from the goal value specified */
++ start = ac->ac_g_ex.fe_group;
++ ac->ac_prefetch_grp = start;
++ ac->ac_prefetch_nr = 0;
++
++ if (!should_optimize_scan(ac))
++ return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups);
++
+ /*
+ * Optimized scanning can return non adjacent groups which can cause
+ * seek overhead for rotational disks. So try few linear groups before
+ * trying optimized scan.
+ */
+- if (ac->ac_groups_linear_remaining) {
+- *group = next_linear_group(*group, ngroups);
+- ac->ac_groups_linear_remaining--;
+- return;
+- }
++ if (sbi->s_mb_max_linear_groups)
++ ret = ext4_mb_scan_groups_linear(ac, ngroups, &start,
++ sbi->s_mb_max_linear_groups);
++ if (ret || ac->ac_status != AC_STATUS_CONTINUE)
++ return ret;
+
+- if (*new_cr == CR_POWER2_ALIGNED) {
+- ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
+- } else if (*new_cr == CR_GOAL_LEN_FAST) {
+- ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
+- } else if (*new_cr == CR_BEST_AVAIL_LEN) {
+- ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
+- } else {
++ switch (ac->ac_criteria) {
++ case CR_POWER2_ALIGNED:
++ return ext4_mb_scan_groups_p2_aligned(ac, start);
++ case CR_GOAL_LEN_FAST:
++ return ext4_mb_scan_groups_goal_fast(ac, start);
++ case CR_BEST_AVAIL_LEN:
++ return ext4_mb_scan_groups_best_avail(ac, start);
++ default:
+ /*
+ * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
+ * rb tree sorted by bb_free. But until that happens, we should
+@@ -1186,6 +1194,8 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
+ */
+ WARN_ON(1);
+ }
++
++ return 0;
+ }
+
+ /*
+@@ -2944,20 +2954,11 @@ static int ext4_mb_scan_group(struct ext4_allocation_context *ac,
+ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+- ext4_group_t ngroups, group, i;
+- enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
++ ext4_group_t i;
+ int err = 0;
+- struct ext4_sb_info *sbi;
+- struct super_block *sb;
++ struct super_block *sb = ac->ac_sb;
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_buddy e4b;
+- int lost;
+-
+- sb = ac->ac_sb;
+- sbi = EXT4_SB(sb);
+- ngroups = ext4_get_groups_count(sb);
+- /* non-extent files are limited to low blocks/groups */
+- if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
+- ngroups = sbi->s_blockfile_groups;
+
+ BUG_ON(ac->ac_status == AC_STATUS_FOUND);
+
+@@ -3003,48 +3004,21 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ * start with CR_GOAL_LEN_FAST, unless it is power of 2
+ * aligned, in which case let's do that faster approach first.
+ */
++ ac->ac_criteria = CR_GOAL_LEN_FAST;
+ if (ac->ac_2order)
+- cr = CR_POWER2_ALIGNED;
++ ac->ac_criteria = CR_POWER2_ALIGNED;
+
+ ac->ac_e4b = &e4b;
+ ac->ac_prefetch_ios = 0;
+ ac->ac_first_err = 0;
+ repeat:
+- for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+- ac->ac_criteria = cr;
+- /*
+- * searching for the right group start
+- * from the goal value specified
+- */
+- group = ac->ac_g_ex.fe_group;
+- ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
+- ac->ac_prefetch_grp = group;
+- ac->ac_prefetch_nr = 0;
+-
+- for (i = 0, new_cr = cr; i < ngroups; i++,
+- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
+-
+- cond_resched();
+- if (new_cr != cr) {
+- cr = new_cr;
+- goto repeat;
+- }
+-
+- err = ext4_mb_scan_group(ac, group);
+- if (err)
+- goto out;
+-
+- if (ac->ac_status != AC_STATUS_CONTINUE)
+- break;
+- }
+- /* Processed all groups and haven't found blocks */
+- if (sbi->s_mb_stats && i == ngroups)
+- atomic64_inc(&sbi->s_bal_cX_failed[cr]);
++ while (ac->ac_criteria < EXT4_MB_NUM_CRS) {
++ err = ext4_mb_scan_groups(ac);
++ if (err)
++ goto out;
+
+- if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
+- /* Reset goal length to original goal length before
+- * falling into CR_GOAL_LEN_SLOW */
+- ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
++ if (ac->ac_status != AC_STATUS_CONTINUE)
++ break;
+ }
+
+ if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
+@@ -3055,6 +3029,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ */
+ ext4_mb_try_best_found(ac, &e4b);
+ if (ac->ac_status != AC_STATUS_FOUND) {
++ int lost;
++
+ /*
+ * Someone more lucky has already allocated it.
+ * The only thing we can do is just take first
+@@ -3070,7 +3046,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ ac->ac_b_ex.fe_len = 0;
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_flags |= EXT4_MB_HINT_FIRST;
+- cr = CR_ANY_FREE;
++ ac->ac_criteria = CR_ANY_FREE;
+ goto repeat;
+ }
+ }
+@@ -3083,7 +3059,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+
+ mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
+ ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
+- ac->ac_flags, cr, err);
++ ac->ac_flags, ac->ac_criteria, err);
+
+ if (ac->ac_prefetch_nr)
+ ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr);
+@@ -3211,8 +3187,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
+ seq_printf(seq, "\t\tuseless_loops: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
+- seq_printf(seq, "\t\tbad_suggestions: %u\n",
+- atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));
+
+ /* CR_GOAL_LEN_FAST stats */
+ seq_puts(seq, "\tcr_goal_fast_stats:\n");
+@@ -3225,8 +3199,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
+ seq_printf(seq, "\t\tuseless_loops: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
+- seq_printf(seq, "\t\tbad_suggestions: %u\n",
+- atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));
+
+ /* CR_BEST_AVAIL_LEN stats */
+ seq_puts(seq, "\tcr_best_avail_stats:\n");
+@@ -3240,8 +3212,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
+ atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
+ seq_printf(seq, "\t\tuseless_loops: %llu\n",
+ atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
+- seq_printf(seq, "\t\tbad_suggestions: %u\n",
+- atomic_read(&sbi->s_bal_best_avail_bad_suggestions));
+
+ /* CR_GOAL_LEN_SLOW stats */
+ seq_puts(seq, "\tcr_goal_slow_stats:\n");
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 83886fc9521b7..15a049f05d04a 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -199,7 +199,6 @@ struct ext4_allocation_context {
+ int ac_first_err;
+
+ __u32 ac_flags; /* allocation hints */
+- __u32 ac_groups_linear_remaining;
+ __u16 ac_groups_scanned;
+ __u16 ac_found;
+ __u16 ac_cX_found[EXT4_MB_NUM_CRS];
+--
+2.51.0
+
--- /dev/null
+From 309acd1211bf0b771969bcde0dd360cd74620009 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index 4c9e7892a73c1..43fbb9b26b102 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From e2066e9fc41514703eb49629c3ce6b41490d3057 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 15:25:08 -0700
+Subject: ima: define and call ima_alloc_kexec_file_buf()
+
+From: Steven Chen <chenste@linux.microsoft.com>
+
+[ Upstream commit c95e1acb6d7f00efab73e41b31e0560751e3f469 ]
+
+In the current implementation, the ima_dump_measurement_list() API is
+called during the kexec "load" phase, where a buffer is allocated and
+the measurement records are copied. Due to this, new events added after
+kexec load but before kexec execute are not carried over to the new kernel
+during kexec operation.
+
+Carrying the IMA measurement list across kexec requires allocating a
+buffer and copying the measurement records. Separate allocating the
+buffer and copying the measurement records into separate functions in
+order to allocate the buffer at kexec 'load' and copy the measurements
+at kexec 'execute'.
+
+After moving the vfree() here at this stage in the patch set, the IMA
+measurement list fails to verify when doing two consecutive "kexec -s -l"
+with/without a "kexec -s -u" in between. Only after "ima: kexec: move
+IMA log copy from kexec load to execute" the IMA measurement list verifies
+properly with the vfree() here.
+
+Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
+Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
+Acked-by: Baoquan He <bhe@redhat.com>
+Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Stable-dep-of: 10d1c75ed438 ("ima: verify the previous kernel's IMA buffer lies in addressable RAM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/integrity/ima/ima_kexec.c | 46 +++++++++++++++++++++++-------
+ 1 file changed, 35 insertions(+), 11 deletions(-)
+
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index 650beb74346c5..b12ac3619b8fd 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -15,26 +15,46 @@
+ #include "ima.h"
+
+ #ifdef CONFIG_IMA_KEXEC
++static struct seq_file ima_kexec_file;
++
++static void ima_free_kexec_file_buf(struct seq_file *sf)
++{
++ vfree(sf->buf);
++ sf->buf = NULL;
++ sf->size = 0;
++ sf->read_pos = 0;
++ sf->count = 0;
++}
++
++static int ima_alloc_kexec_file_buf(size_t segment_size)
++{
++ ima_free_kexec_file_buf(&ima_kexec_file);
++
++ /* segment size can't change between kexec load and execute */
++ ima_kexec_file.buf = vmalloc(segment_size);
++ if (!ima_kexec_file.buf)
++ return -ENOMEM;
++
++ ima_kexec_file.size = segment_size;
++ ima_kexec_file.read_pos = 0;
++ ima_kexec_file.count = sizeof(struct ima_kexec_hdr); /* reserved space */
++
++ return 0;
++}
++
+ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+ {
+- struct seq_file ima_kexec_file;
+ struct ima_queue_entry *qe;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+- ima_kexec_file.buf = vmalloc(segment_size);
+ if (!ima_kexec_file.buf) {
+- ret = -ENOMEM;
+- goto out;
++ pr_err("Kexec file buf not allocated\n");
++ return -EINVAL;
+ }
+
+- ima_kexec_file.file = NULL;
+- ima_kexec_file.size = segment_size;
+- ima_kexec_file.read_pos = 0;
+- ima_kexec_file.count = sizeof(khdr); /* reserved space */
+-
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ /* This is an append-only list, no need to hold the RCU read lock */
+@@ -71,8 +91,6 @@ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ *buffer_size = ima_kexec_file.count;
+ *buffer = ima_kexec_file.buf;
+ out:
+- if (ret == -EINVAL)
+- vfree(ima_kexec_file.buf);
+ return ret;
+ }
+
+@@ -111,6 +129,12 @@ void ima_add_kexec_buffer(struct kimage *image)
+ return;
+ }
+
++ ret = ima_alloc_kexec_file_buf(kexec_segment_size);
++ if (ret < 0) {
++ pr_err("Not enough memory for the kexec measurement buffer.\n");
++ return;
++ }
++
+ ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+ kexec_segment_size);
+ if (!kexec_buffer) {
+--
+2.51.0
+
--- /dev/null
+From 1729480f873b9c4d8890aca118810f6325955270 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 15:25:11 -0700
+Subject: ima: kexec: define functions to copy IMA log at soft boot
+
+From: Steven Chen <chenste@linux.microsoft.com>
+
+[ Upstream commit f18e502db673c75f762d47101dafcf58f30e2733 ]
+
+The IMA log is currently copied to the new kernel during kexec 'load'
+using ima_dump_measurement_list(). However, the log copied at kexec
+'load' may result in loss of IMA measurements that only occurred after
+kexec "load'. Setup the needed infrastructure to move the IMA log copy
+from kexec 'load' to 'execute'.
+
+Define a new IMA hook ima_update_kexec_buffer() as a stub function.
+It will be used to call ima_dump_measurement_list() during kexec 'execute'.
+
+Implement ima_kexec_post_load() function to be invoked after the new
+Kernel image has been loaded for kexec. ima_kexec_post_load() maps the
+IMA buffer to a segment in the newly loaded Kernel. It also registers
+the reboot notifier_block to trigger ima_update_kexec_buffer() at
+kexec 'execute'.
+
+Set the priority of register_reboot_notifier to INT_MIN to ensure that the
+IMA log copy operation will happen at the end of the operation chain, so
+that all the IMA measurement records extended into the TPM are copied
+
+Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Cc: Dave Young <dyoung@redhat.com>
+Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
+Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
+Acked-by: Baoquan He <bhe@redhat.com>
+Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Stable-dep-of: 10d1c75ed438 ("ima: verify the previous kernel's IMA buffer lies in addressable RAM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ima.h | 3 ++
+ security/integrity/ima/ima_kexec.c | 47 ++++++++++++++++++++++++++++++
+ 2 files changed, 50 insertions(+)
+
+diff --git a/include/linux/ima.h b/include/linux/ima.h
+index 0bae61a15b60b..8e29cb4e6a01d 100644
+--- a/include/linux/ima.h
++++ b/include/linux/ima.h
+@@ -32,6 +32,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
+
+ #ifdef CONFIG_IMA_KEXEC
+ extern void ima_add_kexec_buffer(struct kimage *image);
++extern void ima_kexec_post_load(struct kimage *image);
++#else
++static inline void ima_kexec_post_load(struct kimage *image) {}
+ #endif
+
+ #else
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index b12ac3619b8fd..a22eeac9320aa 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -12,10 +12,14 @@
+ #include <linux/kexec.h>
+ #include <linux/of.h>
+ #include <linux/ima.h>
++#include <linux/reboot.h>
++#include <asm/page.h>
+ #include "ima.h"
+
+ #ifdef CONFIG_IMA_KEXEC
++static bool ima_kexec_update_registered;
+ static struct seq_file ima_kexec_file;
++static void *ima_kexec_buffer;
+
+ static void ima_free_kexec_file_buf(struct seq_file *sf)
+ {
+@@ -159,6 +163,49 @@ void ima_add_kexec_buffer(struct kimage *image)
+ kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+ }
++
++/*
++ * Called during kexec execute so that IMA can update the measurement list.
++ */
++static int ima_update_kexec_buffer(struct notifier_block *self,
++ unsigned long action, void *data)
++{
++ return NOTIFY_OK;
++}
++
++static struct notifier_block update_buffer_nb = {
++ .notifier_call = ima_update_kexec_buffer,
++ .priority = INT_MIN
++};
++
++/*
++ * Create a mapping for the source pages that contain the IMA buffer
++ * so we can update it later.
++ */
++void ima_kexec_post_load(struct kimage *image)
++{
++ if (ima_kexec_buffer) {
++ kimage_unmap_segment(ima_kexec_buffer);
++ ima_kexec_buffer = NULL;
++ }
++
++ if (!image->ima_buffer_addr)
++ return;
++
++ ima_kexec_buffer = kimage_map_segment(image,
++ image->ima_buffer_addr,
++ image->ima_buffer_size);
++ if (!ima_kexec_buffer) {
++ pr_err("Could not map measurements buffer.\n");
++ return;
++ }
++
++ if (!ima_kexec_update_registered) {
++ register_reboot_notifier(&update_buffer_nb);
++ ima_kexec_update_registered = true;
++ }
++}
++
+ #endif /* IMA_KEXEC */
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From d397ffa229c938add8a596a6bb048144cba3549f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Nov 2024 01:57:12 -0800
+Subject: ima: kexec: silence RCU list traversal warning
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit 68af44a71975688b881ea524e2526bb7c7ad0e9a ]
+
+The ima_measurements list is append-only and doesn't require
+rcu_read_lock() protection. However, lockdep issues a warning when
+traversing RCU lists without the read lock:
+
+ security/integrity/ima/ima_kexec.c:40 RCU-list traversed in non-reader section!!
+
+Fix this by using the variant of list_for_each_entry_rcu() with the last
+argument set to true. This tells the RCU subsystem that traversing this
+append-only list without the read lock is intentional and safe.
+
+This change silences the lockdep warning while maintaining the correct
+semantics for the append-only list traversal.
+
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Stable-dep-of: 10d1c75ed438 ("ima: verify the previous kernel's IMA buffer lies in addressable RAM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/integrity/ima/ima_kexec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index 52e00332defed..9d45f4d26f731 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -37,7 +37,8 @@ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+- list_for_each_entry_rcu(qe, &ima_measurements, later) {
++ /* This is an append-only list, no need to hold the RCU read lock */
++ list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
+ if (file.count < file.size) {
+ khdr.count++;
+ ima_measurements_show(&file, qe);
+--
+2.51.0
+
--- /dev/null
+From 62db70f3a607c1608c9ace6e4aa70af66f82428c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 15:25:07 -0700
+Subject: ima: rename variable the seq_file "file" to "ima_kexec_file"
+
+From: Steven Chen <chenste@linux.microsoft.com>
+
+[ Upstream commit cb5052282c65dc998d12e4eea8d5133249826c13 ]
+
+Before making the function local seq_file "file" variable file static
+global, rename it to "ima_kexec_file".
+
+Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
+Acked-by: Baoquan He <bhe@redhat.com>
+Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Stable-dep-of: 10d1c75ed438 ("ima: verify the previous kernel's IMA buffer lies in addressable RAM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/integrity/ima/ima_kexec.c | 31 +++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index 9d45f4d26f731..650beb74346c5 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -18,30 +18,30 @@
+ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+ {
++ struct seq_file ima_kexec_file;
+ struct ima_queue_entry *qe;
+- struct seq_file file;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+- file.buf = vmalloc(segment_size);
+- if (!file.buf) {
++ ima_kexec_file.buf = vmalloc(segment_size);
++ if (!ima_kexec_file.buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- file.file = NULL;
+- file.size = segment_size;
+- file.read_pos = 0;
+- file.count = sizeof(khdr); /* reserved space */
++ ima_kexec_file.file = NULL;
++ ima_kexec_file.size = segment_size;
++ ima_kexec_file.read_pos = 0;
++ ima_kexec_file.count = sizeof(khdr); /* reserved space */
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ /* This is an append-only list, no need to hold the RCU read lock */
+ list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
+- if (file.count < file.size) {
++ if (ima_kexec_file.count < ima_kexec_file.size) {
+ khdr.count++;
+- ima_measurements_show(&file, qe);
++ ima_measurements_show(&ima_kexec_file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+@@ -55,23 +55,24 @@ static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ * fill in reserved space with some buffer details
+ * (eg. version, buffer size, number of measurements)
+ */
+- khdr.buffer_size = file.count;
++ khdr.buffer_size = ima_kexec_file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+- memcpy(file.buf, &khdr, sizeof(khdr));
++ memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
+- file.buf, file.count < 100 ? file.count : 100,
++ ima_kexec_file.buf, ima_kexec_file.count < 100 ?
++ ima_kexec_file.count : 100,
+ true);
+
+- *buffer_size = file.count;
+- *buffer = file.buf;
++ *buffer_size = ima_kexec_file.count;
++ *buffer = ima_kexec_file.buf;
+ out:
+ if (ret == -EINVAL)
+- vfree(file.buf);
++ vfree(ima_kexec_file.buf);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From d51ebb3b784c7d33400456a5cc148aca146d2e74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 22:16:07 -0800
+Subject: ima: verify the previous kernel's IMA buffer lies in addressable RAM
+
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+
+[ Upstream commit 10d1c75ed4382a8e79874379caa2ead8952734f9 ]
+
+Patch series "Address page fault in ima_restore_measurement_list()", v3.
+
+When the second-stage kernel is booted via kexec with a limiting command
+line such as "mem=<size>" we observe a page fault that happens.
+
+ BUG: unable to handle page fault for address: ffff97793ff47000
+ RIP: ima_restore_measurement_list+0xdc/0x45a
+ #PF: error_code(0x0000) not-present page
+
+This happens on x86_64 only, as this is already fixed in aarch64 in
+commit: cbf9c4b9617b ("of: check previous kernel's ima-kexec-buffer
+against memory bounds")
+
+This patch (of 3):
+
+When the second-stage kernel is booted with a limiting command line (e.g.
+"mem=<size>"), the IMA measurement buffer handed over from the previous
+kernel may fall outside the addressable RAM of the new kernel. Accessing
+such a buffer can fault during early restore.
+
+Introduce a small generic helper, ima_validate_range(), which verifies
+that a physical [start, end] range for the previous-kernel IMA buffer lies
+within addressable memory:
+ - On x86, use pfn_range_is_mapped().
+ - On OF based architectures, use page_is_ram().
+
+Link: https://lkml.kernel.org/r/20251231061609.907170-1-harshit.m.mogalapalli@oracle.com
+Link: https://lkml.kernel.org/r/20251231061609.907170-2-harshit.m.mogalapalli@oracle.com
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Borislav Betkov <bp@alien8.de>
+Cc: guoweikang <guoweikang.kernel@gmail.com>
+Cc: Henry Willard <henry.willard@oracle.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Bohac <jbohac@suse.cz>
+Cc: Joel Granados <joel.granados@kernel.org>
+Cc: Jonathan McDowell <noodles@fb.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Paul Webb <paul.x.webb@oracle.com>
+Cc: Sohil Mehta <sohil.mehta@intel.com>
+Cc: Sourabh Jain <sourabhjain@linux.ibm.com>
+Cc: Thomas Gleinxer <tglx@linutronix.de>
+Cc: Yifei Liu <yifei.l.liu@oracle.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ima.h | 1 +
+ security/integrity/ima/ima_kexec.c | 35 ++++++++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
+
+diff --git a/include/linux/ima.h b/include/linux/ima.h
+index 8e29cb4e6a01d..abf8923f8fc51 100644
+--- a/include/linux/ima.h
++++ b/include/linux/ima.h
+@@ -69,6 +69,7 @@ static inline int ima_measure_critical_data(const char *event_label,
+ #ifdef CONFIG_HAVE_IMA_KEXEC
+ int __init ima_free_kexec_buffer(void);
+ int __init ima_get_kexec_buffer(void **addr, size_t *size);
++int ima_validate_range(phys_addr_t phys, size_t size);
+ #endif
+
+ #ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index a22eeac9320aa..c9e5b1d6b0ab8 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -12,6 +12,8 @@
+ #include <linux/kexec.h>
+ #include <linux/of.h>
+ #include <linux/ima.h>
++#include <linux/mm.h>
++#include <linux/overflow.h>
+ #include <linux/reboot.h>
+ #include <asm/page.h>
+ #include "ima.h"
+@@ -238,3 +240,36 @@ void __init ima_load_kexec_buffer(void)
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+ }
++
++/*
++ * ima_validate_range - verify a physical buffer lies in addressable RAM
++ * @phys: physical start address of the buffer from previous kernel
++ * @size: size of the buffer
++ *
++ * On success return 0. On failure returns -EINVAL so callers can skip
++ * restoring.
++ */
++int ima_validate_range(phys_addr_t phys, size_t size)
++{
++ unsigned long start_pfn, end_pfn;
++ phys_addr_t end_phys;
++
++ if (check_add_overflow(phys, (phys_addr_t)size - 1, &end_phys))
++ return -EINVAL;
++
++ start_pfn = PHYS_PFN(phys);
++ end_pfn = PHYS_PFN(end_phys);
++
++#ifdef CONFIG_X86
++ if (!pfn_range_is_mapped(start_pfn, end_pfn))
++#else
++ if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn))
++#endif
++ {
++ pr_warn("IMA: previous kernel measurement buffer %pa (size 0x%zx) lies outside available memory\n",
++ &phys, size);
++ return -EINVAL;
++ }
++
++ return 0;
++}
+--
+2.51.0
+
--- /dev/null
+From 71b00fc3a2d75c9d87b9acfc955f95a73cff1e4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:02:02 -0800
+Subject: Input: synaptics_i2c - guard polling restart in resume
+
+From: Minseong Kim <ii4gsp@gmail.com>
+
+[ Upstream commit 870c2e7cd881d7a10abb91f2b38135622d9f9f65 ]
+
+synaptics_i2c_resume() restarts delayed work unconditionally, even when
+the input device is not opened. Guard the polling restart by taking the
+input device mutex and checking input_device_enabled() before re-queuing
+the delayed work.
+
+Fixes: eef3e4cab72ea ("Input: add driver for Synaptics I2C touchpad")
+Signed-off-by: Minseong Kim <ii4gsp@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260121063738.799967-1-ii4gsp@gmail.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/synaptics_i2c.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
+index c8ddfff2605ff..29da66af36d74 100644
+--- a/drivers/input/mouse/synaptics_i2c.c
++++ b/drivers/input/mouse/synaptics_i2c.c
+@@ -615,13 +615,16 @@ static int synaptics_i2c_resume(struct device *dev)
+ int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct synaptics_i2c *touch = i2c_get_clientdata(client);
++ struct input_dev *input = touch->input;
+
+ ret = synaptics_i2c_reset_config(client);
+ if (ret)
+ return ret;
+
+- mod_delayed_work(system_dfl_wq, &touch->dwork,
+- msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
++ guard(mutex)(&input->mutex);
++ if (input_device_enabled(input))
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
++ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 549b04b6c6201f94b48fb58ff71010f408e6eab7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Nov 2025 15:19:54 +0100
+Subject: Input: synaptics_i2c - replace use of system_wq with system_dfl_wq
+
+From: Marco Crivellari <marco.crivellari@suse.com>
+
+[ Upstream commit b3ee88e27798f0e8dd3a81867804d693da74d57d ]
+
+Currently if a user enqueues a work item using schedule_delayed_work() the
+used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use
+WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
+schedule_work() that is using system_wq and queue_work(), that makes use
+again of WORK_CPU_UNBOUND.
+
+This lack of consistency cannot be addressed without refactoring the API.
+
+This patch continues the effort to refactor workqueue APIs, which has begun
+with the change introducing new workqueues and a new alloc_workqueue flag:
+
+commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
+commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
+
+This specific workload does not benefit from a per-cpu workqueue, so use
+the default unbound workqueue (system_dfl_wq) instead.
+
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
+Link: https://patch.msgid.link/20251106141955.218911-4-marco.crivellari@suse.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Stable-dep-of: 870c2e7cd881 ("Input: synaptics_i2c - guard polling restart in resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/synaptics_i2c.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
+index a0d707e47d932..c8ddfff2605ff 100644
+--- a/drivers/input/mouse/synaptics_i2c.c
++++ b/drivers/input/mouse/synaptics_i2c.c
+@@ -372,7 +372,7 @@ static irqreturn_t synaptics_i2c_irq(int irq, void *dev_id)
+ {
+ struct synaptics_i2c *touch = dev_id;
+
+- mod_delayed_work(system_wq, &touch->dwork, 0);
++ mod_delayed_work(system_dfl_wq, &touch->dwork, 0);
+
+ return IRQ_HANDLED;
+ }
+@@ -448,7 +448,7 @@ static void synaptics_i2c_work_handler(struct work_struct *work)
+ * We poll the device once in THREAD_IRQ_SLEEP_SECS and
+ * if error is detected, we try to reset and reconfigure the touchpad.
+ */
+- mod_delayed_work(system_wq, &touch->dwork, delay);
++ mod_delayed_work(system_dfl_wq, &touch->dwork, delay);
+ }
+
+ static int synaptics_i2c_open(struct input_dev *input)
+@@ -461,7 +461,7 @@ static int synaptics_i2c_open(struct input_dev *input)
+ return ret;
+
+ if (polling_req)
+- mod_delayed_work(system_wq, &touch->dwork,
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+@@ -620,7 +620,7 @@ static int synaptics_i2c_resume(struct device *dev)
+ if (ret)
+ return ret;
+
+- mod_delayed_work(system_wq, &touch->dwork,
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 0432f65635008f7ad1ffdc6e146db83ce1b2d911 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jan 2026 09:48:50 +0800
+Subject: iommu/vt-d: Skip dev-iotlb flush for inaccessible PCIe device without
+ scalable mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jinhui Guo <guojinhui.liam@bytedance.com>
+
+[ Upstream commit 42662d19839f34735b718129ea200e3734b07e50 ]
+
+PCIe endpoints with ATS enabled and passed through to userspace
+(e.g., QEMU, DPDK) can hard-lock the host when their link drops,
+either by surprise removal or by a link fault.
+
+Commit 4fc82cd907ac ("iommu/vt-d: Don't issue ATS Invalidation
+request when device is disconnected") adds pci_dev_is_disconnected()
+to devtlb_invalidation_with_pasid() so ATS invalidation is skipped
+only when the device is being safely removed, but it applies only
+when Intel IOMMU scalable mode is enabled.
+
+With scalable mode disabled or unsupported, a system hard-lock
+occurs when a PCIe endpoint's link drops because the Intel IOMMU
+waits indefinitely for an ATS invalidation that cannot complete.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ domain_context_clear_one_cb
+ pci_for_each_dma_alias
+ device_block_translation
+ blocking_domain_attach_dev
+ iommu_deinit_device
+ __iommu_group_remove_device
+ iommu_release_device
+ iommu_bus_notifier
+ blocking_notifier_call_chain
+ bus_notify
+ device_del
+ pci_remove_bus_device
+ pci_stop_and_remove_bus_device
+ pciehp_unconfigure_device
+ pciehp_disable_slot
+ pciehp_handle_presence_or_link_change
+ pciehp_ist
+
+Commit 81e921fd3216 ("iommu/vt-d: Fix NULL domain on device release")
+adds intel_pasid_teardown_sm_context() to intel_iommu_release_device(),
+which calls qi_flush_dev_iotlb() and can also hard-lock the system
+when a PCIe endpoint's link drops.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ intel_context_flush_no_pasid
+ device_pasid_table_teardown
+ pci_pasid_table_teardown
+ pci_for_each_dma_alias
+ intel_pasid_teardown_sm_context
+ intel_iommu_release_device
+ iommu_deinit_device
+ __iommu_group_remove_device
+ iommu_release_device
+ iommu_bus_notifier
+ blocking_notifier_call_chain
+ bus_notify
+ device_del
+ pci_remove_bus_device
+ pci_stop_and_remove_bus_device
+ pciehp_unconfigure_device
+ pciehp_disable_slot
+ pciehp_handle_presence_or_link_change
+ pciehp_ist
+
+Sometimes the endpoint loses connection without a link-down event
+(e.g., due to a link fault); killing the process (virsh destroy)
+then hard-locks the host.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ domain_context_clear_one_cb
+ pci_for_each_dma_alias
+ device_block_translation
+ blocking_domain_attach_dev
+ __iommu_attach_device
+ __iommu_device_set_domain
+ __iommu_group_set_domain_internal
+ iommu_detach_group
+ vfio_iommu_type1_detach_group
+ vfio_group_detach_container
+ vfio_group_fops_release
+ __fput
+
+pci_dev_is_disconnected() only covers safe-removal paths;
+pci_device_is_present() tests accessibility by reading
+vendor/device IDs and internally calls pci_dev_is_disconnected().
+On a ConnectX-5 (8 GT/s, x2) this costs ~70 µs.
+
+Since __context_flush_dev_iotlb() is only called on
+{attach,release}_dev paths (not hot), add pci_device_is_present()
+there to skip inaccessible devices and avoid the hard-lock.
+
+Fixes: 37764b952e1b ("iommu/vt-d: Global devTLB flush when present context entry changed")
+Fixes: 81e921fd3216 ("iommu/vt-d: Fix NULL domain on device release")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jinhui Guo <guojinhui.liam@bytedance.com>
+Link: https://lore.kernel.org/r/20251211035946.2071-2-guojinhui.liam@bytedance.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/pasid.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 90fdfa5f7d1d6..3d1d43675bf22 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -867,6 +867,14 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
+ if (!info->ats_enabled)
+ return;
+
++ /*
++ * Skip dev-IOTLB flush for inaccessible PCIe devices to prevent the
++ * Intel IOMMU from waiting indefinitely for an ATS invalidation that
++ * cannot complete.
++ */
++ if (!pci_device_is_present(to_pci_dev(info->dev)))
++ return;
++
+ qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+--
+2.51.0
+
--- /dev/null
+From 51827bfcdc1c02aa4ea01b7aadcea8c2b8250666 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Apr 2025 15:25:09 -0700
+Subject: kexec: define functions to map and unmap segments
+
+From: Steven Chen <chenste@linux.microsoft.com>
+
+[ Upstream commit 0091d9241ea24c5275be4a3e5a032862fd9de9ec ]
+
+Implement kimage_map_segment() to enable IMA to map the measurement log
+list to the kimage structure during the kexec 'load' stage. This function
+gathers the source pages within the specified address range, and maps them
+to a contiguous virtual address range.
+
+This is a preparation for later usage.
+
+Implement kimage_unmap_segment() for unmapping segments using vunmap().
+
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Cc: Dave Young <dyoung@redhat.com>
+Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
+Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
+Acked-by: Baoquan He <bhe@redhat.com>
+Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Stable-dep-of: 10d1c75ed438 ("ima: verify the previous kernel's IMA buffer lies in addressable RAM")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/kexec.h | 6 +++++
+ kernel/kexec_core.c | 54 +++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 60 insertions(+)
+
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index f0e9f8eda7a3c..7d6b12f8b8d05 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -467,13 +467,19 @@ extern bool kexec_file_dbg_print;
+ #define kexec_dprintk(fmt, arg...) \
+ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+
++extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
++extern void kimage_unmap_segment(void *buffer);
+ #else /* !CONFIG_KEXEC_CORE */
+ struct pt_regs;
+ struct task_struct;
++struct kimage;
+ static inline void __crash_kexec(struct pt_regs *regs) { }
+ static inline void crash_kexec(struct pt_regs *regs) { }
+ static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+ static inline int kexec_crash_loaded(void) { return 0; }
++static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
++{ return NULL; }
++static inline void kimage_unmap_segment(void *buffer) { }
+ #define kexec_in_progress false
+ #endif /* CONFIG_KEXEC_CORE */
+
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index c0caa14880c3b..6c15cd5b9cae5 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -867,6 +867,60 @@ int kimage_load_segment(struct kimage *image,
+ return result;
+ }
+
++void *kimage_map_segment(struct kimage *image,
++ unsigned long addr, unsigned long size)
++{
++ unsigned long src_page_addr, dest_page_addr = 0;
++ unsigned long eaddr = addr + size;
++ kimage_entry_t *ptr, entry;
++ struct page **src_pages;
++ unsigned int npages;
++ void *vaddr = NULL;
++ int i;
++
++ /*
++ * Collect the source pages and map them in a contiguous VA range.
++ */
++ npages = PFN_UP(eaddr) - PFN_DOWN(addr);
++ src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
++ if (!src_pages) {
++ pr_err("Could not allocate ima pages array.\n");
++ return NULL;
++ }
++
++ i = 0;
++ for_each_kimage_entry(image, ptr, entry) {
++ if (entry & IND_DESTINATION) {
++ dest_page_addr = entry & PAGE_MASK;
++ } else if (entry & IND_SOURCE) {
++ if (dest_page_addr >= addr && dest_page_addr < eaddr) {
++ src_page_addr = entry & PAGE_MASK;
++ src_pages[i++] =
++ virt_to_page(__va(src_page_addr));
++ if (i == npages)
++ break;
++ dest_page_addr += PAGE_SIZE;
++ }
++ }
++ }
++
++ /* Sanity check. */
++ WARN_ON(i < npages);
++
++ vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
++ kfree(src_pages);
++
++ if (!vaddr)
++ pr_err("Could not map ima buffer.\n");
++
++ return vaddr;
++}
++
++void kimage_unmap_segment(void *segment_buffer)
++{
++ vunmap(segment_buffer);
++}
++
+ struct kexec_load_limit {
+ /* Mutex protects the limit count. */
+ struct mutex mutex;
+--
+2.51.0
+
--- /dev/null
+From 2db30504038834a13e289deb908a16f23b5d0215 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 10:43:19 +0900
+Subject: ksmbd: add chann_lock to protect ksmbd_chann_list xarray
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 4f3a06cc57976cafa8c6f716646be6c79a99e485 ]
+
+ksmbd_chann_list xarray lacks synchronization, allowing use-after-free in
+multi-channel sessions (between lookup_chann_list() and ksmbd_chann_del).
+
+Adds rw_semaphore chann_lock to struct ksmbd_session and protects
+all xa_load/xa_store/xa_erase accesses.
+
+Cc: stable@vger.kernel.org
+Reported-by: Igor Stepansky <igor.stepansky@orca.security>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/mgmt/user_session.c | 5 +++++
+ fs/smb/server/mgmt/user_session.h | 1 +
+ fs/smb/server/smb2pdu.c | 12 +++++++++++-
+ 3 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 66198ed26aeca..352cf9e47ebeb 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -32,12 +32,14 @@ static void free_channel_list(struct ksmbd_session *sess)
+ struct channel *chann;
+ unsigned long index;
+
++ down_write(&sess->chann_lock);
+ xa_for_each(&sess->ksmbd_chann_list, index, chann) {
+ xa_erase(&sess->ksmbd_chann_list, index);
+ kfree(chann);
+ }
+
+ xa_destroy(&sess->ksmbd_chann_list);
++ up_write(&sess->chann_lock);
+ }
+
+ static void __session_rpc_close(struct ksmbd_session *sess,
+@@ -220,7 +222,9 @@ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+ struct channel *chann;
+
++ down_write(&sess->chann_lock);
+ chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++ up_write(&sess->chann_lock);
+ if (!chann)
+ return -ENOENT;
+
+@@ -454,6 +458,7 @@ static struct ksmbd_session *__session_create(int protocol)
+ rwlock_init(&sess->tree_conns_lock);
+ atomic_set(&sess->refcnt, 2);
+ init_rwsem(&sess->rpc_lock);
++ init_rwsem(&sess->chann_lock);
+
+ ret = __init_smb2_session(sess);
+ if (ret)
+diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
+index c5749d6ec7151..cba7f688f6b57 100644
+--- a/fs/smb/server/mgmt/user_session.h
++++ b/fs/smb/server/mgmt/user_session.h
+@@ -49,6 +49,7 @@ struct ksmbd_session {
+ char sess_key[CIFS_KEY_SIZE];
+
+ struct hlist_node hlist;
++ struct rw_semaphore chann_lock;
+ struct xarray ksmbd_chann_list;
+ struct xarray tree_conns;
+ struct ida tree_conn_ida;
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 8fa6ab9dfd077..0d7ba57c1ca64 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -78,7 +78,13 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+
+ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+ {
+- return xa_load(&sess->ksmbd_chann_list, (long)conn);
++ struct channel *chann;
++
++ down_read(&sess->chann_lock);
++ chann = xa_load(&sess->ksmbd_chann_list, (long)conn);
++ up_read(&sess->chann_lock);
++
++ return chann;
+ }
+
+ /**
+@@ -1560,8 +1566,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
+ return -ENOMEM;
+
+ chann->conn = conn;
++ down_write(&sess->chann_lock);
+ old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann,
+ KSMBD_DEFAULT_GFP);
++ up_write(&sess->chann_lock);
+ if (xa_is_err(old)) {
+ kfree(chann);
+ return xa_err(old);
+@@ -1658,8 +1666,10 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ return -ENOMEM;
+
+ chann->conn = conn;
++ down_write(&sess->chann_lock);
+ old = xa_store(&sess->ksmbd_chann_list, (long)conn,
+ chann, KSMBD_DEFAULT_GFP);
++ up_write(&sess->chann_lock);
+ if (xa_is_err(old)) {
+ kfree(chann);
+ return xa_err(old);
+--
+2.51.0
+
--- /dev/null
+From 2f891070607fdb6224973a0a8ed9911f26b05f43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Jul 2025 14:29:43 +0900
+Subject: ksmbd: check return value of xa_store() in krb5_authenticate
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit ecd9d6bf88ddd64e3dc7beb9a065fd5fa4714f72 ]
+
+xa_store() may fail so check its return value and return error code if
+error occurred.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 4f3a06cc5797 ("ksmbd: add chann_lock to protect ksmbd_chann_list xarray")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/smb2pdu.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index ac8248479cba2..8fa6ab9dfd077 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1592,7 +1592,7 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ struct ksmbd_conn *conn = work->conn;
+ struct ksmbd_session *sess = work->sess;
+ char *in_blob, *out_blob;
+- struct channel *chann = NULL;
++ struct channel *chann = NULL, *old;
+ u64 prev_sess_id;
+ int in_len, out_len;
+ int retval;
+@@ -1658,7 +1658,12 @@ static int krb5_authenticate(struct ksmbd_work *work,
+ return -ENOMEM;
+
+ chann->conn = conn;
+- xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
++ old = xa_store(&sess->ksmbd_chann_list, (long)conn,
++ chann, KSMBD_DEFAULT_GFP);
++ if (xa_is_err(old)) {
++ kfree(chann);
++ return xa_err(old);
++ }
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 2b821ffc9c4c24a846242c9631d50c4022af467e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jan 2026 19:06:57 -0800
+Subject: KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ead63640d4e72e6f6d464f4e31f7fecb79af8869 ]
+
+Ignore -EBUSY when checking nested events after exiting a blocking state
+while L2 is active, as exiting to userspace will generate a spurious
+userspace exit, usually with KVM_EXIT_UNKNOWN, and likely lead to the VM's
+demise. Continuing with the wakeup isn't perfect either, as *something*
+has gone sideways if a vCPU is awakened in L2 with an injected event (or
+worse, a nested run pending), but continuing on gives the VM a decent
+chance of surviving without any major side effects.
+
+As explained in the Fixes commits, it _should_ be impossible for a vCPU to
+be put into a blocking state with an already-injected event (exception,
+IRQ, or NMI). Unfortunately, userspace can stuff MP_STATE and/or injected
+events, and thus put the vCPU into what should be an impossible state.
+
+Don't bother trying to preserve the WARN, e.g. with an anti-syzkaller
+Kconfig, as WARNs can (hopefully) be added in paths where _KVM_ would be
+violating x86 architecture, e.g. by WARNing if KVM attempts to inject an
+exception or interrupt while the vCPU isn't running.
+
+Cc: Alessandro Ratti <alessandro@0x65c.net>
+Cc: stable@vger.kernel.org
+Fixes: 26844fee6ade ("KVM: x86: never write to memory from kvm_vcpu_check_block()")
+Fixes: 45405155d876 ("KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet inject")
+Link: https://syzkaller.appspot.com/text?tag=ReproC&x=10d4261a580000
+Reported-by: syzbot+1522459a74d26b0ac33a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/671bc7a7.050a0220.455e8.022a.GAE@google.com
+Link: https://patch.msgid.link/20260109030657.994759-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8f673aaa0490f..0d9035993ed36 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11285,8 +11285,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ if (is_guest_mode(vcpu)) {
+ int r = kvm_check_nested_events(vcpu);
+
+- WARN_ON_ONCE(r == -EBUSY);
+- if (r < 0)
++ if (r < 0 && r != -EBUSY)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 6b41703627040fffd57a7d4c86f027e6b30cad95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:31:13 +0800
+Subject: LoongArch: Handle percpu handler address for ORC unwinder
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 055c7e75190e0be43037bd663a3f6aced194416e ]
+
+After commit 4cd641a79e69 ("LoongArch: Remove unnecessary checks for ORC
+unwinder"), the system can not boot normally under some configs (such as
+enable KASAN), there are many error messages "cannot find unwind pc".
+
+The kernel boots normally with the defconfig, so no problem found out at
+the first time. Here is one way to reproduce:
+
+ cd linux
+ make mrproper defconfig -j"$(nproc)"
+ scripts/config -e KASAN
+ make olddefconfig all -j"$(nproc)"
+ sudo make modules_install
+ sudo make install
+ sudo reboot
+
+The address that can not unwind is not a valid kernel address which is
+between "pcpu_handlers[cpu]" and "pcpu_handlers[cpu] + vec_sz" due to
+the code of eentry was copied to the new area of pcpu_handlers[cpu] in
+setup_tlb_handler(), handle this special case to get the valid address
+to unwind normally.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/setup.h | 3 +++
+ arch/loongarch/kernel/unwind_orc.c | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
+index 3c2fb16b11b64..f81375e5e89c0 100644
+--- a/arch/loongarch/include/asm/setup.h
++++ b/arch/loongarch/include/asm/setup.h
+@@ -7,6 +7,7 @@
+ #define _LOONGARCH_SETUP_H
+
+ #include <linux/types.h>
++#include <linux/threads.h>
+ #include <asm/sections.h>
+ #include <uapi/asm/setup.h>
+
+@@ -14,6 +15,8 @@
+
+ extern unsigned long eentry;
+ extern unsigned long tlbrentry;
++extern unsigned long pcpu_handlers[NR_CPUS];
++extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+ extern char init_command_line[COMMAND_LINE_SIZE];
+ extern void tlb_init(int cpu);
+ extern void cpu_cache_init(void);
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 4924d1ecc4579..9512fa4fff0f9 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -359,6 +359,22 @@ static inline unsigned long bt_address(unsigned long ra)
+ {
+ extern unsigned long eentry;
+
++#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
++ int cpu;
++ int vec_sz = sizeof(exception_handlers);
++
++ for_each_possible_cpu(cpu) {
++ if (!pcpu_handlers[cpu])
++ continue;
++
++ if (ra >= pcpu_handlers[cpu] &&
++ ra < pcpu_handlers[cpu] + vec_sz) {
++ ra = ra + eentry - pcpu_handlers[cpu];
++ break;
++ }
++ }
++#endif
++
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+--
+2.51.0
+
--- /dev/null
+From cb7c7e4265d8d6fbba83f1d4df9a028837702e06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 10:04:47 +0100
+Subject: LoongArch/orc: Use RCU in all users of __module_address().
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit f99d27d9feb755aee9350fc89f57814d7e1b4880 ]
+
+__module_address() can be invoked within a RCU section, there is no
+requirement to have preemption disabled.
+
+Replace the preempt_disable() section around __module_address() with
+RCU.
+
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: WANG Xuerui <kernel@xen0n.name>
+Cc: loongarch@lists.linux.dev
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20250108090457.512198-19-bigeasy@linutronix.de
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Stable-dep-of: 055c7e75190e ("LoongArch: Handle percpu handler address for ORC unwinder")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 471652c0c8653..59809c3406c03 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -399,7 +399,7 @@ bool unwind_next_frame(struct unwind_state *state)
+ return false;
+
+ /* Don't let modules unload while we're reading their ORC data. */
+- preempt_disable();
++ guard(rcu)();
+
+ if (is_entry_func(state->pc))
+ goto end;
+@@ -514,14 +514,12 @@ bool unwind_next_frame(struct unwind_state *state)
+ if (!__kernel_text_address(state->pc))
+ goto err;
+
+- preempt_enable();
+ return true;
+
+ err:
+ state->error = true;
+
+ end:
+- preempt_enable();
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+ }
+--
+2.51.0
+
--- /dev/null
+From 48b99ff77acc856589e73976152440fd6008703a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:31:14 +0800
+Subject: LoongArch: Remove some extern variables in source files
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 0e6f596d6ac635e80bb265d587b2287ef8fa1cd6 ]
+
+There are declarations of the variable "eentry", "pcpu_handlers[]" and
+"exception_handlers[]" in asm/setup.h, the source files already include
+this header file directly or indirectly, so no need to declare them in
+the source files, just remove the code.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 2 --
+ arch/loongarch/kernel/unwind_prologue.c | 4 ----
+ arch/loongarch/mm/tlb.c | 1 -
+ 3 files changed, 7 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 9512fa4fff0f9..e8b95f1bc5786 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -357,8 +357,6 @@ static bool is_entry_func(unsigned long addr)
+
+ static inline unsigned long bt_address(unsigned long ra)
+ {
+- extern unsigned long eentry;
+-
+ #if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
+ int cpu;
+ int vec_sz = sizeof(exception_handlers);
+diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
+index c9ee6892d81c7..d4c42dc67134c 100644
+--- a/arch/loongarch/kernel/unwind_prologue.c
++++ b/arch/loongarch/kernel/unwind_prologue.c
+@@ -22,10 +22,6 @@ extern const int unwind_hint_lasx;
+ extern const int unwind_hint_lbt;
+ extern const int unwind_hint_ri;
+ extern const int unwind_hint_watch;
+-extern unsigned long eentry;
+-#ifdef CONFIG_NUMA
+-extern unsigned long pcpu_handlers[NR_CPUS];
+-#endif
+
+ static inline bool scan_handlers(unsigned long entry_offset)
+ {
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index f46c15d6e7eae..24add95ecb65e 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -260,7 +260,6 @@ static void output_pgtable_bits_defines(void)
+ #ifdef CONFIG_NUMA
+ unsigned long pcpu_handlers[NR_CPUS];
+ #endif
+-extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+
+ static void setup_tlb_handler(int cpu)
+ {
+--
+2.51.0
+
--- /dev/null
+From 8016357b4f9affabc1aa9efcd7c84e6d840a48a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 15:19:19 +0800
+Subject: LoongArch: Remove unnecessary checks for ORC unwinder
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 4cd641a79e69270a062777f64a0dd330abb9044a ]
+
+According to the following function definitions, __kernel_text_address()
+already checks __module_text_address(), so it should remove the check of
+__module_text_address() in bt_address() at least.
+
+int __kernel_text_address(unsigned long addr)
+{
+ if (kernel_text_address(addr))
+ return 1;
+ ...
+ return 0;
+}
+
+int kernel_text_address(unsigned long addr)
+{
+ bool no_rcu;
+ int ret = 1;
+ ...
+ if (is_module_text_address(addr))
+ goto out;
+ ...
+ return ret;
+}
+
+bool is_module_text_address(unsigned long addr)
+{
+ guard(rcu)();
+ return __module_text_address(addr) != NULL;
+}
+
+Furthermore, there are two checks of __kernel_text_address(), one is in
+bt_address() and the other is after calling bt_address(), it looks like
+redundant.
+
+Handle the exception address first and then use __kernel_text_address()
+to validate the calculated address for exception or the normal address
+in bt_address(), then it can remove the check of __kernel_text_address()
+after calling bt_address().
+
+Just remove unnecessary checks, no functional changes intended.
+
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Stable-dep-of: 055c7e75190e ("LoongArch: Handle percpu handler address for ORC unwinder")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 59809c3406c03..4924d1ecc4579 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -359,12 +359,6 @@ static inline unsigned long bt_address(unsigned long ra)
+ {
+ extern unsigned long eentry;
+
+- if (__kernel_text_address(ra))
+- return ra;
+-
+- if (__module_text_address(ra))
+- return ra;
+-
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+@@ -382,10 +376,13 @@ static inline unsigned long bt_address(unsigned long ra)
+ break;
+ }
+
+- return func + offset;
++ ra = func + offset;
+ }
+
+- return ra;
++ if (__kernel_text_address(ra))
++ return ra;
++
++ return 0;
+ }
+
+ bool unwind_next_frame(struct unwind_state *state)
+@@ -511,9 +508,6 @@ bool unwind_next_frame(struct unwind_state *state)
+ goto err;
+ }
+
+- if (!__kernel_text_address(state->pc))
+- goto err;
+-
+ return true;
+
+ err:
+--
+2.51.0
+
--- /dev/null
+From 4486488eae06d6b4f775b552d49a5213efb05947 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 09:39:01 +0530
+Subject: mailbox: Allow controller specific mapping using fwnode
+
+From: Anup Patel <apatel@ventanamicro.com>
+
+[ Upstream commit ba879dfc0574878f3e08f217b2b4fdf845c426c0 ]
+
+Introduce an optional fw_xlate() callback which allows a mailbox controller
+driver to provide controller specific mapping using fwnode.
+
+The Linux OF framework already implements fwnode operations for the
+Linux DD framework so the fw_xlate() callback works fine with device
+tree as well.
+
+Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Anup Patel <apatel@ventanamicro.com>
+Link: https://lore.kernel.org/r/20250818040920.272664-6-apatel@ventanamicro.com
+Signed-off-by: Paul Walmsley <pjw@kernel.org>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 65 ++++++++++++++++++------------
+ include/linux/mailbox_controller.h | 3 ++
+ 2 files changed, 43 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 7dcbca48d1a0f..892aa0a048e0f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -15,6 +15,7 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/property.h>
+ #include <linux/spinlock.h>
+
+ #include "mailbox.h"
+@@ -396,34 +397,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
+ */
+ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ {
+- struct device *dev = cl->dev;
++ struct fwnode_reference_args fwspec;
++ struct fwnode_handle *fwnode;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
++ struct device *dev;
++ unsigned int i;
+ int ret;
+
+- if (!dev || !dev->of_node) {
+- pr_debug("%s: No owner device node\n", __func__);
++ dev = cl->dev;
++ if (!dev) {
++ pr_debug("No owner device\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+- ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+- index, &spec);
++ fwnode = dev_fwnode(dev);
++ if (!fwnode) {
++ dev_dbg(dev, "No owner fwnode\n");
++ return ERR_PTR(-ENODEV);
++ }
++
++ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
++ 0, index, &fwspec);
+ if (ret) {
+- dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
++ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
+ return ERR_PTR(ret);
+ }
+
++ spec.np = to_of_node(fwspec.fwnode);
++ spec.args_count = fwspec.nargs;
++ for (i = 0; i < spec.args_count; i++)
++ spec.args[i] = fwspec.args[i];
++
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+- list_for_each_entry(mbox, &mbox_cons, node)
+- if (mbox->dev->of_node == spec.np) {
+- chan = mbox->of_xlate(mbox, &spec);
+- if (!IS_ERR(chan))
+- break;
++ list_for_each_entry(mbox, &mbox_cons, node) {
++ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
++ if (mbox->fw_xlate) {
++ chan = mbox->fw_xlate(mbox, &fwspec);
++ if (!IS_ERR(chan))
++ break;
++ } else if (mbox->of_xlate) {
++ chan = mbox->of_xlate(mbox, &spec);
++ if (!IS_ERR(chan))
++ break;
++ }
+ }
++ }
+
+- of_node_put(spec.np);
++ fwnode_handle_put(fwspec.fwnode);
+
+ if (IS_ERR(chan))
+ return chan;
+@@ -440,15 +463,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
+ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name)
+ {
+- struct device_node *np = cl->dev->of_node;
+- int index;
+-
+- if (!np) {
+- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
+- return ERR_PTR(-EINVAL);
+- }
++ int index = device_property_match_string(cl->dev, "mbox-names", name);
+
+- index = of_property_match_string(np, "mbox-names", name);
+ if (index < 0) {
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+@@ -485,9 +501,8 @@ void mbox_free_channel(struct mbox_chan *chan)
+ }
+ EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+-static struct mbox_chan *
+-of_mbox_index_xlate(struct mbox_controller *mbox,
+- const struct of_phandle_args *sp)
++static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
++ const struct fwnode_reference_args *sp)
+ {
+ int ind = sp->args[0];
+
+@@ -540,8 +555,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
+ spin_lock_init(&chan->lock);
+ }
+
+- if (!mbox->of_xlate)
+- mbox->of_xlate = of_mbox_index_xlate;
++ if (!mbox->fw_xlate && !mbox->of_xlate)
++ mbox->fw_xlate = fw_mbox_index_xlate;
+
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
+diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
+index 5fb0b65f45a2c..b91379922cb33 100644
+--- a/include/linux/mailbox_controller.h
++++ b/include/linux/mailbox_controller.h
+@@ -66,6 +66,7 @@ struct mbox_chan_ops {
+ * no interrupt rises. Ignored if 'txdone_irq' is set.
+ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
+ * last TX's status after these many millisecs
++ * @fw_xlate: Controller driver specific mapping of channel via fwnode
+ * @of_xlate: Controller driver specific mapping of channel via DT
+ * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
+ * channels.
+@@ -79,6 +80,8 @@ struct mbox_controller {
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
++ struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox,
++ const struct fwnode_reference_args *sp);
+ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp);
+ /* Internal to API */
+--
+2.51.0
+
--- /dev/null
+From d5a669d95fd53070a19d695339fcb486a3b5df74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:14 +0000
+Subject: mailbox: don't protect of_parse_phandle_with_args with con_mutex
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit 8c71c61fc613657d785a3377b4b34484bd978374 ]
+
+There are no concurrency problems if multiple consumers parse the
+phandle, don't gratuitously protect the parsing with the mutex used
+for the controllers list.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 92c2fb618c8e1..87de408fb068c 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -413,16 +413,15 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ return ERR_PTR(-ENODEV);
+ }
+
+- mutex_lock(&con_mutex);
+-
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+- mutex_unlock(&con_mutex);
+ return ERR_PTR(ret);
+ }
+
++ mutex_lock(&con_mutex);
++
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+--
+2.51.0
+
--- /dev/null
+From e1a0849f5779e1e3bc904aa94a975d444d6b26c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Nov 2025 06:22:50 +0000
+Subject: mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()
+
+From: Joonwon Kang <joonwonkang@google.com>
+
+[ Upstream commit fcd7f96c783626c07ee3ed75fa3739a8a2052310 ]
+
+Although it is guided that `#mbox-cells` must be at least 1, there are
+many instances of `#mbox-cells = <0>;` in the device tree. If that is
+the case and the corresponding mailbox controller does not provide
+`fw_xlate` and `of_xlate` function pointers, `fw_mbox_index_xlate()` will
+be used by default and out-of-bounds accesses could occur due to lack of
+bounds check in that function.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Joonwon Kang <joonwonkang@google.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 892aa0a048e0f..b4d52b814055b 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -504,12 +504,10 @@ EXPORT_SYMBOL_GPL(mbox_free_channel);
+ static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
+ {
+- int ind = sp->args[0];
+-
+- if (ind >= mbox->num_chans)
++ if (sp->nargs < 1 || sp->args[0] >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+- return &mbox->chans[ind];
++ return &mbox->chans[sp->args[0]];
+ }
+
+ /**
+--
+2.51.0
+
--- /dev/null
+From cf7d15653457e0e78a429b61535a6ff6116c1203 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:17 +0000
+Subject: mailbox: remove unused header files
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit 4de14ec76b5e67d824896f774b3a23d86a2ebc87 ]
+
+There's nothing used from these header files, remove their inclusion.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index c7134ece6d5dd..693975a87e19e 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,17 +6,14 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+-#include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+-#include <linux/interrupt.h>
+ #include <linux/mailbox_client.h>
+ #include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+-#include <linux/slab.h>
+ #include <linux/spinlock.h>
+
+ #include "mailbox.h"
+--
+2.51.0
+
--- /dev/null
+From ac358a9e16c39e6f3dbe5134eef4b40eb7747dd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:15 +0000
+Subject: mailbox: sort headers alphabetically
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit db824c1119fc16556a84cb7a771ca6553b3c3a45 ]
+
+Sorting headers alphabetically helps locating duplicates,
+and makes it easier to figure out where to insert new headers.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 14 +++++++-------
+ include/linux/mailbox_client.h | 2 +-
+ include/linux/mailbox_controller.h | 6 +++---
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 87de408fb068c..c7134ece6d5dd 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,18 +6,18 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+-#include <linux/interrupt.h>
+-#include <linux/spinlock.h>
+-#include <linux/mutex.h>
++#include <linux/bitops.h>
+ #include <linux/delay.h>
+-#include <linux/slab.h>
+-#include <linux/err.h>
+-#include <linux/module.h>
+ #include <linux/device.h>
+-#include <linux/bitops.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
+ #include <linux/mailbox_client.h>
+ #include <linux/mailbox_controller.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
+
+ #include "mailbox.h"
+
+diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
+index 734694912ef74..c6eea9afb943d 100644
+--- a/include/linux/mailbox_client.h
++++ b/include/linux/mailbox_client.h
+@@ -7,8 +7,8 @@
+ #ifndef __MAILBOX_CLIENT_H
+ #define __MAILBOX_CLIENT_H
+
+-#include <linux/of.h>
+ #include <linux/device.h>
++#include <linux/of.h>
+
+ struct mbox_chan;
+
+diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
+index 6fee33cb52f58..5fb0b65f45a2c 100644
+--- a/include/linux/mailbox_controller.h
++++ b/include/linux/mailbox_controller.h
+@@ -3,11 +3,11 @@
+ #ifndef __MAILBOX_CONTROLLER_H
+ #define __MAILBOX_CONTROLLER_H
+
++#include <linux/completion.h>
++#include <linux/device.h>
++#include <linux/hrtimer.h>
+ #include <linux/of.h>
+ #include <linux/types.h>
+-#include <linux/hrtimer.h>
+-#include <linux/device.h>
+-#include <linux/completion.h>
+
+ struct mbox_chan;
+
+--
+2.51.0
+
--- /dev/null
+From 53d7744585e53a42e050d577545a6ab1b70e1f82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 21:14:09 +0800
+Subject: mailbox: Use dev_err when there is error
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit 8da4988b6e645f3eaa590ea16f433583364fd09c ]
+
+Use dev_err to show the error log instead of using dev_dbg.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 693975a87e19e..4c27de9514e55 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -322,7 +322,7 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ int ret;
+
+ if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
+- dev_dbg(dev, "%s: mailbox not free\n", __func__);
++ dev_err(dev, "%s: mailbox not free\n", __func__);
+ return -EBUSY;
+ }
+
+@@ -413,7 +413,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
+- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
++ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 81d36c51b7af5abbcf63139f71b1a53db897659c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 21:14:13 +0800
+Subject: mailbox: Use guard/scoped_guard for con_mutex
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit 16da9a653c5bf5d97fb296420899fe9735aa9c3c ]
+
+Use guard and scoped_guard for con_mutex to simplify code.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 61 +++++++++++++++++----------------------
+ 1 file changed, 26 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 4c27de9514e55..7dcbca48d1a0f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,6 +6,7 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+@@ -370,13 +371,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ */
+ int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ {
+- int ret;
+-
+- mutex_lock(&con_mutex);
+- ret = __mbox_bind_client(chan, cl);
+- mutex_unlock(&con_mutex);
++ guard(mutex)(&con_mutex);
+
+- return ret;
++ return __mbox_bind_client(chan, cl);
+ }
+ EXPORT_SYMBOL_GPL(mbox_bind_client);
+
+@@ -417,28 +414,25 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ return ERR_PTR(ret);
+ }
+
+- mutex_lock(&con_mutex);
++ scoped_guard(mutex, &con_mutex) {
++ chan = ERR_PTR(-EPROBE_DEFER);
++ list_for_each_entry(mbox, &mbox_cons, node)
++ if (mbox->dev->of_node == spec.np) {
++ chan = mbox->of_xlate(mbox, &spec);
++ if (!IS_ERR(chan))
++ break;
++ }
+
+- chan = ERR_PTR(-EPROBE_DEFER);
+- list_for_each_entry(mbox, &mbox_cons, node)
+- if (mbox->dev->of_node == spec.np) {
+- chan = mbox->of_xlate(mbox, &spec);
+- if (!IS_ERR(chan))
+- break;
+- }
++ of_node_put(spec.np);
+
+- of_node_put(spec.np);
++ if (IS_ERR(chan))
++ return chan;
+
+- if (IS_ERR(chan)) {
+- mutex_unlock(&con_mutex);
+- return chan;
++ ret = __mbox_bind_client(chan, cl);
++ if (ret)
++ chan = ERR_PTR(ret);
+ }
+
+- ret = __mbox_bind_client(chan, cl);
+- if (ret)
+- chan = ERR_PTR(ret);
+-
+- mutex_unlock(&con_mutex);
+ return chan;
+ }
+ EXPORT_SYMBOL_GPL(mbox_request_channel);
+@@ -549,9 +543,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+- mutex_lock(&con_mutex);
+- list_add_tail(&mbox->node, &mbox_cons);
+- mutex_unlock(&con_mutex);
++ scoped_guard(mutex, &con_mutex)
++ list_add_tail(&mbox->node, &mbox_cons);
+
+ return 0;
+ }
+@@ -568,17 +561,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
+ if (!mbox)
+ return;
+
+- mutex_lock(&con_mutex);
+-
+- list_del(&mbox->node);
++ scoped_guard(mutex, &con_mutex) {
++ list_del(&mbox->node);
+
+- for (i = 0; i < mbox->num_chans; i++)
+- mbox_free_channel(&mbox->chans[i]);
++ for (i = 0; i < mbox->num_chans; i++)
++ mbox_free_channel(&mbox->chans[i]);
+
+- if (mbox->txdone_poll)
+- hrtimer_cancel(&mbox->poll_hrt);
+-
+- mutex_unlock(&con_mutex);
++ if (mbox->txdone_poll)
++ hrtimer_cancel(&mbox->poll_hrt);
++ }
+ }
+ EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+
+--
+2.51.0
+
--- /dev/null
+From 4ea5372e3c558c09fdba776f1c6e9fa694773e7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Jun 2025 08:54:12 +0200
+Subject: media: dw9714: add support for powerdown pin
+
+From: Matthias Fend <matthias.fend@emfend.at>
+
+[ Upstream commit 03dca1842421b068d6a65b8ae16e2191882c7753 ]
+
+Add support for the powerdown pin (xSD), which can be used to put the VCM
+driver into power down mode. This is useful, for example, if the VCM
+driver's power supply cannot be controlled. The use of the powerdown pin is
+optional.
+
+Signed-off-by: Matthias Fend <matthias.fend@emfend.at>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
+Stable-dep-of: 401aec35ac7b ("media: dw9714: Fix powerup sequence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/Kconfig | 2 +-
+ drivers/media/i2c/dw9714.c | 14 ++++++++++++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 5cb596f38de33..4703071352541 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -748,7 +748,7 @@ config VIDEO_AK7375
+
+ config VIDEO_DW9714
+ tristate "DW9714 lens voice coil support"
+- depends on I2C && VIDEO_DEV
++ depends on GPIOLIB && I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
+diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
+index 37572cd0c104b..e69dd3e14b844 100644
+--- a/drivers/media/i2c/dw9714.c
++++ b/drivers/media/i2c/dw9714.c
+@@ -2,6 +2,7 @@
+ // Copyright (c) 2015--2017 Intel Corporation.
+
+ #include <linux/delay.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+@@ -38,6 +39,7 @@ struct dw9714_device {
+ struct v4l2_subdev sd;
+ u16 current_val;
+ struct regulator *vcc;
++ struct gpio_desc *powerdown_gpio;
+ };
+
+ static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
+@@ -145,6 +147,8 @@ static int dw9714_power_up(struct dw9714_device *dw9714_dev)
+ if (ret)
+ return ret;
+
++ gpiod_set_value_cansleep(dw9714_dev->powerdown_gpio, 0);
++
+ usleep_range(1000, 2000);
+
+ return 0;
+@@ -152,6 +156,8 @@ static int dw9714_power_up(struct dw9714_device *dw9714_dev)
+
+ static int dw9714_power_down(struct dw9714_device *dw9714_dev)
+ {
++ gpiod_set_value_cansleep(dw9714_dev->powerdown_gpio, 1);
++
+ return regulator_disable(dw9714_dev->vcc);
+ }
+
+@@ -169,6 +175,14 @@ static int dw9714_probe(struct i2c_client *client)
+ if (IS_ERR(dw9714_dev->vcc))
+ return PTR_ERR(dw9714_dev->vcc);
+
++ dw9714_dev->powerdown_gpio = devm_gpiod_get_optional(&client->dev,
++ "powerdown",
++ GPIOD_OUT_HIGH);
++ if (IS_ERR(dw9714_dev->powerdown_gpio))
++ return dev_err_probe(&client->dev,
++ PTR_ERR(dw9714_dev->powerdown_gpio),
++ "could not get powerdown gpio\n");
++
+ rval = dw9714_power_up(dw9714_dev);
+ if (rval)
+ return dev_err_probe(&client->dev, rval,
+--
+2.51.0
+
--- /dev/null
+From 0f5fcf924843573803dec01963f29405daba5bc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Dec 2025 07:53:43 +0000
+Subject: media: dw9714: Fix powerup sequence
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 401aec35ac7bd04b4018a519257b945abb88e26c ]
+
+We have seen multiple I2C errors while doing stress tests on
+the module:
+
+dw9714 i2c-PRP0001:01: dw9714_vcm_resume I2C failure: -5
+dw9714 i2c-PRP0001:01: I2C write fail
+
+Inspecting the powerup sequence we found that it does not match the
+documentation at:
+https://blog.arducam.com/downloads/DW9714A-DONGWOON(Autofocus_motor_manual).pdf
+
+"""
+(2) DW9714A requires waiting time of 12ms after power on. During this
+waiting time, the offset calibration of internal amplifier is
+operating for minimization of output offset current .
+"""
+
+This patch increases the powerup delay to follow the documentation.
+
+Fixes: 9d00ccabfbb5 ("media: i2c: dw9714: Fix occasional probe errors")
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Hans de Goede <johannes.goede@oss.qualcomm.com>
+Tested-by: Neil Sun <neil.sun@lcfuturecenter.com>
+Reported-by: Naomi Huang <naomi.huang@lcfuturecenter.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/dw9714.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
+index e69dd3e14b844..8fee13e9b3a0b 100644
+--- a/drivers/media/i2c/dw9714.c
++++ b/drivers/media/i2c/dw9714.c
+@@ -149,7 +149,7 @@ static int dw9714_power_up(struct dw9714_device *dw9714_dev)
+
+ gpiod_set_value_cansleep(dw9714_dev->powerdown_gpio, 0);
+
+- usleep_range(1000, 2000);
++ usleep_range(12000, 14000);
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 0d97a3e1b6cd9d35bd00162f9863c715aab82d69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Jun 2025 08:54:11 +0200
+Subject: media: dw9714: move power sequences to dedicated functions
+
+From: Matthias Fend <matthias.fend@emfend.at>
+
+[ Upstream commit 1eefe42e9de503e422a9c925eebdbd215ee28966 ]
+
+Move the power-up and power-down sequences to their own functions. This is
+a preparation for the upcoming powerdown pin support.
+
+Signed-off-by: Matthias Fend <matthias.fend@emfend.at>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
+Stable-dep-of: 401aec35ac7b ("media: dw9714: Fix powerup sequence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/dw9714.c | 44 +++++++++++++++++++++++++-------------
+ 1 file changed, 29 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
+index 2ddd7daa79e28..37572cd0c104b 100644
+--- a/drivers/media/i2c/dw9714.c
++++ b/drivers/media/i2c/dw9714.c
+@@ -137,6 +137,24 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
+ return hdl->error;
+ }
+
++static int dw9714_power_up(struct dw9714_device *dw9714_dev)
++{
++ int ret;
++
++ ret = regulator_enable(dw9714_dev->vcc);
++ if (ret)
++ return ret;
++
++ usleep_range(1000, 2000);
++
++ return 0;
++}
++
++static int dw9714_power_down(struct dw9714_device *dw9714_dev)
++{
++ return regulator_disable(dw9714_dev->vcc);
++}
++
+ static int dw9714_probe(struct i2c_client *client)
+ {
+ struct dw9714_device *dw9714_dev;
+@@ -151,13 +169,10 @@ static int dw9714_probe(struct i2c_client *client)
+ if (IS_ERR(dw9714_dev->vcc))
+ return PTR_ERR(dw9714_dev->vcc);
+
+- rval = regulator_enable(dw9714_dev->vcc);
+- if (rval < 0) {
+- dev_err(&client->dev, "failed to enable vcc: %d\n", rval);
+- return rval;
+- }
+-
+- usleep_range(1000, 2000);
++ rval = dw9714_power_up(dw9714_dev);
++ if (rval)
++ return dev_err_probe(&client->dev, rval,
++ "failed to power up: %d\n", rval);
+
+ v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
+ dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+@@ -185,7 +200,7 @@ static int dw9714_probe(struct i2c_client *client)
+ return 0;
+
+ err_cleanup:
+- regulator_disable(dw9714_dev->vcc);
++ dw9714_power_down(dw9714_dev);
+ v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
+ media_entity_cleanup(&dw9714_dev->sd.entity);
+
+@@ -200,10 +215,10 @@ static void dw9714_remove(struct i2c_client *client)
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+- ret = regulator_disable(dw9714_dev->vcc);
++ ret = dw9714_power_down(dw9714_dev);
+ if (ret) {
+ dev_err(&client->dev,
+- "Failed to disable vcc: %d\n", ret);
++ "Failed to power down: %d\n", ret);
+ }
+ }
+ pm_runtime_set_suspended(&client->dev);
+@@ -234,9 +249,9 @@ static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
+ usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
+ }
+
+- ret = regulator_disable(dw9714_dev->vcc);
++ ret = dw9714_power_down(dw9714_dev);
+ if (ret)
+- dev_err(dev, "Failed to disable vcc: %d\n", ret);
++ dev_err(dev, "Failed to power down: %d\n", ret);
+
+ return ret;
+ }
+@@ -257,12 +272,11 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
+ if (pm_runtime_suspended(&client->dev))
+ return 0;
+
+- ret = regulator_enable(dw9714_dev->vcc);
++ ret = dw9714_power_up(dw9714_dev);
+ if (ret) {
+- dev_err(dev, "Failed to enable vcc: %d\n", ret);
++ dev_err(dev, "Failed to power up: %d\n", ret);
+ return ret;
+ }
+- usleep_range(1000, 2000);
+
+ for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
+ val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
+--
+2.51.0
+
--- /dev/null
+From 9a191c2db582f771f04f8949a3c370dbb2f09b55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 09:12:57 +0000
+Subject: media: tegra-video: Fix memory leak in __tegra_channel_try_format()
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 43e5302d22334f1183dec3e0d5d8007eefe2817c ]
+
+The state object allocated by __v4l2_subdev_state_alloc() must be freed
+with __v4l2_subdev_state_free() when it is no longer needed.
+
+In __tegra_channel_try_format(), two error paths return directly after
+v4l2_subdev_call() fails, without freeing the allocated 'sd_state'
+object. This violates the requirement and causes a memory leak.
+
+Fix this by introducing a cleanup label and using goto statements in the
+error paths to ensure that __v4l2_subdev_state_free() is always called
+before the function returns.
+
+Fixes: 56f64b82356b7 ("media: tegra-video: Use zero crop settings if subdev has no get_selection")
+Fixes: 1ebaeb09830f3 ("media: tegra-video: Add support for external sensor capture")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index 57a856a21e901..463410349d07e 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -440,7 +440,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *try_crop;
+- int ret;
++ int ret = 0;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+@@ -484,8 +484,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+- if (ret)
+- return -EINVAL;
++ if (ret) {
++ ret = -EINVAL;
++ goto out_free;
++ }
+
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
+@@ -497,14 +499,15 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ if (ret < 0)
+- return ret;
++ goto out_free;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
+
++out_free:
+ __v4l2_subdev_state_free(sd_state);
+
+- return 0;
++ return ret;
+ }
+
+ static int tegra_channel_try_format(struct file *file, void *fh,
+--
+2.51.0
+
--- /dev/null
+From 9aa31597e28063994b6f3f0f0d06a4927231609d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:24 +0800
+Subject: media: v4l2-mem2mem: Add a kref to the v4l2_m2m_dev structure
+
+From: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+
+[ Upstream commit db6b97a4f8041e479be9ef4b8b07022636c96f50 ]
+
+Adding a reference count to the v4l2_m2m_dev structure allow safely
+sharing it across multiple hardware nodes. This can be used to prevent
+running jobs concurrently on m2m cores that have some internal resource
+sharing.
+
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[hverkuil: fix typos in v4l2_m2m_put documentation]
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-mem2mem.c | 23 +++++++++++++++++++++++
+ include/media/v4l2-mem2mem.h | 21 +++++++++++++++++++++
+ 2 files changed, 44 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index eb22d6172462d..85006fb18f720 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -90,6 +90,7 @@ static const char * const m2m_entity_name[] = {
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
+ * @m2m_ops: driver callbacks
++ * @kref: device reference count
+ */
+ struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+@@ -109,6 +110,8 @@ struct v4l2_m2m_dev {
+ unsigned long job_queue_flags;
+
+ const struct v4l2_m2m_ops *m2m_ops;
++
++ struct kref kref;
+ };
+
+ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+@@ -1210,6 +1213,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
++ kref_init(&m2m_dev->kref);
+
+ return m2m_dev;
+ }
+@@ -1221,6 +1225,25 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_get(&m2m_dev->kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_get);
++
++static void v4l2_m2m_release_from_kref(struct kref *kref)
++{
++ struct v4l2_m2m_dev *m2m_dev = container_of(kref, struct v4l2_m2m_dev, kref);
++
++ v4l2_m2m_release(m2m_dev);
++}
++
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_put(&m2m_dev->kref, v4l2_m2m_release_from_kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_put);
++
+ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index 2e55a13ed3bb8..1f10f63ef3803 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -544,6 +544,27 @@ v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ */
+ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+
++/**
++ * v4l2_m2m_get() - take a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * This is used to share the M2M device across multiple devices. This
++ * can be used to avoid scheduling two hardware nodes concurrently.
++ */
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);
++
++/**
++ * v4l2_m2m_put() - remove a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * Once the M2M device has no more references, v4l2_m2m_release() will be
++ * called automatically. Users of this method should never call
++ * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
++ */
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);
++
+ /**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+--
+2.51.0
+
--- /dev/null
+From 66025d48611fb6b74640a347733a72df72fb8794 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:26 +0800
+Subject: media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit e0203ddf9af7c8e170e1e99ce83b4dc07f0cd765 ]
+
+For the i.MX8MQ platform, there is a hardware limitation: the g1 VPU and
+g2 VPU cannot decode simultaneously; otherwise, it will cause below bus
+error and produce corrupted pictures, even potentially lead to system hang.
+
+[ 110.527986] hantro-vpu 38310000.video-codec: frame decode timed out.
+[ 110.583517] hantro-vpu 38310000.video-codec: bus error detected.
+
+Therefore, it is necessary to ensure that g1 and g2 operate alternately.
+This allows for successful multi-instance decoding of H.264 and HEVC.
+
+To achieve this, g1 and g2 share the same v4l2_m2m_dev, and then the
+v4l2_m2m_dev can handle the scheduling.
+
+Fixes: cb5dd5a0fa518 ("media: hantro: Introduce G2/HEVC decoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Co-developed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/verisilicon/hantro.h | 2 +
+ .../media/platform/verisilicon/hantro_drv.c | 42 +++++++++++++++++--
+ .../media/platform/verisilicon/imx8m_vpu_hw.c | 8 ++++
+ 3 files changed, 49 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
+index 811260dc3c777..439f0adb4e816 100644
+--- a/drivers/media/platform/verisilicon/hantro.h
++++ b/drivers/media/platform/verisilicon/hantro.h
+@@ -77,6 +77,7 @@ struct hantro_irq {
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
++ * @shared_devices: an array of device ids that cannot run concurrently
+ */
+ struct hantro_variant {
+ unsigned int enc_offset;
+@@ -101,6 +102,7 @@ struct hantro_variant {
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
++ const struct of_device_id *shared_devices;
+ };
+
+ /**
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 137ca13eeed7c..01d6ab67a8bf3 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/clk.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -1038,6 +1039,41 @@ static int hantro_disable_multicore(struct hantro_dev *vpu)
+ return 0;
+ }
+
++static struct v4l2_m2m_dev *hantro_get_v4l2_m2m_dev(struct hantro_dev *vpu)
++{
++ struct device_node *node;
++ struct hantro_dev *shared_vpu;
++
++ if (!vpu->variant || !vpu->variant->shared_devices)
++ goto init_new_m2m_dev;
++
++ for_each_matching_node(node, vpu->variant->shared_devices) {
++ struct platform_device *pdev;
++ struct v4l2_m2m_dev *m2m_dev;
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev)
++ continue;
++
++ shared_vpu = platform_get_drvdata(pdev);
++ if (IS_ERR_OR_NULL(shared_vpu) || shared_vpu == vpu) {
++ platform_device_put(pdev);
++ continue;
++ }
++
++ v4l2_m2m_get(shared_vpu->m2m_dev);
++ m2m_dev = shared_vpu->m2m_dev;
++ platform_device_put(pdev);
++
++ of_node_put(node);
++
++ return m2m_dev;
++ }
++
++init_new_m2m_dev:
++ return v4l2_m2m_init(&vpu_m2m_ops);
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -1189,7 +1225,7 @@ static int hantro_probe(struct platform_device *pdev)
+ }
+ platform_set_drvdata(pdev, vpu);
+
+- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
++ vpu->m2m_dev = hantro_get_v4l2_m2m_dev(vpu);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+@@ -1228,7 +1264,7 @@ static int hantro_probe(struct platform_device *pdev)
+ hantro_remove_enc_func(vpu);
+ err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ err_clk_unprepare:
+@@ -1251,7 +1287,7 @@ static void hantro_remove(struct platform_device *pdev)
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+index 74fd985a8aad1..cdaac2f18fb54 100644
+--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+@@ -361,6 +361,12 @@ const struct hantro_variant imx8mq_vpu_variant = {
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+ };
+
++static const struct of_device_id imx8mq_vpu_shared_resources[] __initconst = {
++ { .compatible = "nxp,imx8mq-vpu-g1", },
++ { .compatible = "nxp,imx8mq-vpu-g2", },
++ { /* sentinel */ }
++};
++
+ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+@@ -374,6 +380,7 @@ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mq_vpu_g2_variant = {
+@@ -389,6 +396,7 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mm_vpu_g1_variant = {
+--
+2.51.0
+
--- /dev/null
+From a8218c0530eebf7c4f710f887a7fadee3d0333be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 1bce32bed9a1f..2d7f7cc5bfa9c 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -575,6 +575,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From b143297ecef9996965f9dc607445e523cd40d000 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:22 +0100
+Subject: memory: mtk-smi: fix device leaks on common probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 6cfa038bddd710f544076ea2ef7792fc82fbedd6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during common probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: 47404757702e ("memory: mtk-smi: Add device link for smi-sub-common")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 5.16: 038ae37c510f
+Cc: stable@vger.kernel.org # 5.16
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-2-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 2bc034dff691b..1bce32bed9a1f 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -564,6 +564,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
++ put_device(larb->smi_common_dev);
+ return ret;
+ }
+
+@@ -799,6 +800,7 @@ static void mtk_smi_common_remove(struct platform_device *pdev)
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
++ put_device(common->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From 69eb2c3c6bcf31b7b60eb5a7be317166e494598f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index 0472bcdff1307..b5729d6c0b47c 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -115,6 +115,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -140,7 +142,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -347,6 +349,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From 7fd33397c5cdd1f2fbf3468036fc14e536d7bc7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 22:16:08 -0800
+Subject: of/kexec: refactor ima_get_kexec_buffer() to use ima_validate_range()
+
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+
+[ Upstream commit 4d02233235ed0450de9c10fcdcf3484e3c9401ce ]
+
+Refactor the OF/DT ima_get_kexec_buffer() to use a generic helper to
+validate the address range. No functional change intended.
+
+Link: https://lkml.kernel.org/r/20251231061609.907170-3-harshit.m.mogalapalli@oracle.com
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Alexander Graf <graf@amazon.com>
+Cc: Ard Biesheuvel <ardb@kernel.org>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Borislav Betkov <bp@alien8.de>
+Cc: guoweikang <guoweikang.kernel@gmail.com>
+Cc: Henry Willard <henry.willard@oracle.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Bohac <jbohac@suse.cz>
+Cc: Joel Granados <joel.granados@kernel.org>
+Cc: Jonathan McDowell <noodles@fb.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Paul Webb <paul.x.webb@oracle.com>
+Cc: Sohil Mehta <sohil.mehta@intel.com>
+Cc: Sourabh Jain <sourabhjain@linux.ibm.com>
+Cc: Thomas Gleinxer <tglx@linutronix.de>
+Cc: Yifei Liu <yifei.l.liu@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/of/kexec.c | 15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
+index 5b924597a4deb..81f272b154760 100644
+--- a/drivers/of/kexec.c
++++ b/drivers/of/kexec.c
+@@ -128,7 +128,6 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
+ {
+ int ret, len;
+ unsigned long tmp_addr;
+- unsigned long start_pfn, end_pfn;
+ size_t tmp_size;
+ const void *prop;
+
+@@ -144,17 +143,9 @@ int __init ima_get_kexec_buffer(void **addr, size_t *size)
+ if (!tmp_size)
+ return -ENOENT;
+
+- /*
+- * Calculate the PFNs for the buffer and ensure
+- * they are with in addressable memory.
+- */
+- start_pfn = PHYS_PFN(tmp_addr);
+- end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
+- if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
+- pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
+- tmp_addr, tmp_size);
+- return -EINVAL;
+- }
++ ret = ima_validate_range(tmp_addr, tmp_size);
++ if (ret)
++ return ret;
+
+ *addr = __va(tmp_addr);
+ *size = tmp_size;
+--
+2.51.0
+
--- /dev/null
+From 960bf5b154aa7349ba40c7beb52be14e94e659d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 11:59:34 +0100
+Subject: PCI: dw-rockchip: Don't wait for link since we can detect Link Up
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit ec9fd499b9c60a187ac8d6414c3c343c77d32e42 ]
+
+The Root Complex specific device tree binding for pcie-dw-rockchip has the
+'sys' interrupt marked as required.
+
+The driver requests the 'sys' IRQ unconditionally, and errors out if not
+provided.
+
+Thus, we can unconditionally set 'use_linkup_irq', so dw_pcie_host_init()
+doesn't wait for the link to come up.
+
+This will skip the wait for link up (since the bus will be enumerated once
+the link up IRQ is triggered), which reduces the bootup time.
+
+Link: https://lore.kernel.org/r/20250113-rockchip-no-wait-v1-1-25417f37b92f@kernel.org
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Stable-dep-of: fc6298086bfa ("Revert "PCI: dw-rockchip: Don't wait for link since we can detect Link Up"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 6b113a1212a92..8bcde64a7fe52 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -433,6 +433,7 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
++ pp->use_linkup_irq = true;
+
+ return dw_pcie_host_init(pp);
+ }
+--
+2.51.0
+
--- /dev/null
+From f9595b4786f1b5f991133725cc68bd2ed1cf6f7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 23 Nov 2024 00:40:00 +0530
+Subject: PCI: qcom: Don't wait for link if we can detect Link Up
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+
+[ Upstream commit 36971d6c5a9a134c15760ae9fd13c6d5f9a36abb ]
+
+If we have a 'global' IRQ for Link Up events, we need not wait for the
+link to be up during PCI initialization, which reduces startup time.
+
+Check for 'global' IRQ, and if present, set 'use_linkup_irq',
+so dw_pcie_host_init() doesn't wait for the link to come up.
+
+Link: https://lore.kernel.org/r/20241123-remove_wait2-v5-2-b5f9e6b794c2@quicinc.com
+Signed-off-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Stable-dep-of: e9ce5b380443 ("Revert "PCI: qcom: Don't wait for link if we can detect Link Up"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 5d27cd149f512..0205c18d95a01 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1696,6 +1696,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, pcie);
+
++ irq = platform_get_irq_byname_optional(pdev, "global");
++ if (irq > 0)
++ pp->use_linkup_irq = true;
++
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+@@ -1709,7 +1713,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
+ goto err_host_deinit;
+ }
+
+- irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+--
+2.51.0
+
--- /dev/null
+From e9e06daee214fe0b2b1b25055c80c3dca2983542 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Dec 2025 16:56:54 +0200
+Subject: PCI: Use resource_set_range() that correctly sets ->end
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 11721c45a8266a9d0c9684153d20e37159465f96 ]
+
+__pci_read_base() sets resource start and end addresses when resource
+is larger than 4G but pci_bus_addr_t or resource_size_t are not capable
+of representing 64-bit PCI addresses. This creates a problematic
+resource that has non-zero flags but the start and end addresses do not
+yield to resource size of 0 but 1.
+
+Replace custom resource addresses setup with resource_set_range()
+that correctly sets end address as -1 which results in resource_size()
+returning 0.
+
+For consistency, also use resource_set_range() in the other branch that
+does size based resource setup.
+
+Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
+Link: https://lore.kernel.org/all/20251207215359.28895-1-ansuelsmth@gmail.com/T/#m990492684913c5a158ff0e5fc90697d8ad95351b
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Cc: stable@vger.kernel.org
+Cc: Christian Marangi <ansuelsmth@gmail.com>
+Link: https://patch.msgid.link/20251208145654.5294-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/probe.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 9e419f14738a2..9e71eb4d1010e 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -263,8 +263,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
+ && sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+- res->start = 0;
+- res->end = 0;
++ resource_set_range(res, 0, 0);
+ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+ res_name, (unsigned long long)sz64);
+ goto out;
+@@ -273,8 +272,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
+ res->flags |= IORESOURCE_UNSET;
+- res->start = 0;
+- res->end = sz64 - 1;
++ resource_set_range(res, 0, sz64);
+ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+ res_name, (unsigned long long)l64);
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From f4bbd32b2d2714484612a53146d04f8f7a7fc292 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 13:06:03 +0300
+Subject: resource: Add resource set range and size helpers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 9fb6fef0fb49124291837af1da5028f79d53f98e ]
+
+Setting the end address for a resource with a given size lacks a helper and
+is therefore coded manually unlike the getter side which has a helper for
+resource size calculation. Also, almost all callsites that calculate the
+end address for a resource also set the start address right before it like
+this:
+
+ res->start = start_addr;
+ res->end = res->start + size - 1;
+
+Add resource_set_range(res, start_addr, size) that sets the start address
+and calculates the end address to simplify this often repeated fragment.
+
+Also add resource_set_size() for the cases where setting the start address
+of the resource is not necessary but mention in its kerneldoc that
+resource_set_range() is preferred when setting both addresses.
+
+Link: https://lore.kernel.org/r/20240614100606.15830-2-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ioport.h | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 6e9fb667a1c5a..5385349f0b8a6 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -249,6 +249,38 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
++
++/**
++ * resource_set_size - Calculate resource end address from size and start
++ * @res: Resource descriptor
++ * @size: Size of the resource
++ *
++ * Calculate the end address for @res based on @size.
++ *
++ * Note: The start address of @res must be set when calling this function.
++ * Prefer resource_set_range() if setting both the start address and @size.
++ */
++static inline void resource_set_size(struct resource *res, resource_size_t size)
++{
++ res->end = res->start + size - 1;
++}
++
++/**
++ * resource_set_range - Set resource start and end addresses
++ * @res: Resource descriptor
++ * @start: Start address for the resource
++ * @size: Size of the resource
++ *
++ * Set @res start address and calculate the end address based on @size.
++ */
++static inline void resource_set_range(struct resource *res,
++ resource_size_t start,
++ resource_size_t size)
++{
++ res->start = start;
++ resource_set_size(res, size);
++}
++
+ static inline resource_size_t resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+--
+2.51.0
+
--- /dev/null
+From e0b3ae0e51388c205c0d21fac5739e0c0c68a2d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Dec 2025 07:42:08 +0100
+Subject: Revert "PCI: dw-rockchip: Don't wait for link since we can detect
+ Link Up"
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit fc6298086bfacaa7003b0bd1da4e4f42b29f7d77 ]
+
+This reverts commit ec9fd499b9c60a187ac8d6414c3c343c77d32e42.
+
+While this fake hotplugging was a nice idea, it has shown that this feature
+does not handle PCIe switches correctly:
+pci_bus 0004:43: busn_res: can not insert [bus 43-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:43: busn_res: [bus 43-41] end is updated to 43
+pci_bus 0004:43: busn_res: can not insert [bus 43] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:00.0: devices behind bridge are unusable because [bus 43] cannot be assigned for them
+pci_bus 0004:44: busn_res: can not insert [bus 44-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:44: busn_res: [bus 44-41] end is updated to 44
+pci_bus 0004:44: busn_res: can not insert [bus 44] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:02.0: devices behind bridge are unusable because [bus 44] cannot be assigned for them
+pci_bus 0004:45: busn_res: can not insert [bus 45-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:45: busn_res: [bus 45-41] end is updated to 45
+pci_bus 0004:45: busn_res: can not insert [bus 45] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:06.0: devices behind bridge are unusable because [bus 45] cannot be assigned for them
+pci_bus 0004:46: busn_res: can not insert [bus 46-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:46: busn_res: [bus 46-41] end is updated to 46
+pci_bus 0004:46: busn_res: can not insert [bus 46] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:0e.0: devices behind bridge are unusable because [bus 46] cannot be assigned for them
+pci_bus 0004:42: busn_res: [bus 42-41] end is updated to 46
+pci_bus 0004:42: busn_res: can not insert [bus 42-46] under [bus 41] (conflicts with (null) [bus 41])
+pci 0004:41:00.0: devices behind bridge are unusable because [bus 42-46] cannot be assigned for them
+pcieport 0004:40:00.0: bridge has subordinate 41 but max busn 46
+
+During the initial scan, PCI core doesn't see the switch and since the Root
+Port is not hot plug capable, the secondary bus number gets assigned as the
+subordinate bus number. This means, the PCI core assumes that only one bus
+will appear behind the Root Port since the Root Port is not hot plug
+capable.
+
+This works perfectly fine for PCIe endpoints connected to the Root Port,
+since they don't extend the bus. However, if a PCIe switch is connected,
+then there is a problem when the downstream busses starts showing up and
+the PCI core doesn't extend the subordinate bus number and bridge resources
+after initial scan during boot.
+
+The long term plan is to migrate this driver to the upcoming pwrctrl APIs
+that are supposed to handle this problem elegantly.
+
+Suggested-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Tested-by: Shawn Lin <shawn.lin@rock-chips.com>
+Acked-by: Shawn Lin <shawn.lin@rock-chips.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251222064207.3246632-9-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 8bcde64a7fe52..6b113a1212a92 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -433,7 +433,6 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+- pp->use_linkup_irq = true;
+
+ return dw_pcie_host_init(pp);
+ }
+--
+2.51.0
+
--- /dev/null
+From a95baac6939d49894d7aea67cb2a2dd2b1675f37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Dec 2025 07:42:10 +0100
+Subject: Revert "PCI: qcom: Don't wait for link if we can detect Link Up"
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit e9ce5b3804436301ab343bc14203a4c14b336d1b ]
+
+This reverts commit 36971d6c5a9a134c15760ae9fd13c6d5f9a36abb.
+
+While this fake hotplugging was a nice idea, it has shown that this feature
+does not handle PCIe switches correctly:
+pci_bus 0004:43: busn_res: can not insert [bus 43-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:43: busn_res: [bus 43-41] end is updated to 43
+pci_bus 0004:43: busn_res: can not insert [bus 43] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:00.0: devices behind bridge are unusable because [bus 43] cannot be assigned for them
+pci_bus 0004:44: busn_res: can not insert [bus 44-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:44: busn_res: [bus 44-41] end is updated to 44
+pci_bus 0004:44: busn_res: can not insert [bus 44] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:02.0: devices behind bridge are unusable because [bus 44] cannot be assigned for them
+pci_bus 0004:45: busn_res: can not insert [bus 45-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:45: busn_res: [bus 45-41] end is updated to 45
+pci_bus 0004:45: busn_res: can not insert [bus 45] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:06.0: devices behind bridge are unusable because [bus 45] cannot be assigned for them
+pci_bus 0004:46: busn_res: can not insert [bus 46-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:46: busn_res: [bus 46-41] end is updated to 46
+pci_bus 0004:46: busn_res: can not insert [bus 46] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:0e.0: devices behind bridge are unusable because [bus 46] cannot be assigned for them
+pci_bus 0004:42: busn_res: [bus 42-41] end is updated to 46
+pci_bus 0004:42: busn_res: can not insert [bus 42-46] under [bus 41] (conflicts with (null) [bus 41])
+pci 0004:41:00.0: devices behind bridge are unusable because [bus 42-46] cannot be assigned for them
+pcieport 0004:40:00.0: bridge has subordinate 41 but max busn 46
+
+During the initial scan, PCI core doesn't see the switch and since the Root
+Port is not hot plug capable, the secondary bus number gets assigned as the
+subordinate bus number. This means, the PCI core assumes that only one bus
+will appear behind the Root Port since the Root Port is not hot plug
+capable.
+
+This works perfectly fine for PCIe endpoints connected to the Root Port,
+since they don't extend the bus. However, if a PCIe switch is connected,
+then there is a problem when the downstream busses starts showing up and
+the PCI core doesn't extend the subordinate bus number and bridge resources
+after initial scan during boot.
+
+The long term plan is to migrate this driver to the upcoming pwrctrl APIs
+that are supposed to handle this problem elegantly.
+
+Suggested-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Tested-by: Shawn Lin <shawn.lin@rock-chips.com>
+Acked-by: Shawn Lin <shawn.lin@rock-chips.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251222064207.3246632-11-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 0205c18d95a01..5d27cd149f512 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1696,10 +1696,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, pcie);
+
+- irq = platform_get_irq_byname_optional(pdev, "global");
+- if (irq > 0)
+- pp->use_linkup_irq = true;
+-
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "cannot initialize host\n");
+@@ -1713,6 +1709,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
+ goto err_host_deinit;
+ }
+
++ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+--
+2.51.0
+
bpf-arm64-force-8-byte-alignment-for-jit-buffer-to-p.patch
bpf-fix-stack-out-of-bounds-write-in-devmap.patch
pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch
+x86-acpi-boot-correct-acpi_is_processor_usable-check.patch
+memory-mtk-smi-fix-device-leaks-on-common-probe.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+pci-dw-rockchip-don-t-wait-for-link-since-we-can-det.patch
+revert-pci-dw-rockchip-don-t-wait-for-link-since-we-.patch
+pci-qcom-don-t-wait-for-link-if-we-can-detect-link-u.patch
+revert-pci-qcom-don-t-wait-for-link-if-we-can-detect.patch
+resource-add-resource-set-range-and-size-helpers.patch
+pci-use-resource_set_range-that-correctly-sets-end.patch
+media-v4l2-mem2mem-add-a-kref-to-the-v4l2_m2m_dev-st.patch
+media-verisilicon-avoid-g2-bus-error-while-decoding-.patch
+media-tegra-video-fix-memory-leak-in-__tegra_channel.patch
+media-dw9714-move-power-sequences-to-dedicated-funct.patch
+media-dw9714-add-support-for-powerdown-pin.patch
+media-dw9714-fix-powerup-sequence.patch
+kvm-x86-ignore-ebusy-when-checking-nested-events-fro.patch
+ata-libata-scsi-refactor-ata_scsi_simulate.patch
+ata-libata-scsi-refactor-ata_scsiop_read_cap.patch
+ata-libata-scsi-refactor-ata_scsiop_maint_in.patch
+ata-libata-scsi-document-all-vpd-page-inquiry-actors.patch
+ata-libata-scsi-remove-struct-ata_scsi_args.patch
+ata-libata-remove-ata_dflag_zac-device-flag.patch
+ata-libata-introduce-ata_port_eh_scheduled.patch
+ata-libata-scsi-avoid-non-ncq-command-starvation.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+ext4-correct-the-comments-place-for-ext4_ext_may_zer.patch
+ext4-don-t-set-ext4_get_blocks_convert-when-splittin.patch
+mailbox-don-t-protect-of_parse_phandle_with_args-wit.patch
+mailbox-sort-headers-alphabetically.patch
+mailbox-remove-unused-header-files.patch
+mailbox-use-dev_err-when-there-is-error.patch
+mailbox-use-guard-scoped_guard-for-con_mutex.patch
+mailbox-allow-controller-specific-mapping-using-fwno.patch
+mailbox-prevent-out-of-bounds-access-in-fw_mbox_inde.patch
+ext4-add-ext4_try_lock_group-to-skip-busy-groups.patch
+ext4-factor-out-__ext4_mb_scan_group.patch
+ext4-factor-out-ext4_mb_might_prefetch.patch
+ext4-factor-out-ext4_mb_scan_group.patch
+ext4-convert-free-groups-order-lists-to-xarrays.patch
+ext4-refactor-choose-group-to-scan-group.patch
+ext4-implement-linear-like-traversal-across-order-xa.patch
+ext4-always-allocate-blocks-only-from-groups-inode-c.patch
+workqueue-add-system_percpu_wq-and-system_dfl_wq.patch
+input-synaptics_i2c-replace-use-of-system_wq-with-sy.patch
+input-synaptics_i2c-guard-polling-restart-in-resume.patch
+iommu-vt-d-skip-dev-iotlb-flush-for-inaccessible-pci.patch
+arm64-dts-rockchip-fix-rk356x-pcie-range-mappings.patch
+arm64-dts-rockchip-fix-rk3588-pcie-range-mappings.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+ima-kexec-silence-rcu-list-traversal-warning.patch
+ima-rename-variable-the-seq_file-file-to-ima_kexec_f.patch
+ima-define-and-call-ima_alloc_kexec_file_buf.patch
+kexec-define-functions-to-map-and-unmap-segments.patch
+ima-kexec-define-functions-to-copy-ima-log-at-soft-b.patch
+ima-verify-the-previous-kernel-s-ima-buffer-lies-in-.patch
+of-kexec-refactor-ima_get_kexec_buffer-to-use-ima_va.patch
+efi-cper-cxl-prefix-protocol-error-struct-and-functi.patch
+efi-cper-cxl-make-definitions-and-structures-global.patch
+acpi-ghes-cper-recognize-and-cache-cxl-protocol-erro.patch
+acpi-apei-ghes-add-helper-for-cper-cxl-protocol-erro.patch
+acpi-apei-ghes-disable-kasan-instrumentation-when-co.patch
+drm-exynos-vidi-fix-to-avoid-directly-dereferencing-.patch
+drm-exynos-vidi-remove-redundant-error-handling-in-v.patch
+drm-exynos-vidi-use-ctx-lock-to-protect-struct-vidi_.patch
+uprobes-switch-to-rcu-tasks-trace-flavor-for-better-.patch
+uprobes-fix-incorrect-lockdep-condition-in-filter_ch.patch
+btrfs-drop-unused-parameter-fs_info-from-do_reclaim_.patch
+btrfs-get-used-bytes-while-holding-lock-at-btrfs_rec.patch
+btrfs-fix-reclaimed-bytes-accounting-after-automatic.patch
+btrfs-fix-periodic-reclaim-condition.patch
+btrfs-zoned-fix-alloc_offset-calculation-for-partly-.patch
+btrfs-zoned-fixup-last-alloc-pointer-after-extent-re.patch
+btrfs-zoned-fixup-last-alloc-pointer-after-extent-re.patch-291
+btrfs-zoned-fix-stripe-width-calculation.patch
+btrfs-define-the-auto_kfree-auto_kvfree-helper-macro.patch
+btrfs-zoned-fixup-last-alloc-pointer-after-extent-re.patch-11126
+usb-cdns3-remove-redundant-if-branch.patch
+usb-cdns3-call-cdns_power_is_lost-only-once-in-cdns_.patch
+usb-cdns3-fix-role-switching-during-resume.patch
+drm-amd-fix-hang-on-amdgpu-unload-by-using-pci_dev_i.patch
+alsa-hda-conexant-add-quirk-for-hp-zbook-studio-g4.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+ksmbd-check-return-value-of-xa_store-in-krb5_authent.patch
+ksmbd-add-chann_lock-to-protect-ksmbd_chann_list-xar.patch
+loongarch-orc-use-rcu-in-all-users-of-__module_addre.patch
+loongarch-remove-unnecessary-checks-for-orc-unwinder.patch
+loongarch-handle-percpu-handler-address-for-orc-unwi.patch
+loongarch-remove-some-extern-variables-in-source-fil.patch
+alsa-hda-realtek-add-quirk-for-gigabyte-g5-kf5-2023.patch
+alsa-hda-realtek-add-quirk-for-samsung-galaxy-book3-.patch
+alsa-hda-conexant-fix-headphone-jack-handling-on-ace.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
--- /dev/null
+From 88cb079c522f9f33a8e25d9847eff3696480fd88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Jan 2026 10:16:11 -0800
+Subject: uprobes: Fix incorrect lockdep condition in filter_chain()
+
+From: Breno Leitao <leitao@debian.org>
+
+[ Upstream commit a56a38fd9196fc89401e498d70b7aa9c9679fa6e ]
+
+The list_for_each_entry_rcu() in filter_chain() uses
+rcu_read_lock_trace_held() as the lockdep condition, but the function
+holds consumer_rwsem, not the RCU trace lock.
+
+This gives me the following output when running with some locking debug
+option enabled:
+
+ kernel/events/uprobes.c:1141 RCU-list traversed in non-reader section!!
+ filter_chain
+ register_for_each_vma
+ uprobe_unregister_nosync
+ __probe_event_disable
+
+Remove the incorrect lockdep condition since the rwsem provides
+sufficient protection for the list traversal.
+
+Fixes: cc01bd044e6a ("uprobes: travers uprobe's consumer list locklessly under SRCU protection")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260128-uprobe_rcu-v2-1-994ea6d32730@debian.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/uprobes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 0eb9befe49a3c..e3c8d9900ca7f 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -949,7 +949,7 @@ static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
+ bool ret = false;
+
+ down_read(&uprobe->consumer_rwsem);
+- list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
++ list_for_each_entry(uc, &uprobe->consumers, cons_node) {
+ ret = consumer_filter(uc, mm);
+ if (ret)
+ break;
+--
+2.51.0
+
--- /dev/null
+From aa9f74a8c72350afcff7886fcd177d41e6ee5d03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Sep 2024 10:43:12 -0700
+Subject: uprobes: switch to RCU Tasks Trace flavor for better performance
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 87195a1ee332add27bd51448c6b54aad551a28f5 ]
+
+This patch switches uprobes SRCU usage to RCU Tasks Trace flavor, which
+is optimized for more lightweight and quick readers (at the expense of
+slower writers, which for uprobes is a fine tradeoff) and has better
+performance and scalability with number of CPUs.
+
+Similarly to baseline vs SRCU, we've benchmarked SRCU-based
+implementation vs RCU Tasks Trace implementation.
+
+SRCU
+====
+uprobe-nop ( 1 cpus): 3.276 ± 0.005M/s ( 3.276M/s/cpu)
+uprobe-nop ( 2 cpus): 4.125 ± 0.002M/s ( 2.063M/s/cpu)
+uprobe-nop ( 4 cpus): 7.713 ± 0.002M/s ( 1.928M/s/cpu)
+uprobe-nop ( 8 cpus): 8.097 ± 0.006M/s ( 1.012M/s/cpu)
+uprobe-nop (16 cpus): 6.501 ± 0.056M/s ( 0.406M/s/cpu)
+uprobe-nop (32 cpus): 4.398 ± 0.084M/s ( 0.137M/s/cpu)
+uprobe-nop (64 cpus): 6.452 ± 0.000M/s ( 0.101M/s/cpu)
+
+uretprobe-nop ( 1 cpus): 2.055 ± 0.001M/s ( 2.055M/s/cpu)
+uretprobe-nop ( 2 cpus): 2.677 ± 0.000M/s ( 1.339M/s/cpu)
+uretprobe-nop ( 4 cpus): 4.561 ± 0.003M/s ( 1.140M/s/cpu)
+uretprobe-nop ( 8 cpus): 5.291 ± 0.002M/s ( 0.661M/s/cpu)
+uretprobe-nop (16 cpus): 5.065 ± 0.019M/s ( 0.317M/s/cpu)
+uretprobe-nop (32 cpus): 3.622 ± 0.003M/s ( 0.113M/s/cpu)
+uretprobe-nop (64 cpus): 3.723 ± 0.002M/s ( 0.058M/s/cpu)
+
+RCU Tasks Trace
+===============
+uprobe-nop ( 1 cpus): 3.396 ± 0.002M/s ( 3.396M/s/cpu)
+uprobe-nop ( 2 cpus): 4.271 ± 0.006M/s ( 2.135M/s/cpu)
+uprobe-nop ( 4 cpus): 8.499 ± 0.015M/s ( 2.125M/s/cpu)
+uprobe-nop ( 8 cpus): 10.355 ± 0.028M/s ( 1.294M/s/cpu)
+uprobe-nop (16 cpus): 7.615 ± 0.099M/s ( 0.476M/s/cpu)
+uprobe-nop (32 cpus): 4.430 ± 0.007M/s ( 0.138M/s/cpu)
+uprobe-nop (64 cpus): 6.887 ± 0.020M/s ( 0.108M/s/cpu)
+
+uretprobe-nop ( 1 cpus): 2.174 ± 0.001M/s ( 2.174M/s/cpu)
+uretprobe-nop ( 2 cpus): 2.853 ± 0.001M/s ( 1.426M/s/cpu)
+uretprobe-nop ( 4 cpus): 4.913 ± 0.002M/s ( 1.228M/s/cpu)
+uretprobe-nop ( 8 cpus): 5.883 ± 0.002M/s ( 0.735M/s/cpu)
+uretprobe-nop (16 cpus): 5.147 ± 0.001M/s ( 0.322M/s/cpu)
+uretprobe-nop (32 cpus): 3.738 ± 0.008M/s ( 0.117M/s/cpu)
+uretprobe-nop (64 cpus): 4.397 ± 0.002M/s ( 0.069M/s/cpu)
+
+Peak throughput for uprobes increases from 8 mln/s to 10.3 mln/s
+(+28%!), and for uretprobes from 5.3 mln/s to 5.8 mln/s (+11%), as we
+have more work to do on uretprobes side.
+
+Even single-thread (no contention) performance is slightly better: 3.276
+mln/s to 3.396 mln/s (+3.5%) for uprobes, and 2.055 mln/s to 2.174 mln/s
+(+5.8%) for uretprobes.
+
+We also select TASKS_TRACE_RCU for UPROBES in Kconfig due to the new
+dependency.
+
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Oleg Nesterov <oleg@redhat.com>
+Link: https://lkml.kernel.org/r/20240910174312.3646590-1-andrii@kernel.org
+Stable-dep-of: a56a38fd9196 ("uprobes: Fix incorrect lockdep condition in filter_chain()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/Kconfig | 1 +
+ kernel/events/uprobes.c | 38 ++++++++++++++++----------------------
+ 2 files changed, 17 insertions(+), 22 deletions(-)
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 593452b43dd49..1812e4e4d7147 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -135,6 +135,7 @@ config KPROBES_ON_FTRACE
+ config UPROBES
+ def_bool n
+ depends on ARCH_SUPPORTS_UPROBES
++ select TASKS_TRACE_RCU
+ help
+ Uprobes is the user-space counterpart to kprobes: they
+ enable instrumentation applications (such as 'perf probe')
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index e30c4dd345f40..0eb9befe49a3c 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -26,6 +26,7 @@
+ #include <linux/task_work.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/khugepaged.h>
++#include <linux/rcupdate_trace.h>
+
+ #include <linux/uprobes.h>
+
+@@ -42,8 +43,6 @@ static struct rb_root uprobes_tree = RB_ROOT;
+ static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */
+ static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);
+
+-DEFINE_STATIC_SRCU(uprobes_srcu);
+-
+ #define UPROBES_HASH_SZ 13
+ /* serialize uprobe->pending_list */
+ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
+@@ -667,7 +666,7 @@ static void put_uprobe(struct uprobe *uprobe)
+ delayed_uprobe_remove(uprobe, NULL);
+ mutex_unlock(&delayed_uprobe_lock);
+
+- call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu);
++ call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu);
+ }
+
+ static __always_inline
+@@ -722,7 +721,7 @@ static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
+ struct rb_node *node;
+ unsigned int seq;
+
+- lockdep_assert(srcu_read_lock_held(&uprobes_srcu));
++ lockdep_assert(rcu_read_lock_trace_held());
+
+ do {
+ seq = read_seqcount_begin(&uprobes_seqcount);
+@@ -950,8 +949,7 @@ static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
+ bool ret = false;
+
+ down_read(&uprobe->consumer_rwsem);
+- list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+- srcu_read_lock_held(&uprobes_srcu)) {
++ list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
+ ret = consumer_filter(uc, mm);
+ if (ret)
+ break;
+@@ -1172,7 +1170,7 @@ void uprobe_unregister_sync(void)
+ * unlucky enough caller can free consumer's memory and cause
+ * handler_chain() or handle_uretprobe_chain() to do an use-after-free.
+ */
+- synchronize_srcu(&uprobes_srcu);
++ synchronize_rcu_tasks_trace();
+ }
+ EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
+
+@@ -1256,19 +1254,18 @@ EXPORT_SYMBOL_GPL(uprobe_register);
+ int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
+ {
+ struct uprobe_consumer *con;
+- int ret = -ENOENT, srcu_idx;
++ int ret = -ENOENT;
+
+ down_write(&uprobe->register_rwsem);
+
+- srcu_idx = srcu_read_lock(&uprobes_srcu);
+- list_for_each_entry_srcu(con, &uprobe->consumers, cons_node,
+- srcu_read_lock_held(&uprobes_srcu)) {
++ rcu_read_lock_trace();
++ list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
+ if (con == uc) {
+ ret = register_for_each_vma(uprobe, add ? uc : NULL);
+ break;
+ }
+ }
+- srcu_read_unlock(&uprobes_srcu, srcu_idx);
++ rcu_read_unlock_trace();
+
+ up_write(&uprobe->register_rwsem);
+
+@@ -2150,8 +2147,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+
+ current->utask->auprobe = &uprobe->arch;
+
+- list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+- srcu_read_lock_held(&uprobes_srcu)) {
++ list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
+ int rc = 0;
+
+ if (uc->handler) {
+@@ -2189,15 +2185,13 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
+ {
+ struct uprobe *uprobe = ri->uprobe;
+ struct uprobe_consumer *uc;
+- int srcu_idx;
+
+- srcu_idx = srcu_read_lock(&uprobes_srcu);
+- list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node,
+- srcu_read_lock_held(&uprobes_srcu)) {
++ rcu_read_lock_trace();
++ list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
+ if (uc->ret_handler)
+ uc->ret_handler(uc, ri->func, regs);
+ }
+- srcu_read_unlock(&uprobes_srcu, srcu_idx);
++ rcu_read_unlock_trace();
+ }
+
+ static struct return_instance *find_next_ret_chain(struct return_instance *ri)
+@@ -2282,13 +2276,13 @@ static void handle_swbp(struct pt_regs *regs)
+ {
+ struct uprobe *uprobe;
+ unsigned long bp_vaddr;
+- int is_swbp, srcu_idx;
++ int is_swbp;
+
+ bp_vaddr = uprobe_get_swbp_addr(regs);
+ if (bp_vaddr == uprobe_get_trampoline_vaddr())
+ return uprobe_handle_trampoline(regs);
+
+- srcu_idx = srcu_read_lock(&uprobes_srcu);
++ rcu_read_lock_trace();
+
+ uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
+ if (!uprobe) {
+@@ -2353,7 +2347,7 @@ static void handle_swbp(struct pt_regs *regs)
+
+ out:
+ /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
+- srcu_read_unlock(&uprobes_srcu, srcu_idx);
++ rcu_read_unlock_trace();
+ }
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 27596736f65560a3a158b2e085affe8bc27ba984 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 18:36:49 +0100
+Subject: usb: cdns3: call cdns_power_is_lost() only once in cdns_resume()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Théo Lebrun <theo.lebrun@bootlin.com>
+
+[ Upstream commit 17c6526b333cfd89a4c888a6f7c876c8c326e5ae ]
+
+cdns_power_is_lost() does a register read.
+Call it only once rather than twice.
+
+Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Link: https://lore.kernel.org/r/20250205-s2r-cdns-v7-4-13658a271c3c@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 98980a23e1c22..1243a5cea91b5 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -524,11 +524,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
+
+ int cdns_resume(struct cdns *cdns)
+ {
++ bool power_lost = cdns_power_is_lost(cdns);
+ enum usb_role real_role;
+ bool role_changed = false;
+ int ret = 0;
+
+- if (cdns_power_is_lost(cdns)) {
++ if (power_lost) {
+ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+@@ -551,7 +552,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+
+ if (cdns->roles[cdns->role]->resume)
+- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
++ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From e28dfb1322aa4a60bee947d0cabda8af9337c671 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 11:05:45 +0100
+Subject: usb: cdns3: fix role switching during resume
+
+From: Thomas Richard (TI) <thomas.richard@bootlin.com>
+
+[ Upstream commit 87e4b043b98a1d269be0b812f383881abee0ca45 ]
+
+If the role change while we are suspended, the cdns3 driver switches to the
+new mode during resume. However, switching to host mode in this context
+causes a NULL pointer dereference.
+
+The host role's start() operation registers a xhci-hcd device, but its
+probe is deferred while we are in the resume path. The host role's resume()
+operation assumes the xhci-hcd device is already probed, which is not the
+case, leading to the dereference. Since the start() operation of the new
+role is already called, the resume operation can be skipped.
+
+So skip the resume operation for the new role if a role switch occurs
+during resume. Once the resume sequence is complete, the xhci-hcd device
+can be probed in case of host mode.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000208
+Mem abort info:
+...
+Data abort info:
+...
+[0000000000000208] pgd=0000000000000000, p4d=0000000000000000
+Internal error: Oops: 0000000096000004 [#1] SMP
+Modules linked in:
+CPU: 0 UID: 0 PID: 146 Comm: sh Not tainted
+6.19.0-rc7-00013-g6e64f4aabfae-dirty #135 PREEMPT
+Hardware name: Texas Instruments J7200 EVM (DT)
+pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : usb_hcd_is_primary_hcd+0x0/0x1c
+lr : cdns_host_resume+0x24/0x5c
+...
+Call trace:
+ usb_hcd_is_primary_hcd+0x0/0x1c (P)
+ cdns_resume+0x6c/0xbc
+ cdns3_controller_resume.isra.0+0xe8/0x17c
+ cdns3_plat_resume+0x18/0x24
+ platform_pm_resume+0x2c/0x68
+ dpm_run_callback+0x90/0x248
+ device_resume+0x100/0x24c
+ dpm_resume+0x190/0x2ec
+ dpm_resume_end+0x18/0x34
+ suspend_devices_and_enter+0x2b0/0xa44
+ pm_suspend+0x16c/0x5fc
+ state_store+0x80/0xec
+ kobj_attr_store+0x18/0x2c
+ sysfs_kf_write+0x7c/0x94
+ kernfs_fop_write_iter+0x130/0x1dc
+ vfs_write+0x240/0x370
+ ksys_write+0x70/0x108
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x10c
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0x108
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+Code: 52800003 f9407ca5 d63f00a0 17ffffe4 (f9410401)
+---[ end trace 0000000000000000 ]---
+
+Cc: stable <stable@kernel.org>
+Fixes: 2cf2581cd229 ("usb: cdns3: add power lost support for system resume")
+Signed-off-by: Thomas Richard (TI) <thomas.richard@bootlin.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://patch.msgid.link/20260130-usb-cdns3-fix-role-switching-during-resume-v1-1-44c456852b52@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 1243a5cea91b5..f0e32227c0b79 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -551,7 +551,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+ }
+
+- if (cdns->roles[cdns->role]->resume)
++ if (!role_changed && cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 11da45460ba271425055ffdafdba3c14a0ac78b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Dec 2024 09:36:41 +0800
+Subject: usb: cdns3: remove redundant if branch
+
+From: Hongyu Xie <xiehongyu1@kylinos.cn>
+
+[ Upstream commit dedab674428f8a99468a4864c067128ba9ea83a6 ]
+
+cdns->role_sw->dev->driver_data gets set in routines showing below,
+cdns_init
+ sw_desc.driver_data = cdns;
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ dev_set_drvdata(&sw->dev, desc->driver_data);
+
+In cdns_resume,
+cdns->role = cdns_role_get(cdns->role_sw); //line redundant
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
+ dev_get_drvdata(&sw->dev)
+ return dev->driver_data
+return cdns->role;
+
+"line redundant" equals to,
+ cdns->role = cdns->role;
+
+So fix this if branch.
+
+Signed-off-by: Hongyu Xie <xiehongyu1@kylinos.cn>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/20241231013641.23908-1-xiehongyu1@kylinos.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 465e9267b49c1..98980a23e1c22 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -529,9 +529,7 @@ int cdns_resume(struct cdns *cdns)
+ int ret = 0;
+
+ if (cdns_power_is_lost(cdns)) {
+- if (cdns->role_sw) {
+- cdns->role = cdns_role_get(cdns->role_sw);
+- } else {
++ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+ ret = cdns_hw_role_switch(cdns);
+--
+2.51.0
+
--- /dev/null
+From e6f18f25de213a912c6fe4e1b86764af404e9b17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Jun 2025 15:35:29 +0200
+Subject: workqueue: Add system_percpu_wq and system_dfl_wq
+
+From: Marco Crivellari <marco.crivellari@suse.com>
+
+[ Upstream commit 128ea9f6ccfb6960293ae4212f4f97165e42222d ]
+
+Currently, if a user enqueue a work item using schedule_delayed_work() the
+used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use
+WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
+schedule_work() that is using system_wq and queue_work(), that makes use
+again of WORK_CPU_UNBOUND.
+
+This lack of consistency cannot be addressed without refactoring the API.
+
+system_wq is a per-CPU worqueue, yet nothing in its name tells about that
+CPU affinity constraint, which is very often not required by users. Make it
+clear by adding a system_percpu_wq.
+
+system_unbound_wq should be the default workqueue so as not to enforce
+locality constraints for random work whenever it's not required.
+
+Adding system_dfl_wq to encourage its use when unbound work should be used.
+
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Stable-dep-of: 870c2e7cd881 ("Input: synaptics_i2c - guard polling restart in resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/workqueue.h | 8 +++++---
+ kernel/workqueue.c | 13 +++++++++----
+ 2 files changed, 14 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 59c2695e12e76..23642bb1a103c 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -427,7 +427,7 @@ enum wq_consts {
+ /*
+ * System-wide workqueues which are always present.
+ *
+- * system_wq is the one used by schedule[_delayed]_work[_on]().
++ * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded. There are users which expect relatively
+ * short queue flush time. Don't queue works which can run for too
+ * long.
+@@ -438,7 +438,7 @@ enum wq_consts {
+ * system_long_wq is similar to system_wq but may host long running
+ * works. Queue flushing might take relatively long.
+ *
+- * system_unbound_wq is unbound workqueue. Workers are not bound to
++ * system_dfl_wq is unbound workqueue. Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as max_active limit is not reached and
+ * resources are available.
+@@ -455,10 +455,12 @@ enum wq_consts {
+ * system_bh[_highpri]_wq are convenience interface to softirq. BH work items
+ * are executed in the queueing CPU's BH context in the queueing order.
+ */
+-extern struct workqueue_struct *system_wq;
++extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
++extern struct workqueue_struct *system_percpu_wq;
+ extern struct workqueue_struct *system_highpri_wq;
+ extern struct workqueue_struct *system_long_wq;
+ extern struct workqueue_struct *system_unbound_wq;
++extern struct workqueue_struct *system_dfl_wq;
+ extern struct workqueue_struct *system_freezable_wq;
+ extern struct workqueue_struct *system_power_efficient_wq;
+ extern struct workqueue_struct *system_freezable_power_efficient_wq;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9f7f7244bdc8e..3840d7ce9cda0 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -508,12 +508,16 @@ static struct kthread_worker *pwq_release_worker __ro_after_init;
+
+ struct workqueue_struct *system_wq __ro_after_init;
+ EXPORT_SYMBOL(system_wq);
++struct workqueue_struct *system_percpu_wq __ro_after_init;
++EXPORT_SYMBOL(system_percpu_wq);
+ struct workqueue_struct *system_highpri_wq __ro_after_init;
+ EXPORT_SYMBOL_GPL(system_highpri_wq);
+ struct workqueue_struct *system_long_wq __ro_after_init;
+ EXPORT_SYMBOL_GPL(system_long_wq);
+ struct workqueue_struct *system_unbound_wq __ro_after_init;
+ EXPORT_SYMBOL_GPL(system_unbound_wq);
++struct workqueue_struct *system_dfl_wq __ro_after_init;
++EXPORT_SYMBOL_GPL(system_dfl_wq);
+ struct workqueue_struct *system_freezable_wq __ro_after_init;
+ EXPORT_SYMBOL_GPL(system_freezable_wq);
+ struct workqueue_struct *system_power_efficient_wq __ro_after_init;
+@@ -7848,10 +7852,11 @@ void __init workqueue_init_early(void)
+ }
+
+ system_wq = alloc_workqueue("events", 0, 0);
++ system_percpu_wq = alloc_workqueue("events", 0, 0);
+ system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
+ system_long_wq = alloc_workqueue("events_long", 0, 0);
+- system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
+- WQ_MAX_ACTIVE);
++ system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, WQ_MAX_ACTIVE);
++ system_dfl_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, WQ_MAX_ACTIVE);
+ system_freezable_wq = alloc_workqueue("events_freezable",
+ WQ_FREEZABLE, 0);
+ system_power_efficient_wq = alloc_workqueue("events_power_efficient",
+@@ -7862,8 +7867,8 @@ void __init workqueue_init_early(void)
+ system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0);
+ system_bh_highpri_wq = alloc_workqueue("events_bh_highpri",
+ WQ_BH | WQ_HIGHPRI, 0);
+- BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
+- !system_unbound_wq || !system_freezable_wq ||
++ BUG_ON(!system_wq || !system_percpu_wq|| !system_highpri_wq || !system_long_wq ||
++ !system_unbound_wq || !system_freezable_wq || !system_dfl_wq ||
+ !system_power_efficient_wq ||
+ !system_freezable_power_efficient_wq ||
+ !system_bh_wq || !system_bh_highpri_wq);
+--
+2.51.0
+
--- /dev/null
+From 98907d119e756f96c18949e8d6e2accae1fce894 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Nov 2025 14:53:57 +0000
+Subject: x86/acpi/boot: Correct acpi_is_processor_usable() check again
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+[ Upstream commit adbf61cc47cb72b102682e690ad323e1eda652c2 ]
+
+ACPI v6.3 defined a new "Online Capable" MADT LAPIC flag. This bit is
+used in conjunction with the "Enabled" MADT LAPIC flag to determine if
+a CPU can be enabled/hotplugged by the OS after boot.
+
+Before the new bit was defined, the "Enabled" bit was explicitly
+described like this (ACPI v6.0 wording provided):
+
+ "If zero, this processor is unusable, and the operating system
+ support will not attempt to use it"
+
+This means that CPU hotplug (based on MADT) is not possible. Many BIOS
+implementations follow this guidance. They may include LAPIC entries in
+MADT for unavailable CPUs, but since these entries are marked with
+"Enabled=0" it is expected that the OS will completely ignore these
+entries.
+
+However, QEMU will do the same (include entries with "Enabled=0") for
+the purpose of allowing CPU hotplug within the guest.
+
+Comment from QEMU function pc_madt_cpu_entry():
+
+ /* ACPI spec says that LAPIC entry for non present
+ * CPU may be omitted from MADT or it must be marked
+ * as disabled. However omitting non present CPU from
+ * MADT breaks hotplug on linux. So possible CPUs
+ * should be put in MADT but kept disabled.
+ */
+
+Recent Linux topology changes broke the QEMU use case. A following fix
+for the QEMU use case broke bare metal topology enumeration.
+
+Rework the Linux MADT LAPIC flags check to allow the QEMU use case only
+for guests and to maintain the ACPI spec behavior for bare metal.
+
+Remove an unnecessary check added to fix a bare metal case introduced by
+the QEMU "fix".
+
+ [ bp: Change logic as Michal suggested. ]
+ [ mingo: Removed misapplied -stable tag. ]
+
+Fixes: fed8d8773b8e ("x86/acpi/boot: Correct acpi_is_processor_usable() check")
+Fixes: f0551af02130 ("x86/topology: Ignore non-present APIC IDs in a present package")
+Closes: https://lore.kernel.org/r/20251024204658.3da9bf3f.michal.pecio@gmail.com
+Reported-by: Michal Pecio <michal.pecio@gmail.com>
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Michal Pecio <michal.pecio@gmail.com>
+Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Link: https://lore.kernel.org/20251111145357.4031846-1-yazen.ghannam@amd.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/acpi/boot.c | 12 ++++++++----
+ arch/x86/kernel/cpu/topology.c | 15 ---------------
+ 2 files changed, 8 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 63adda8a143f9..a1acff7782dbb 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -35,6 +35,7 @@
+ #include <asm/smp.h>
+ #include <asm/i8259.h>
+ #include <asm/setup.h>
++#include <asm/hypervisor.h>
+
+ #include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
+ static int __initdata acpi_force = 0;
+@@ -164,11 +165,14 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
+ if (lapic_flags & ACPI_MADT_ENABLED)
+ return true;
+
+- if (!acpi_support_online_capable ||
+- (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+- return true;
++ if (acpi_support_online_capable)
++ return lapic_flags & ACPI_MADT_ONLINE_CAPABLE;
+
+- return false;
++ /*
++ * QEMU expects legacy "Enabled=0" LAPIC entries to be counted as usable
++ * in order to support CPU hotplug in guests.
++ */
++ return !hypervisor_is_type(X86_HYPER_NATIVE);
+ }
+
+ static int __init
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index b2e313ea17bf6..03d3e1f1a407c 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -27,7 +27,6 @@
+ #include <xen/xen.h>
+
+ #include <asm/apic.h>
+-#include <asm/hypervisor.h>
+ #include <asm/io_apic.h>
+ #include <asm/mpspec.h>
+ #include <asm/smp.h>
+@@ -239,20 +238,6 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
+ cpuid_to_apicid[cpu] = apic_id;
+ topo_set_cpuids(cpu, apic_id, acpi_id);
+ } else {
+- u32 pkgid = topo_apicid(apic_id, TOPO_PKG_DOMAIN);
+-
+- /*
+- * Check for present APICs in the same package when running
+- * on bare metal. Allow the bogosity in a guest.
+- */
+- if (hypervisor_is_type(X86_HYPER_NATIVE) &&
+- topo_unit_count(pkgid, TOPO_PKG_DOMAIN, phys_cpu_present_map)) {
+- pr_info_once("Ignoring hot-pluggable APIC ID %x in present package.\n",
+- apic_id);
+- topo_info.nr_rejected_cpus++;
+- return;
+- }
+-
+ topo_info.nr_disabled_cpus++;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From b25ebf1adec9f4d047adb5dfef9a4b88ae155c3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 17:36:14 +0100
+Subject: accel/rocket: fix unwinding in error path in rocket_core_init
+
+From: Quentin Schulz <quentin.schulz@cherry.de>
+
+[ Upstream commit f509a081f6a289f7c66856333b3becce7a33c97e ]
+
+When rocket_job_init() is called, iommu_group_get() has already been
+called, therefore we should call iommu_group_put() and make the
+iommu_group pointer NULL. This aligns with what's done in
+rocket_core_fini().
+
+If pm_runtime_resume_and_get() somehow fails, not only should
+rocket_job_fini() be called but we should also unwind everything done
+before that, that is, disable PM, put the iommu_group, NULLify it and
+then call rocket_job_fini(). This is exactly what's done in
+rocket_core_fini() so let's call that function instead of duplicating
+the code.
+
+Fixes: 0810d5ad88a1 ("accel/rocket: Add job submission IOCTL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Quentin Schulz <quentin.schulz@cherry.de>
+Reviewed-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Signed-off-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Link: https://patch.msgid.link/20251215-rocket-error-path-v1-1-eec3bf29dc3b@cherry.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/rocket/rocket_core.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
+index abe7719c1db46..b3b2fa9ba645a 100644
+--- a/drivers/accel/rocket/rocket_core.c
++++ b/drivers/accel/rocket/rocket_core.c
+@@ -59,8 +59,11 @@ int rocket_core_init(struct rocket_core *core)
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+- if (err)
++ if (err) {
++ iommu_group_put(core->iommu_group);
++ core->iommu_group = NULL;
+ return err;
++ }
+
+ pm_runtime_use_autosuspend(dev);
+
+@@ -76,7 +79,7 @@ int rocket_core_init(struct rocket_core *core)
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+- rocket_job_fini(core);
++ rocket_core_fini(core);
+ return err;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c78678a6ba9394a221edd447890806dcecd971a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 17:36:15 +0100
+Subject: accel/rocket: fix unwinding in error path in rocket_probe
+
+From: Quentin Schulz <quentin.schulz@cherry.de>
+
+[ Upstream commit 34f4495a7f72895776b81969639f527c99eb12b9 ]
+
+When rocket_core_init() fails (as could be the case with EPROBE_DEFER),
+we need to properly unwind by decrementing the counter we just
+incremented and if this is the first core we failed to probe, remove the
+rocket DRM device with rocket_device_fini() as well. This matches the
+logic in rocket_remove(). Failing to properly unwind results in
+out-of-bounds accesses.
+
+Fixes: 0810d5ad88a1 ("accel/rocket: Add job submission IOCTL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Quentin Schulz <quentin.schulz@cherry.de>
+Reviewed-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Signed-off-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Link: https://patch.msgid.link/20251215-rocket-error-path-v1-2-eec3bf29dc3b@cherry.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/rocket/rocket_drv.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
+index 5c0b63f0a8f00..f6ef4c7aeef11 100644
+--- a/drivers/accel/rocket/rocket_drv.c
++++ b/drivers/accel/rocket/rocket_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+
++#include "rocket_device.h"
+ #include "rocket_drv.h"
+ #include "rocket_gem.h"
+ #include "rocket_job.h"
+@@ -158,6 +159,8 @@ static const struct drm_driver rocket_drm_driver = {
+
+ static int rocket_probe(struct platform_device *pdev)
+ {
++ int ret;
++
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+@@ -177,7 +180,17 @@ static int rocket_probe(struct platform_device *pdev)
+
+ rdev->num_cores++;
+
+- return rocket_core_init(&rdev->cores[core]);
++ ret = rocket_core_init(&rdev->cores[core]);
++ if (ret) {
++ rdev->num_cores--;
++
++ if (rdev->num_cores == 0) {
++ rocket_device_fini(rdev);
++ rdev = NULL;
++ }
++ }
++
++ return ret;
+ }
+
+ static void rocket_remove(struct platform_device *pdev)
+--
+2.51.0
+
--- /dev/null
+From a1c33a4e9add7f1d906b5e841a0a9a7d3f5fc204 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 11:14:23 +0100
+Subject: ACPI: APEI: GHES: Add helper for CPER CXL protocol errors checks
+
+From: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
+
+[ Upstream commit 70205869686212eb8e4cddf02bf87fd5fd597bc2 ]
+
+Move the CPER CXL protocol errors validity check out of
+cxl_cper_post_prot_err() to new cxl_cper_sec_prot_err_valid() and limit
+the serial number check only to CXL agents that are CXL devices (UEFI
+v2.10, Appendix N.2.13).
+
+Export the new symbol for reuse by ELOG.
+
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
+Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
+Signed-off-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
+[ rjw: Subject tweak ]
+Link: https://patch.msgid.link/20260114101543.85926-4-fabio.m.de.francesco@linux.intel.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: b584bfbd7ec4 ("ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing with clang < 18")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/Makefile | 1 +
+ drivers/acpi/apei/ghes.c | 18 +----------------
+ drivers/acpi/apei/ghes_helpers.c | 33 ++++++++++++++++++++++++++++++++
+ include/cxl/event.h | 10 ++++++++++
+ 4 files changed, 45 insertions(+), 17 deletions(-)
+ create mode 100644 drivers/acpi/apei/ghes_helpers.c
+
+diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
+index 2c474e6477e12..5db61dfb46915 100644
+--- a/drivers/acpi/apei/Makefile
++++ b/drivers/acpi/apei/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
++obj-$(CONFIG_ACPI_APEI_PCIEAER) += ghes_helpers.o
+ obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+ einj-y := einj-core.o
+ einj-$(CONFIG_ACPI_APEI_EINJ_CXL) += einj-cxl.o
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 42872fdc36bfc..fc96a5e234f06 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -746,24 +746,8 @@ static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+- pr_err_ratelimited("CXL CPER invalid agent type\n");
++ if (cxl_cper_sec_prot_err_valid(prot_err))
+ return;
+- }
+-
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+- pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+- return;
+- }
+-
+- if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+- pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+- prot_err->err_len);
+- return;
+- }
+-
+- if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+- pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+diff --git a/drivers/acpi/apei/ghes_helpers.c b/drivers/acpi/apei/ghes_helpers.c
+new file mode 100644
+index 0000000000000..f3d162139a974
+--- /dev/null
++++ b/drivers/acpi/apei/ghes_helpers.c
+@@ -0,0 +1,33 @@
++// SPDX-License-Identifier: GPL-2.0-only
++// Copyright(c) 2025 Intel Corporation. All rights reserved
++
++#include <linux/printk.h>
++#include <cxl/event.h>
++
++int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
++{
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
++ pr_err_ratelimited("CXL CPER invalid agent type\n");
++ return -EINVAL;
++ }
++
++ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
++ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
++ return -EINVAL;
++ }
++
++ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
++ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
++ prot_err->err_len);
++ return -EINVAL;
++ }
++
++ if ((prot_err->agent_type == RCD || prot_err->agent_type == DEVICE ||
++ prot_err->agent_type == LD || prot_err->agent_type == FMLD) &&
++ !(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
++ pr_warn_ratelimited(FW_WARN
++ "CXL CPER no device serial number\n");
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cxl_cper_sec_prot_err_valid);
+diff --git a/include/cxl/event.h b/include/cxl/event.h
+index 6fd90f9cc2034..4d7d1036ea9cb 100644
+--- a/include/cxl/event.h
++++ b/include/cxl/event.h
+@@ -320,4 +320,14 @@ static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data
+ }
+ #endif
+
++#ifdef CONFIG_ACPI_APEI_PCIEAER
++int cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err);
++#else
++static inline int
++cxl_cper_sec_prot_err_valid(struct cxl_cper_sec_prot_err *prot_err)
++{
++ return -EOPNOTSUPP;
++}
++#endif
++
+ #endif /* _LINUX_CXL_EVENT_H */
+--
+2.51.0
+
--- /dev/null
+From 4b79f6d3d3cce077122f1c6ad58ced5ad3f09bd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:27:11 -0700
+Subject: ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing
+ with clang < 18
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit b584bfbd7ec417f257f651cc00a90c66e31dfbf1 ]
+
+After a recent innocuous change to drivers/acpi/apei/ghes.c, building
+ARCH=arm64 allmodconfig with clang-17 or older (which has both
+CONFIG_KASAN=y and CONFIG_WERROR=y) fails with:
+
+ drivers/acpi/apei/ghes.c:902:13: error: stack frame size (2768) exceeds limit (2048) in 'ghes_do_proc' [-Werror,-Wframe-larger-than]
+ 902 | static void ghes_do_proc(struct ghes *ghes,
+ | ^
+
+A KASAN pass that removes unneeded stack instrumentation, enabled by
+default in clang-18 [1], drastically improves stack usage in this case.
+
+To avoid the warning in the common allmodconfig case when it can break
+the build, disable KASAN for ghes.o when compile testing with clang-17
+and older. Disabling KASAN outright may hide legitimate runtime issues,
+so live with the warning in that case; the user can either increase the
+frame warning limit or disable -Werror, which they should probably do
+when debugging with KASAN anyways.
+
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2148
+Link: https://github.com/llvm/llvm-project/commit/51fbab134560ece663517bf1e8c2a30300d08f1a [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20260114-ghes-avoid-wflt-clang-older-than-18-v1-1-9c8248bfe4f4@kernel.org
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/Makefile | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
+index 5db61dfb46915..1a0b85923cd42 100644
+--- a/drivers/acpi/apei/Makefile
++++ b/drivers/acpi/apei/Makefile
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(CONFIG_COMPILE_TEST)_$(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_y_)
++KASAN_SANITIZE_ghes.o := n
++endif
+ obj-$(CONFIG_ACPI_APEI_PCIEAER) += ghes_helpers.o
+ obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+ einj-y := einj-core.o
+--
+2.51.0
+
--- /dev/null
+From e7e318b4fb5f9d84d1f4ea2db22270a544a585eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:47:44 +0200
+Subject: ARM: dts: imx53-usbarmory: Replace license text comment with SPDX
+ identifier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bence Csókás <csokas.bence@prolan.hu>
+
+[ Upstream commit faa6baa36497958dd8fd5561daa37249779446d7 ]
+
+Replace verbatim license text with a `SPDX-License-Identifier`.
+
+The comment header mis-attributes this license to be "X11", but the
+license text does not include the last line "Except as contained in this
+notice, the name of the X Consortium shall not be used in advertising or
+otherwise to promote the sale, use or other dealings in this Software
+without prior written authorization from the X Consortium.". Therefore,
+this license is actually equivalent to the SPDX "MIT" license (confirmed
+by text diffing).
+
+Cc: Andrej Rosano <andrej@inversepath.com>
+Signed-off-by: Bence Csókás <csokas.bence@prolan.hu>
+Acked-by: Andrej Rosano <andrej.rosano@reversec.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Stable-dep-of: 43d67ec26b32 ("PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/nxp/imx/imx53-usbarmory.dts | 39 +------------------
+ 1 file changed, 1 insertion(+), 38 deletions(-)
+
+diff --git a/arch/arm/boot/dts/nxp/imx/imx53-usbarmory.dts b/arch/arm/boot/dts/nxp/imx/imx53-usbarmory.dts
+index acc44010d5106..3ad9db4b14425 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx53-usbarmory.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx53-usbarmory.dts
+@@ -1,47 +1,10 @@
++// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+ /*
+ * USB armory MkI device tree file
+ * https://inversepath.com/usbarmory
+ *
+ * Copyright (C) 2015, Inverse Path
+ * Andrej Rosano <andrej@inversepath.com>
+- *
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPL or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This file is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+ /dts-v1/;
+--
+2.51.0
+
--- /dev/null
+From 93e52ba86bd577191f3297ee7e99c587b1ef628d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:28 +0800
+Subject: arm64: dts: rockchip: Fix rk356x PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit f63ea193a404481f080ca2958f73e9f364682db9 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that the same address is never allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix
+it to sync with the vendor BSP.
+
+Fixes: 568a67e742df ("arm64: dts: rockchip: Fix rk356x PCIe register and range mappings")
+Fixes: 66b51ea7d70f ("arm64: dts: rockchip: Add rk3568 PCIe2x1 controller")
+Cc: stable@vger.kernel.org
+Cc: Andrew Powers-Holmes <aholmes@omnom.net>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-1-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3568.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk356x-base.dtsi | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index e719a3df126c5..658097ed69714 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -185,7 +185,7 @@ pcie3x1: pcie@fe270000 {
+ <0x0 0xf2000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x40000000 0x3 0x40000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X1_POWERUP>;
+ reset-names = "pipe";
+@@ -238,7 +238,7 @@ pcie3x2: pcie@fe280000 {
+ <0x0 0xf0000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x80000000 0x3 0x80000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X2_POWERUP>;
+ reset-names = "pipe";
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
+index fd2214b6fad40..d654f98460ec0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
+@@ -975,7 +975,7 @@ pcie2x1: pcie@fe260000 {
+ power-domains = <&power RK3568_PD_PIPE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x00000000 0x3 0x00000000 0x0 0x40000000>;
+ resets = <&cru SRST_PCIE20_POWERUP>;
+ reset-names = "pipe";
+ #address-cells = <3>;
+--
+2.51.0
+
--- /dev/null
+From d109f56bdd6648c9362176751f947414b7b8817a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:29 +0800
+Subject: arm64: dts: rockchip: Fix rk3588 PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit 46c56b737161060dfa468f25ae699749047902a2 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that the same address is never allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix
+it to sync with the vendor BSP.
+
+Fixes: 0acf4fa7f187 ("arm64: dts: rockchip: add PCIe3 support for rk3588")
+Fixes: 8d81b77f4c49 ("arm64: dts: rockchip: add rk3588 PCIe2 support")
+Cc: stable@vger.kernel.org
+Cc: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-2-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3588-base.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+index 2973f6bae1716..7e74e04057cfd 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+@@ -1955,7 +1955,7 @@ pcie2x1l1: pcie@fe180000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf3100000 0x0 0xf3100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf3200000 0x0 0xf3200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0xc0000000 0x0 0x40000000>;
++ <0x03000000 0x9 0xc0000000 0x9 0xc0000000 0x0 0x40000000>;
+ reg = <0xa 0x40c00000 0x0 0x00400000>,
+ <0x0 0xfe180000 0x0 0x00010000>,
+ <0x0 0xf3000000 0x0 0x00100000>;
+@@ -2007,7 +2007,7 @@ pcie2x1l2: pcie@fe190000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0xa 0x00000000 0x0 0x40000000>;
++ <0x03000000 0xa 0x00000000 0xa 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x41000000 0x0 0x00400000>,
+ <0x0 0xfe190000 0x0 0x00010000>,
+ <0x0 0xf4000000 0x0 0x00100000>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+index 6e5a58428bbab..a2640014ee042 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+@@ -375,7 +375,7 @@ pcie3x4: pcie@fe150000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x00000000 0x9 0x00000000 0x0 0x40000000>;
+ reg = <0xa 0x40000000 0x0 0x00400000>,
+ <0x0 0xfe150000 0x0 0x00010000>,
+ <0x0 0xf0000000 0x0 0x00100000>;
+@@ -462,7 +462,7 @@ pcie3x2: pcie@fe160000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf1100000 0x0 0xf1100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf1200000 0x0 0xf1200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x40000000 0x9 0x40000000 0x0 0x40000000>;
+ reg = <0xa 0x40400000 0x0 0x00400000>,
+ <0x0 0xfe160000 0x0 0x00010000>,
+ <0x0 0xf1000000 0x0 0x00100000>;
+@@ -512,7 +512,7 @@ pcie2x1l0: pcie@fe170000 {
+ power-domains = <&power RK3588_PD_PCIE>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x00e00000>,
+- <0x03000000 0x0 0x40000000 0x9 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x9 0x80000000 0x9 0x80000000 0x0 0x40000000>;
+ reg = <0xa 0x40800000 0x0 0x00400000>,
+ <0x0 0xfe170000 0x0 0x00010000>,
+ <0x0 0xf2000000 0x0 0x00100000>;
+--
+2.51.0
+
--- /dev/null
+From 5e1e2d9d1e2f7aa645669cca03526907e95a29a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Oct 2025 12:21:41 +0200
+Subject: btrfs: define the AUTO_KFREE/AUTO_KVFREE helper macros
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miquel Sabaté Solà <mssola@mssola.com>
+
+[ Upstream commit d00cbce0a7d5de5fc31bf60abd59b44d36806b6e ]
+
+These are two simple macros which ensure that a pointer is initialized
+to NULL and with the proper cleanup attribute for it.
+
+Signed-off-by: Miquel Sabaté Solà <mssola@mssola.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 52ee9965d09b ("btrfs: zoned: fixup last alloc pointer after extent removal for RAID0/10")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/misc.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
+index 60f9b000d644b..a82032c66ccd3 100644
+--- a/fs/btrfs/misc.h
++++ b/fs/btrfs/misc.h
+@@ -13,6 +13,13 @@
+ #include <linux/rbtree.h>
+ #include <linux/bio.h>
+
++/*
++ * Convenience macros to define a pointer with the __free(kfree) and
++ * __free(kvfree) cleanup attributes and initialized to NULL.
++ */
++#define AUTO_KFREE(name) *name __free(kfree) = NULL
++#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
++
+ /*
+ * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
+ */
+--
+2.51.0
+
--- /dev/null
+From 3bf2965e2f44fe5a35922420ecc8682f7841640c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 21:41:36 +0900
+Subject: btrfs: zoned: fixup last alloc pointer after extent removal for
+ RAID0/10
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 52ee9965d09b2c56a027613db30d1fb20d623861 ]
+
+When a block group is composed of a sequential write zone and a
+conventional zone, we recover the (pseudo) write pointer of the
+conventional zone using the end of the last allocated position.
+
+However, if the last extent in a block group is removed, the last extent
+position will be smaller than the other real write pointer position.
+Then, that will cause an error due to mismatch of the write pointers.
+
+We can fixup this case by moving the alloc_offset to the corresponding
+write pointer position.
+
+Fixes: 568220fa9657 ("btrfs: zoned: support RAID0/1/10 on top of raid stripe tree")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 194 +++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 179 insertions(+), 15 deletions(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 4cbe1ba7af66d..e14a4234954ba 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1553,7 +1553,9 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
++ u64 prev_offset = 0;
+ u32 stripe_index = 0;
++ bool has_partial = false, has_conventional = false;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1561,6 +1563,35 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (int i = 0; i < map->num_stripes; i++) {
++ u64 alloc;
++
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
++ zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ continue;
++
++ stripe_nr = zone_info[i].alloc_offset >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK;
++ if (stripe_offset == 0 && stripe_nr > 0) {
++ stripe_nr--;
++ stripe_offset = BTRFS_STRIPE_LEN;
++ }
++ alloc = ((stripe_nr * map->num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
++ stripe_offset;
++ last_alloc = max(last_alloc, alloc);
++
++ /* Partially written stripe found. It should be last. */
++ if (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK)
++ break;
++ }
++ stripe_nr = 0;
++ stripe_offset = 0;
++
+ if (last_alloc) {
+ u32 factor = map->num_stripes;
+
+@@ -1574,7 +1605,7 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ continue;
+
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+-
++ has_conventional = true;
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
+
+ if (stripe_index > i)
+@@ -1583,6 +1614,28 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ zone_info[i].alloc_offset += stripe_offset;
+ }
+
++ /* Verification */
++ if (i != 0) {
++ if (unlikely(prev_offset < zone_info[i].alloc_offset)) {
++ btrfs_err(fs_info,
++ "zoned: stripe position disorder found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_partial &&
++ (zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK))) {
++ btrfs_err(fs_info,
++ "zoned: multiple partial written stripe found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++ prev_offset = zone_info[i].alloc_offset;
++
++ if ((zone_info[i].alloc_offset & BTRFS_STRIPE_LEN_MASK) != 0)
++ has_partial = true;
++
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (unlikely(!btrfs_zone_activate(bg)))
+ return -EIO;
+@@ -1594,6 +1647,19 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
+
++ /* Check if all devices stay in the same stripe row. */
++ if (unlikely(zone_info[0].alloc_offset -
++ zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
++ btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu", bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
++ btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
++ bg->alloc_offset, last_alloc);
++ return -EIO;
++ }
++
+ return 0;
+ }
+
+@@ -1604,8 +1670,11 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ u64 last_alloc)
+ {
+ struct btrfs_fs_info *fs_info = bg->fs_info;
++ u64 AUTO_KFREE(raid0_allocs);
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
++ bool has_partial = false, has_conventional = false;
++ u64 prev_offset = 0;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+@@ -1613,6 +1682,60 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ return -EINVAL;
+ }
+
++ raid0_allocs = kcalloc(map->num_stripes / map->sub_stripes, sizeof(*raid0_allocs),
++ GFP_NOFS);
++ if (!raid0_allocs)
++ return -ENOMEM;
++
++ /*
++ * When the last extent is removed, last_alloc can be smaller than the other write
++ * pointer. In that case, last_alloc should be moved to the corresponding write
++ * pointer position.
++ */
++ for (int i = 0; i < map->num_stripes; i += map->sub_stripes) {
++ u64 alloc = zone_info[i].alloc_offset;
++
++ for (int j = 1; j < map->sub_stripes; j++) {
++ int idx = i + j;
++
++ if (zone_info[idx].alloc_offset == WP_MISSING_DEV ||
++ zone_info[idx].alloc_offset == WP_CONVENTIONAL)
++ continue;
++ if (alloc == WP_MISSING_DEV || alloc == WP_CONVENTIONAL) {
++ alloc = zone_info[idx].alloc_offset;
++ } else if (unlikely(zone_info[idx].alloc_offset != alloc)) {
++ btrfs_err(fs_info,
++ "zoned: write pointer mismatch found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++
++ raid0_allocs[i / map->sub_stripes] = alloc;
++ if (alloc == WP_CONVENTIONAL)
++ continue;
++ if (unlikely(alloc == WP_MISSING_DEV)) {
++ btrfs_err(fs_info,
++ "zoned: cannot recover write pointer of block group %llu due to missing device",
++ bg->start);
++ return -EIO;
++ }
++
++ stripe_nr = alloc >> BTRFS_STRIPE_LEN_SHIFT;
++ stripe_offset = alloc & BTRFS_STRIPE_LEN_MASK;
++ if (stripe_offset == 0 && stripe_nr > 0) {
++ stripe_nr--;
++ stripe_offset = BTRFS_STRIPE_LEN;
++ }
++
++ alloc = ((stripe_nr * (map->num_stripes / map->sub_stripes) +
++ (i / map->sub_stripes)) <<
++ BTRFS_STRIPE_LEN_SHIFT) + stripe_offset;
++ last_alloc = max(last_alloc, alloc);
++ }
++ stripe_nr = 0;
++ stripe_offset = 0;
++
+ if (last_alloc) {
+ u32 factor = map->num_stripes / map->sub_stripes;
+
+@@ -1622,24 +1745,51 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+- if (zone_info[i].alloc_offset == WP_MISSING_DEV)
+- continue;
++ int idx = i / map->sub_stripes;
+
+- if (test_bit(0, active) != test_bit(i, active)) {
+- if (unlikely(!btrfs_zone_activate(bg)))
+- return -EIO;
+- } else {
+- if (test_bit(0, active))
+- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
++ if (raid0_allocs[idx] == WP_CONVENTIONAL) {
++ has_conventional = true;
++ raid0_allocs[idx] = btrfs_stripe_nr_to_offset(stripe_nr);
++
++ if (stripe_index > idx)
++ raid0_allocs[idx] += BTRFS_STRIPE_LEN;
++ else if (stripe_index == idx)
++ raid0_allocs[idx] += stripe_offset;
+ }
+
+- if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+- zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
++ if ((i % map->sub_stripes) == 0) {
++ /* Verification */
++ if (i != 0) {
++ if (unlikely(prev_offset < raid0_allocs[idx])) {
++ btrfs_err(fs_info,
++ "zoned: stripe position disorder found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
+
+- if (stripe_index > (i / map->sub_stripes))
+- zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
+- else if (stripe_index == (i / map->sub_stripes))
+- zone_info[i].alloc_offset += stripe_offset;
++ if (unlikely(has_partial &&
++ (raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK))) {
++ btrfs_err(fs_info,
++ "zoned: multiple partial written stripe found in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++ }
++ prev_offset = raid0_allocs[idx];
++
++ if ((raid0_allocs[idx] & BTRFS_STRIPE_LEN_MASK) != 0)
++ has_partial = true;
++ }
++
++ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
++ zone_info[i].alloc_offset == WP_CONVENTIONAL)
++ zone_info[i].alloc_offset = raid0_allocs[idx];
++
++ if (test_bit(0, active) != test_bit(i, active)) {
++ if (unlikely(!btrfs_zone_activate(bg)))
++ return -EIO;
++ } else if (test_bit(0, active)) {
++ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+
+ if ((i % map->sub_stripes) == 0) {
+@@ -1648,6 +1798,20 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ }
+ }
+
++ /* Check if all devices stay in the same stripe row. */
++ if (unlikely(zone_info[0].alloc_offset -
++ zone_info[map->num_stripes - 1].alloc_offset > BTRFS_STRIPE_LEN)) {
++ btrfs_err(fs_info, "zoned: stripe gap too large in block group %llu",
++ bg->start);
++ return -EIO;
++ }
++
++ if (unlikely(has_conventional && bg->alloc_offset < last_alloc)) {
++ btrfs_err(fs_info, "zoned: allocated extent stays beyond write pointers %llu %llu",
++ bg->alloc_offset, last_alloc);
++ return -EIO;
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From ee20392d343546316c4bb35e1c251fb0f4733d98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 0f6fb776b2298..5f1af6dfe7154 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -197,8 +197,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From f40920eaecf0878bb8b0f09d4001f64e37362355 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 10:42:54 -0600
+Subject: drm/amd: Fix hang on amdgpu unload by using pci_dev_is_disconnected()
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit f7afda7fcd169a9168695247d07ad94cf7b9798f ]
+
+The commit 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise
+disconnect") introduced early KFD cleanup when drm_dev_is_unplugged()
+returns true. However, this causes hangs during normal module unload
+(rmmod amdgpu).
+
+The issue occurs because drm_dev_unplug() is called in amdgpu_pci_remove()
+for all removal scenarios, not just surprise disconnects. This was done
+intentionally in commit 39934d3ed572 ("Revert "drm/amdgpu: TA unload
+messages are not actually sent to psp when amdgpu is uninstalled"") to
+fix IGT PCI software unplug test failures. As a result,
+drm_dev_is_unplugged() returns true even during normal module unload,
+triggering the early KFD cleanup inappropriately.
+
+The correct check should distinguish between:
+- Actual surprise disconnect (eGPU unplugged): pci_dev_is_disconnected()
+ returns true
+- Normal module unload (rmmod): pci_dev_is_disconnected() returns false
+
+Replace drm_dev_is_unplugged() with pci_dev_is_disconnected() to ensure
+the early cleanup only happens during true hardware disconnect events.
+
+Cc: stable@vger.kernel.org
+Reported-by: Cal Peake <cp@absolutedigital.net>
+Closes: https://lore.kernel.org/all/b0c22deb-c0fa-3343-33cf-fd9a77d7db99@absolutedigital.net/
+Fixes: 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise disconnect")
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index fb096bf551ef2..dbcd55611a37d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4991,7 +4991,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ * before ip_fini_early to prevent kfd locking refcount issues by calling
+ * amdgpu_amdkfd_suspend()
+ */
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
+ amdgpu_device_ip_fini_early(adev);
+@@ -5003,7 +5003,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_device_unmap_mmio(adev);
+
+ }
+--
+2.51.0
+
--- /dev/null
+From ae9a63a23b67b970edfa84ce13fbdbe28698ee93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 21:23:56 +0200
+Subject: drm/i915/dp: Fail state computation for invalid DSC source input BPP
+ values
+
+From: Imre Deak <imre.deak@intel.com>
+
+[ Upstream commit 338465490cf7bd4a700ecd33e4855fee4622fa5f ]
+
+There is no reason to accept an invalid minimum/maximum DSC source input
+BPP value (i.e a minimum DSC input BPP value above the maximum pipe BPP
+or a maximum DSC input BPP value below the minimum pipe BPP value), fail
+the state computation in these cases.
+
+Reviewed-by: Vinod Govindapillai <vinod.govindapillai@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patch.msgid.link/20251215192357.172201-17-imre.deak@intel.com
+Stable-dep-of: fe26ae6ac8b8 ("drm/i915/dp: Fix pipe BPP clamping due to HDR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 28 ++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 2eab591a8ef56..057b366c5ae29 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2523,16 +2523,30 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
+ return true;
+ }
+
+-static void
+-intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
++static bool
++intel_dp_dsc_compute_pipe_bpp_limits(struct intel_connector *connector,
+ struct link_config_limits *limits)
+ {
+- struct intel_display *display = to_intel_display(intel_dp);
++ struct intel_display *display = to_intel_display(connector);
++ const struct link_config_limits orig_limits = *limits;
+ int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
+ int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
+
+- limits->pipe.max_bpp = clamp(limits->pipe.max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+- limits->pipe.min_bpp = clamp(limits->pipe.min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
++ limits->pipe.min_bpp = max(limits->pipe.min_bpp, dsc_min_bpc * 3);
++ limits->pipe.max_bpp = min(limits->pipe.max_bpp, dsc_max_bpc * 3);
++
++ if (limits->pipe.min_bpp <= 0 ||
++ limits->pipe.min_bpp > limits->pipe.max_bpp) {
++ drm_dbg_kms(display->drm,
++ "[CONNECTOR:%d:%s] Invalid DSC src/sink input BPP (src:%d-%d pipe:%d-%d)\n",
++ connector->base.base.id, connector->base.name,
++ dsc_min_bpc * 3, dsc_max_bpc * 3,
++ orig_limits.pipe.min_bpp, orig_limits.pipe.max_bpp);
++
++ return false;
++ }
++
++ return true;
+ }
+
+ bool
+@@ -2572,8 +2586,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ respect_downstream_limits);
+ }
+
+- if (dsc)
+- intel_dp_dsc_compute_pipe_bpp_limits(intel_dp, limits);
++ if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
++ return false;
+
+ if (is_mst || intel_dp->use_max_params) {
+ /*
+--
+2.51.0
+
--- /dev/null
+From bb4b002f64d5308b16323b27a6c8f2f34c827244 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:38:16 +0200
+Subject: drm/i915/dp: Fix pipe BPP clamping due to HDR
+
+From: Imre Deak <imre.deak@intel.com>
+
+[ Upstream commit fe26ae6ac8b88fcdac5036b557c129a17fe520d2 ]
+
+The pipe BPP value shouldn't be set outside of the source's / sink's
+valid pipe BPP range, ensure this when increasing the minimum pipe BPP
+value to 30 due to HDR.
+
+While at it debug print if the HDR mode was requested for a connector by
+setting the corresponding HDR connector property. This indicates
+if the requested HDR mode could not be enabled, since the selected
+pipe BPP is below 30, due to a sink capability or link BW limit.
+
+v2:
+- Also handle the case where the sink could support the target 30 BPP
+ only in DSC mode due to a BW limit, but the sink doesn't support DSC
+ or 30 BPP as a DSC input BPP. (Chaitanya)
+- Debug print the connector's HDR mode in the link config dump, to
+ indicate if a BPP >= 30 required by HDR couldn't be reached. (Ankit)
+- Add Closes: trailer. (Ankit)
+- Don't print the 30 BPP-outside of valid BPP range debug message if
+ the min BPP is already > 30 (and so a target BPP >= 30 required
+ for HDR is ensured).
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/7052
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/15503
+Fixes: ba49a4643cf53 ("drm/i915/dp: Set min_bpp limit to 30 in HDR mode")
+Cc: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
+Cc: <stable@vger.kernel.org> # v6.18+
+Reviewed-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com> # v1
+Reviewed-by: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patch.msgid.link/20260209133817.395823-1-imre.deak@intel.com
+(cherry picked from commit 08b7ef16b6a03e8c966e286ee1ac608a6ffb3d4a)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 057b366c5ae29..be3d54729a440 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -2557,6 +2557,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ bool dsc,
+ struct link_config_limits *limits)
+ {
++ struct intel_display *display = to_intel_display(intel_dp);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+@@ -2569,8 +2570,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
+ limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+- limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 :
+- intel_dp_min_bpp(crtc_state->output_format);
++ limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
+ if (is_mst) {
+ /*
+ * FIXME: If all the streams can't fit into the link with their
+@@ -2586,6 +2586,19 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ respect_downstream_limits);
+ }
+
++ if (!dsc && intel_dp_in_hdr_mode(conn_state)) {
++ if (intel_dp_supports_dsc(intel_dp, connector, crtc_state) &&
++ limits->pipe.max_bpp >= 30)
++ limits->pipe.min_bpp = max(limits->pipe.min_bpp, 30);
++ else
++ drm_dbg_kms(display->drm,
++ "[CONNECTOR:%d:%s] Can't force 30 bpp for HDR (pipe bpp: %d-%d DSC-support: %s)\n",
++ connector->base.base.id, connector->base.name,
++ limits->pipe.min_bpp, limits->pipe.max_bpp,
++ str_yes_no(intel_dp_supports_dsc(intel_dp, connector,
++ crtc_state)));
++ }
++
+ if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
+ return false;
+
+@@ -2716,10 +2729,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
+ }
+
+ drm_dbg_kms(display->drm,
+- "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
++ "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " HDR %s link rate required %d available %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
++ str_yes_no(intel_dp_in_hdr_mode(conn_state)),
+ intel_dp_config_required_rate(pipe_config),
+ intel_dp_max_link_data_rate(intel_dp,
+ pipe_config->port_clock,
+--
+2.51.0
+
--- /dev/null
+From a88523d2ecf3e7b9fdf61b308b810f9ae372b030 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index ddfb2858acbf1..5aa78b902adac 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1540,11 +1540,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 949653468d893017a8335719af461de86a8cf6d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Nov 2025 16:45:38 +0800
+Subject: ext4: correct the comments place for EXT4_EXT_MAY_ZEROOUT
+
+From: Yang Erkun <yangerkun@huawei.com>
+
+[ Upstream commit cc742fd1d184bb2a11bacf50587d2c85290622e4 ]
+
+Move the comments just before we set EXT4_EXT_MAY_ZEROOUT in
+ext4_split_convert_extents.
+
+Signed-off-by: Yang Erkun <yangerkun@huawei.com>
+Message-ID: <20251112084538.1658232-4-yangerkun@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 88187fddc6424..459453e8bb16b 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3756,10 +3756,6 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ >> inode->i_sb->s_blocksize_bits;
+ if (eof_block < map->m_lblk + map->m_len)
+ eof_block = map->m_lblk + map->m_len;
+- /*
+- * It is safe to convert extent to initialized via explicit
+- * zeroout only if extent is fully inside i_size or new_size.
+- */
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+@@ -3770,6 +3766,10 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /*
++ * It is safe to convert extent to initialized via explicit
++ * zeroout only if extent is fully inside i_size or new_size.
++ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+--
+2.51.0
+
--- /dev/null
+From 76b66e0c1796c35dc7abc23092d439641518d8ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:35 +0800
+Subject: ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before
+ submitting I/O
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit feaf2a80e78f89ee8a3464126077ba8683b62791 ]
+
+When allocating blocks during within-EOF DIO and writeback with
+dioread_nolock enabled, EXT4_GET_BLOCKS_PRE_IO was set to split an
+existing large unwritten extent. However, EXT4_GET_BLOCKS_CONVERT was
+set when calling ext4_split_convert_extents(), which may potentially
+result in stale data issues.
+
+Assume we have an unwritten extent, and then DIO writes the second half.
+
+ [UUUUUUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUUUUUU] extent status tree
+ |<- ->| ----> dio write this range
+
+First, ext4_iomap_alloc() call ext4_map_blocks() with
+EXT4_GET_BLOCKS_PRE_IO, EXT4_GET_BLOCKS_UNWRIT_EXT and
+EXT4_GET_BLOCKS_CREATE flags set. ext4_map_blocks() find this extent and
+call ext4_split_convert_extents() with EXT4_GET_BLOCKS_CONVERT and the
+above flags set.
+
+Then, ext4_split_convert_extents() calls ext4_split_extent() with
+EXT4_EXT_MAY_ZEROOUT, EXT4_EXT_MARK_UNWRIT2 and EXT4_EXT_DATA_VALID2
+flags set, and it calls ext4_split_extent_at() to split the second half
+with EXT4_EXT_DATA_VALID2, EXT4_EXT_MARK_UNWRIT1, EXT4_EXT_MAY_ZEROOUT
+and EXT4_EXT_MARK_UNWRIT2 flags set. However, ext4_split_extent_at()
+failed to insert extent since a temporary lack -ENOSPC. It zeroes out
+the first half but convert the entire on-disk extent to written since
+the EXT4_EXT_DATA_VALID2 flag set, but left the second half as unwritten
+in the extent status tree.
+
+ [0000000000SSSSSS] data S: stale data, 0: zeroed
+ [WWWWWWWWWWWWWWWW] on-disk extent W: written extent
+ [WWWWWWWWWWUUUUUU] extent status tree
+
+Finally, if the DIO failed to write data to the disk, the stale data in
+the second half will be exposed once the cached extent entry is gone.
+
+Fix this issue by not passing EXT4_GET_BLOCKS_CONVERT when splitting
+an unwritten extent before submitting I/O, and by making
+ext4_split_convert_extents() zero out the entire extent range
+in this case, also marking the extent in the extent status
+tree for consistency.
+
+Fixes: b8a8684502a0 ("ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-4-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 459453e8bb16b..3ff8dcdd80ce9 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3764,15 +3764,19 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+- /* Convert to initialized */
+- } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /* Split the existing unwritten extent */
++ } else if (flags & (EXT4_GET_BLOCKS_UNWRIT_EXT |
++ EXT4_GET_BLOCKS_CONVERT)) {
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+- split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
++ split_flag |= EXT4_EXT_MARK_UNWRIT2;
++ /* Convert to initialized */
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+@@ -3951,7 +3955,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+ path = ext4_split_convert_extents(handle, inode, map, path,
+- flags | EXT4_GET_BLOCKS_CONVERT, allocated);
++ flags, allocated);
+ if (IS_ERR(path))
+ return path;
+ /*
+--
+2.51.0
+
--- /dev/null
+From b69370b9326b7d6fe405d19e8750c3bdcf0038d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index 4c9e7892a73c1..43fbb9b26b102 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From cd57d1034f3db7f58a7dd11689c0f562b38c1708 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Jan 2026 10:02:02 -0800
+Subject: Input: synaptics_i2c - guard polling restart in resume
+
+From: Minseong Kim <ii4gsp@gmail.com>
+
+[ Upstream commit 870c2e7cd881d7a10abb91f2b38135622d9f9f65 ]
+
+synaptics_i2c_resume() restarts delayed work unconditionally, even when
+the input device is not opened. Guard the polling restart by taking the
+input device mutex and checking input_device_enabled() before re-queuing
+the delayed work.
+
+Fixes: eef3e4cab72ea ("Input: add driver for Synaptics I2C touchpad")
+Signed-off-by: Minseong Kim <ii4gsp@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260121063738.799967-1-ii4gsp@gmail.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/synaptics_i2c.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
+index c8ddfff2605ff..29da66af36d74 100644
+--- a/drivers/input/mouse/synaptics_i2c.c
++++ b/drivers/input/mouse/synaptics_i2c.c
+@@ -615,13 +615,16 @@ static int synaptics_i2c_resume(struct device *dev)
+ int ret;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct synaptics_i2c *touch = i2c_get_clientdata(client);
++ struct input_dev *input = touch->input;
+
+ ret = synaptics_i2c_reset_config(client);
+ if (ret)
+ return ret;
+
+- mod_delayed_work(system_dfl_wq, &touch->dwork,
+- msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
++ guard(mutex)(&input->mutex);
++ if (input_device_enabled(input))
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
++ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 91d1067aceaff17de78c4969ba7b86935f852d3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Nov 2025 15:19:54 +0100
+Subject: Input: synaptics_i2c - replace use of system_wq with system_dfl_wq
+
+From: Marco Crivellari <marco.crivellari@suse.com>
+
+[ Upstream commit b3ee88e27798f0e8dd3a81867804d693da74d57d ]
+
+Currently if a user enqueues a work item using schedule_delayed_work() the
+used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use
+WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
+schedule_work() that is using system_wq and queue_work(), that makes use
+again of WORK_CPU_UNBOUND.
+
+This lack of consistency cannot be addressed without refactoring the API.
+
+This patch continues the effort to refactor worqueue APIs, which has begun
+with the change introducing new workqueues and a new alloc_workqueue flag:
+
+commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
+commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
+
+This specific workload does not benefit from a per-cpu workqueue, so use
+the default unbound workqueue (system_dfl_wq) instead.
+
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
+Link: https://patch.msgid.link/20251106141955.218911-4-marco.crivellari@suse.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Stable-dep-of: 870c2e7cd881 ("Input: synaptics_i2c - guard polling restart in resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/synaptics_i2c.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
+index a0d707e47d932..c8ddfff2605ff 100644
+--- a/drivers/input/mouse/synaptics_i2c.c
++++ b/drivers/input/mouse/synaptics_i2c.c
+@@ -372,7 +372,7 @@ static irqreturn_t synaptics_i2c_irq(int irq, void *dev_id)
+ {
+ struct synaptics_i2c *touch = dev_id;
+
+- mod_delayed_work(system_wq, &touch->dwork, 0);
++ mod_delayed_work(system_dfl_wq, &touch->dwork, 0);
+
+ return IRQ_HANDLED;
+ }
+@@ -448,7 +448,7 @@ static void synaptics_i2c_work_handler(struct work_struct *work)
+ * We poll the device once in THREAD_IRQ_SLEEP_SECS and
+ * if error is detected, we try to reset and reconfigure the touchpad.
+ */
+- mod_delayed_work(system_wq, &touch->dwork, delay);
++ mod_delayed_work(system_dfl_wq, &touch->dwork, delay);
+ }
+
+ static int synaptics_i2c_open(struct input_dev *input)
+@@ -461,7 +461,7 @@ static int synaptics_i2c_open(struct input_dev *input)
+ return ret;
+
+ if (polling_req)
+- mod_delayed_work(system_wq, &touch->dwork,
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+@@ -620,7 +620,7 @@ static int synaptics_i2c_resume(struct device *dev)
+ if (ret)
+ return ret;
+
+- mod_delayed_work(system_wq, &touch->dwork,
++ mod_delayed_work(system_dfl_wq, &touch->dwork,
+ msecs_to_jiffies(NO_DATA_SLEEP_MSECS));
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 8c2742f29860a5cd21bd2ced3f51feb91bd5acda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jan 2026 09:48:50 +0800
+Subject: iommu/vt-d: Skip dev-iotlb flush for inaccessible PCIe device without
+ scalable mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jinhui Guo <guojinhui.liam@bytedance.com>
+
+[ Upstream commit 42662d19839f34735b718129ea200e3734b07e50 ]
+
+PCIe endpoints with ATS enabled and passed through to userspace
+(e.g., QEMU, DPDK) can hard-lock the host when their link drops,
+either by surprise removal or by a link fault.
+
+Commit 4fc82cd907ac ("iommu/vt-d: Don't issue ATS Invalidation
+request when device is disconnected") adds pci_dev_is_disconnected()
+to devtlb_invalidation_with_pasid() so ATS invalidation is skipped
+only when the device is being safely removed, but it applies only
+when Intel IOMMU scalable mode is enabled.
+
+With scalable mode disabled or unsupported, a system hard-lock
+occurs when a PCIe endpoint's link drops because the Intel IOMMU
+waits indefinitely for an ATS invalidation that cannot complete.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ domain_context_clear_one_cb
+ pci_for_each_dma_alias
+ device_block_translation
+ blocking_domain_attach_dev
+ iommu_deinit_device
+ __iommu_group_remove_device
+ iommu_release_device
+ iommu_bus_notifier
+ blocking_notifier_call_chain
+ bus_notify
+ device_del
+ pci_remove_bus_device
+ pci_stop_and_remove_bus_device
+ pciehp_unconfigure_device
+ pciehp_disable_slot
+ pciehp_handle_presence_or_link_change
+ pciehp_ist
+
+Commit 81e921fd3216 ("iommu/vt-d: Fix NULL domain on device release")
+adds intel_pasid_teardown_sm_context() to intel_iommu_release_device(),
+which calls qi_flush_dev_iotlb() and can also hard-lock the system
+when a PCIe endpoint's link drops.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ intel_context_flush_no_pasid
+ device_pasid_table_teardown
+ pci_pasid_table_teardown
+ pci_for_each_dma_alias
+ intel_pasid_teardown_sm_context
+ intel_iommu_release_device
+ iommu_deinit_device
+ __iommu_group_remove_device
+ iommu_release_device
+ iommu_bus_notifier
+ blocking_notifier_call_chain
+ bus_notify
+ device_del
+ pci_remove_bus_device
+ pci_stop_and_remove_bus_device
+ pciehp_unconfigure_device
+ pciehp_disable_slot
+ pciehp_handle_presence_or_link_change
+ pciehp_ist
+
+Sometimes the endpoint loses connection without a link-down event
+(e.g., due to a link fault); killing the process (virsh destroy)
+then hard-locks the host.
+
+Call Trace:
+ qi_submit_sync
+ qi_flush_dev_iotlb
+ __context_flush_dev_iotlb.part.0
+ domain_context_clear_one_cb
+ pci_for_each_dma_alias
+ device_block_translation
+ blocking_domain_attach_dev
+ __iommu_attach_device
+ __iommu_device_set_domain
+ __iommu_group_set_domain_internal
+ iommu_detach_group
+ vfio_iommu_type1_detach_group
+ vfio_group_detach_container
+ vfio_group_fops_release
+ __fput
+
+pci_dev_is_disconnected() only covers safe-removal paths;
+pci_device_is_present() tests accessibility by reading
+vendor/device IDs and internally calls pci_dev_is_disconnected().
+On a ConnectX-5 (8 GT/s, x2) this costs ~70 µs.
+
+Since __context_flush_dev_iotlb() is only called on
+{attach,release}_dev paths (not hot), add pci_device_is_present()
+there to skip inaccessible devices and avoid the hard-lock.
+
+Fixes: 37764b952e1b ("iommu/vt-d: Global devTLB flush when present context entry changed")
+Fixes: 81e921fd3216 ("iommu/vt-d: Fix NULL domain on device release")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jinhui Guo <guojinhui.liam@bytedance.com>
+Link: https://lore.kernel.org/r/20251211035946.2071-2-guojinhui.liam@bytedance.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/pasid.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index 6782ba5f5e57f..7a64a55fb5887 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -1114,6 +1114,14 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
+ if (!info->ats_enabled)
+ return;
+
++ /*
++ * Skip dev-IOTLB flush for inaccessible PCIe devices to prevent the
++ * Intel IOMMU from waiting indefinitely for an ATS invalidation that
++ * cannot complete.
++ */
++ if (!pci_device_is_present(to_pci_dev(info->dev)))
++ return;
++
+ qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+--
+2.51.0
+
--- /dev/null
+From 98c1136c283eb57cfb65cf6c612fcbaef5d55ab5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 12:56:25 +0000
+Subject: KVM: x86: Add x2APIC "features" to control EOI broadcast suppression
+
+From: Khushit Shah <khushit.shah@nutanix.com>
+
+[ Upstream commit 6517dfbcc918f970a928d9dc17586904bac06893 ]
+
+Add two flags for KVM_CAP_X2APIC_API to allow userspace to control support
+for Suppress EOI Broadcasts when using a split IRQCHIP (I/O APIC emulated
+by userspace), which KVM completely mishandles. When x2APIC support was
+first added, KVM incorrectly advertised and "enabled" Suppress EOI
+Broadcast, without fully supporting the I/O APIC side of the equation,
+i.e. without adding directed EOI to KVM's in-kernel I/O APIC.
+
+That flaw was carried over to split IRQCHIP support, i.e. KVM advertised
+support for Suppress EOI Broadcasts irrespective of whether or not the
+userspace I/O APIC implementation supported directed EOIs. Even worse,
+KVM didn't actually suppress EOI broadcasts, i.e. userspace VMMs without
+support for directed EOI came to rely on the "spurious" broadcasts.
+
+KVM "fixed" the in-kernel I/O APIC implementation by completely disabling
+support for Suppress EOI Broadcasts in commit 0bcc3fb95b97 ("KVM: lapic:
+stop advertising DIRECTED_EOI when in-kernel IOAPIC is in use"), but
+didn't do anything to remedy userspace I/O APIC implementations.
+
+KVM's bogus handling of Suppress EOI Broadcast is problematic when the
+guest relies on interrupts being masked in the I/O APIC until well after
+the initial local APIC EOI. E.g. Windows with Credential Guard enabled
+handles interrupts in the following order:
+ 1. Interrupt for L2 arrives.
+ 2. L1 APIC EOIs the interrupt.
+ 3. L1 resumes L2 and injects the interrupt.
+ 4. L2 EOIs after servicing.
+ 5. L1 performs the I/O APIC EOI.
+
+Because KVM EOIs the I/O APIC at step #2, the guest can get an interrupt
+storm, e.g. if the IRQ line is still asserted and userspace reacts to the
+EOI by re-injecting the IRQ, because the guest doesn't de-assert the line
+until step #4, and doesn't expect the interrupt to be re-enabled until
+step #5.
+
+Unfortunately, simply "fixing" the bug isn't an option, as KVM has no way
+of knowing if the userspace I/O APIC supports directed EOIs, i.e.
+suppressing EOI broadcasts would result in interrupts being stuck masked
+in the userspace I/O APIC due to step #5 being ignored by userspace. And
+fully disabling support for Suppress EOI Broadcast is also undesirable, as
+picking up the fix would require a guest reboot, *and* more importantly
+would change the virtual CPU model exposed to the guest without any buy-in
+from userspace.
+
+Add KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST and
+KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST flags to allow userspace to
+explicitly enable or disable support for Suppress EOI Broadcasts. This
+gives userspace control over the virtual CPU model exposed to the guest,
+as KVM should never have enabled support for Suppress EOI Broadcast without
+userspace opt-in. Not setting either flag will result in legacy quirky
+behavior for backward compatibility.
+
+Disallow fully enabling SUPPRESS_EOI_BROADCAST when using an in-kernel
+I/O APIC, as KVM's history/support is just as tragic. E.g. it's not clear
+that commit c806a6ad35bf ("KVM: x86: call irq notifiers with directed EOI")
+was entirely correct, i.e. it may have simply papered over the lack of
+Directed EOI emulation in the I/O APIC.
+
+Note, Suppress EOI Broadcasts is defined only in Intel's SDM, not in AMD's
+APM. But the bit is writable on some AMD CPUs, e.g. Turin, and KVM's ABI
+is to support Directed EOI (KVM's name) irrespective of guest CPU vendor.
+
+Fixes: 7543a635aa09 ("KVM: x86: Add KVM exit for IOAPIC EOIs")
+Closes: https://lore.kernel.org/kvm/7D497EF1-607D-4D37-98E7-DAF95F099342@nutanix.com
+Cc: stable@vger.kernel.org
+Suggested-by: David Woodhouse <dwmw2@infradead.org>
+Signed-off-by: Khushit Shah <khushit.shah@nutanix.com>
+Link: https://patch.msgid.link/20260123125657.3384063-1-khushit.shah@nutanix.com
+[sean: clean up minor formatting goofs and fix a comment typo]
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/virt/kvm/api.rst | 28 +++++++++++-
+ arch/x86/include/asm/kvm_host.h | 7 +++
+ arch/x86/include/uapi/asm/kvm.h | 6 ++-
+ arch/x86/kvm/ioapic.c | 2 +-
+ arch/x86/kvm/lapic.c | 76 +++++++++++++++++++++++++++++----
+ arch/x86/kvm/lapic.h | 2 +
+ arch/x86/kvm/x86.c | 21 ++++++++-
+ 7 files changed, 127 insertions(+), 15 deletions(-)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 57061fa29e6a0..ae8b02eb776a5 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -7800,8 +7800,10 @@ Will return -EBUSY if a VCPU has already been created.
+
+ Valid feature flags in args[0] are::
+
+- #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+- #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++ #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
++ #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++ #define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST (1ULL << 2)
++ #define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST (1ULL << 3)
+
+ Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
+ KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
+@@ -7814,6 +7816,28 @@ as a broadcast even in x2APIC mode in order to support physical x2APIC
+ without interrupt remapping. This is undesirable in logical mode,
+ where 0xff represents CPUs 0-7 in cluster 0.
+
++Setting KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST instructs KVM to enable
++Suppress EOI Broadcasts. KVM will advertise support for Suppress EOI
++Broadcast to the guest and suppress LAPIC EOI broadcasts when the guest
++sets the Suppress EOI Broadcast bit in the SPIV register. This flag is
++supported only when using a split IRQCHIP.
++
++Setting KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST disables support for
++Suppress EOI Broadcasts entirely, i.e. instructs KVM to NOT advertise
++support to the guest.
++
++Modern VMMs should either enable KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST
++or KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST. If not, legacy quirky
++behavior will be used by KVM: in split IRQCHIP mode, KVM will advertise
++support for Suppress EOI Broadcasts but not actually suppress EOI
++broadcasts; for in-kernel IRQCHIP mode, KVM will not advertise support for
++Suppress EOI Broadcasts.
++
++Setting both KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST and
++KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST will fail with an EINVAL error,
++as will setting KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST without a split
++IRQCHIP.
++
+ 7.8 KVM_CAP_S390_USER_INSTR0
+ ----------------------------
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index b74ae7183f3ae..0f2f9f1552a4f 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1229,6 +1229,12 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
++enum kvm_suppress_eoi_broadcast_mode {
++ KVM_SUPPRESS_EOI_BROADCAST_QUIRKED, /* Legacy behavior */
++ KVM_SUPPRESS_EOI_BROADCAST_ENABLED, /* Enable Suppress EOI broadcast */
++ KVM_SUPPRESS_EOI_BROADCAST_DISABLED /* Disable Suppress EOI broadcast */
++};
++
+ struct kvm_x86_msr_filter {
+ u8 count;
+ bool default_allow:1;
+@@ -1480,6 +1486,7 @@ struct kvm_arch {
+
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
++ enum kvm_suppress_eoi_broadcast_mode suppress_eoi_broadcast_mode;
+
+ bool has_mapped_host_mmio;
+ bool guest_can_read_msr_platform_info;
+diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
+index d420c9c066d48..1584926cd1f4f 100644
+--- a/arch/x86/include/uapi/asm/kvm.h
++++ b/arch/x86/include/uapi/asm/kvm.h
+@@ -913,8 +913,10 @@ struct kvm_sev_snp_launch_finish {
+ __u64 pad1[4];
+ };
+
+-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++#define KVM_X2APIC_API_USE_32BIT_IDS _BITULL(0)
++#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK _BITULL(1)
++#define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST _BITULL(2)
++#define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST _BITULL(3)
+
+ struct kvm_hyperv_eventfd {
+ __u32 conn_id;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 2c2783296aedb..a26fa4222f292 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -561,7 +561,7 @@ static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
+ spin_lock(&ioapic->lock);
+
+ if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+- kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
++ kvm_lapic_suppress_eoi_broadcast(apic))
+ return;
+
+ ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 8b6ec3304100f..a9845a1a9cd5d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -105,6 +105,63 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
+ apic_test_vector(vector, apic->regs + APIC_IRR);
+ }
+
++static bool kvm_lapic_advertise_suppress_eoi_broadcast(struct kvm *kvm)
++{
++ switch (kvm->arch.suppress_eoi_broadcast_mode) {
++ case KVM_SUPPRESS_EOI_BROADCAST_ENABLED:
++ return true;
++ case KVM_SUPPRESS_EOI_BROADCAST_DISABLED:
++ return false;
++ case KVM_SUPPRESS_EOI_BROADCAST_QUIRKED:
++ /*
++ * The default in-kernel I/O APIC emulates the 82093AA and does not
++ * implement an EOI register. Some guests (e.g. Windows with the
++ * Hyper-V role enabled) disable LAPIC EOI broadcast without
++ * checking the I/O APIC version, which can cause level-triggered
++ * interrupts to never be EOI'd.
++ *
++ * To avoid this, KVM doesn't advertise Suppress EOI Broadcast
++ * support when using the default in-kernel I/O APIC.
++ *
++ * Historically, in split IRQCHIP mode, KVM always advertised
++ * Suppress EOI Broadcast support but did not actually suppress
++ * EOIs, resulting in quirky behavior.
++ */
++ return !ioapic_in_kernel(kvm);
++ default:
++ WARN_ON_ONCE(1);
++ return false;
++ }
++}
++
++bool kvm_lapic_suppress_eoi_broadcast(struct kvm_lapic *apic)
++{
++ struct kvm *kvm = apic->vcpu->kvm;
++
++ if (!(kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
++ return false;
++
++ switch (kvm->arch.suppress_eoi_broadcast_mode) {
++ case KVM_SUPPRESS_EOI_BROADCAST_ENABLED:
++ return true;
++ case KVM_SUPPRESS_EOI_BROADCAST_DISABLED:
++ return false;
++ case KVM_SUPPRESS_EOI_BROADCAST_QUIRKED:
++ /*
++ * Historically, in split IRQCHIP mode, KVM ignored the suppress
++ * EOI broadcast bit set by the guest and broadcasts EOIs to the
++ * userspace I/O APIC. For In-kernel I/O APIC, the support itself
++ * is not advertised, can only be enabled via KVM_SET_APIC_STATE,
++ * and KVM's I/O APIC doesn't emulate Directed EOIs; but if the
++ * feature is enabled, it is respected (with odd behavior).
++ */
++ return ioapic_in_kernel(kvm);
++ default:
++ WARN_ON_ONCE(1);
++ return false;
++ }
++}
++
+ __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
+ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_has_noapic_vcpu);
+
+@@ -554,15 +611,9 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
+
+ v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
+
+- /*
+- * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
+- * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
+- * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
+- * version first and level-triggered interrupts never get EOIed in
+- * IOAPIC.
+- */
++
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_X2APIC) &&
+- !ioapic_in_kernel(vcpu->kvm))
++ kvm_lapic_advertise_suppress_eoi_broadcast(vcpu->kvm))
+ v |= APIC_LVR_DIRECTED_EOI;
+ kvm_lapic_set_reg(apic, APIC_LVR, v);
+ }
+@@ -1517,6 +1568,15 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
+
+ /* Request a KVM exit to inform the userspace IOAPIC. */
+ if (irqchip_split(apic->vcpu->kvm)) {
++ /*
++ * Don't exit to userspace if the guest has enabled Directed
++ * EOI, a.k.a. Suppress EOI Broadcasts, in which case the local
++ * APIC doesn't broadcast EOIs (the guest must EOI the target
++ * I/O APIC(s) directly).
++ */
++ if (kvm_lapic_suppress_eoi_broadcast(apic))
++ return;
++
+ apic->vcpu->arch.pending_ioapic_eoi = vector;
+ kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
+ return;
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 282b9b7da98cd..e5f5a222eced0 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -231,6 +231,8 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
+
+ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+
++bool kvm_lapic_suppress_eoi_broadcast(struct kvm_lapic *apic);
++
+ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
+
+ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 2ab445c0126b3..d15bd078a2d98 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -121,8 +121,10 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
+
+ #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
+
+-#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
+- KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
++#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
++ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK | \
++ KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST | \
++ KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
+@@ -4966,6 +4968,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ break;
+ case KVM_CAP_X2APIC_API:
+ r = KVM_X2APIC_API_VALID_FLAGS;
++ if (kvm && !irqchip_split(kvm))
++ r &= ~KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST;
+ break;
+ case KVM_CAP_NESTED_STATE:
+ r = kvm_x86_ops.nested_ops->get_state ?
+@@ -6783,11 +6787,24 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
+ break;
+
++ if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
++ (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST))
++ break;
++
++ if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
++ !irqchip_split(kvm))
++ break;
++
+ if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
+ kvm->arch.x2apic_format = true;
+ if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
+ kvm->arch.x2apic_broadcast_quirk_disabled = true;
+
++ if (cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST)
++ kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_ENABLED;
++ if (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
++ kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_DISABLED;
++
+ r = 0;
+ break;
+ case KVM_CAP_X86_DISABLE_EXITS:
+--
+2.51.0
+
--- /dev/null
+From 8d85b0430aa3298e476387390f245e4b1d48ed55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jan 2026 19:06:57 -0800
+Subject: KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ead63640d4e72e6f6d464f4e31f7fecb79af8869 ]
+
+Ignore -EBUSY when checking nested events after exiting a blocking state
+while L2 is active, as exiting to userspace will generate a spurious
+userspace exit, usually with KVM_EXIT_UNKNOWN, and likely lead to the VM's
+demise. Continuing with the wakeup isn't perfect either, as *something*
+has gone sideways if a vCPU is awakened in L2 with an injected event (or
+worse, a nested run pending), but continuing on gives the VM a decent
+chance of surviving without any major side effects.
+
+As explained in the Fixes commits, it _should_ be impossible for a vCPU to
+be put into a blocking state with an already-injected event (exception,
+IRQ, or NMI). Unfortunately, userspace can stuff MP_STATE and/or injected
+events, and thus put the vCPU into what should be an impossible state.
+
+Don't bother trying to preserve the WARN, e.g. with an anti-syzkaller
+Kconfig, as WARNs can (hopefully) be added in paths where _KVM_ would be
+violating x86 architecture, e.g. by WARNing if KVM attempts to inject an
+exception or interrupt while the vCPU isn't running.
+
+Cc: Alessandro Ratti <alessandro@0x65c.net>
+Cc: stable@vger.kernel.org
+Fixes: 26844fee6ade ("KVM: x86: never write to memory from kvm_vcpu_check_block()")
+Fixes: 45405155d876 ("KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet inject")
+Link: https://syzkaller.appspot.com/text?tag=ReproC&x=10d4261a580000
+Reported-by: syzbot+1522459a74d26b0ac33a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/671bc7a7.050a0220.455e8.022a.GAE@google.com
+Link: https://patch.msgid.link/20260109030657.994759-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index aeb7f902b3c7f..2ab445c0126b3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11598,8 +11598,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ if (is_guest_mode(vcpu)) {
+ int r = kvm_check_nested_events(vcpu);
+
+- WARN_ON_ONCE(r == -EBUSY);
+- if (r < 0)
++ if (r < 0 && r != -EBUSY)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f13a8eb8db2241b98ee01abc55635345669e079f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:31:13 +0800
+Subject: LoongArch: Handle percpu handler address for ORC unwinder
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 055c7e75190e0be43037bd663a3f6aced194416e ]
+
+After commit 4cd641a79e69 ("LoongArch: Remove unnecessary checks for ORC
+unwinder"), the system can not boot normally under some configs (such as
+enable KASAN), there are many error messages "cannot find unwind pc".
+
+The kernel boots normally with the defconfig, so no problem found out at
+the first time. Here is one way to reproduce:
+
+ cd linux
+ make mrproper defconfig -j"$(nproc)"
+ scripts/config -e KASAN
+ make olddefconfig all -j"$(nproc)"
+ sudo make modules_install
+ sudo make install
+ sudo reboot
+
+The address that can not unwind is not a valid kernel address which is
+between "pcpu_handlers[cpu]" and "pcpu_handlers[cpu] + vec_sz" due to
+the code of eentry was copied to the new area of pcpu_handlers[cpu] in
+setup_tlb_handler(), handle this special case to get the valid address
+to unwind normally.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/setup.h | 3 +++
+ arch/loongarch/kernel/unwind_orc.c | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
+index 3c2fb16b11b64..f81375e5e89c0 100644
+--- a/arch/loongarch/include/asm/setup.h
++++ b/arch/loongarch/include/asm/setup.h
+@@ -7,6 +7,7 @@
+ #define _LOONGARCH_SETUP_H
+
+ #include <linux/types.h>
++#include <linux/threads.h>
+ #include <asm/sections.h>
+ #include <uapi/asm/setup.h>
+
+@@ -14,6 +15,8 @@
+
+ extern unsigned long eentry;
+ extern unsigned long tlbrentry;
++extern unsigned long pcpu_handlers[NR_CPUS];
++extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+ extern char init_command_line[COMMAND_LINE_SIZE];
+ extern void tlb_init(int cpu);
+ extern void cpu_cache_init(void);
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index b67f065905256..ad7e63f495045 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -360,6 +360,22 @@ static inline unsigned long bt_address(unsigned long ra)
+ {
+ extern unsigned long eentry;
+
++#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
++ int cpu;
++ int vec_sz = sizeof(exception_handlers);
++
++ for_each_possible_cpu(cpu) {
++ if (!pcpu_handlers[cpu])
++ continue;
++
++ if (ra >= pcpu_handlers[cpu] &&
++ ra < pcpu_handlers[cpu] + vec_sz) {
++ ra = ra + eentry - pcpu_handlers[cpu];
++ break;
++ }
++ }
++#endif
++
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+--
+2.51.0
+
--- /dev/null
+From aeb15ab520291f72f1bcb699a61bb96cb92322e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:31:14 +0800
+Subject: LoongArch: Remove some extern variables in source files
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 0e6f596d6ac635e80bb265d587b2287ef8fa1cd6 ]
+
+There are declarations of the variable "eentry", "pcpu_handlers[]" and
+"exception_handlers[]" in asm/setup.h, the source files already include
+this header file directly or indirectly, so no need to declare them in
+the source files, just remove the code.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 2 --
+ arch/loongarch/kernel/unwind_prologue.c | 4 ----
+ arch/loongarch/mm/tlb.c | 1 -
+ 3 files changed, 7 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index ad7e63f495045..85c2fcb76930c 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -358,8 +358,6 @@ static bool is_entry_func(unsigned long addr)
+
+ static inline unsigned long bt_address(unsigned long ra)
+ {
+- extern unsigned long eentry;
+-
+ #if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
+ int cpu;
+ int vec_sz = sizeof(exception_handlers);
+diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
+index ee1c29686ab05..da07acad7973a 100644
+--- a/arch/loongarch/kernel/unwind_prologue.c
++++ b/arch/loongarch/kernel/unwind_prologue.c
+@@ -23,10 +23,6 @@ extern const int unwind_hint_lasx;
+ extern const int unwind_hint_lbt;
+ extern const int unwind_hint_ri;
+ extern const int unwind_hint_watch;
+-extern unsigned long eentry;
+-#ifdef CONFIG_NUMA
+-extern unsigned long pcpu_handlers[NR_CPUS];
+-#endif
+
+ static inline bool scan_handlers(unsigned long entry_offset)
+ {
+diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
+index f46c15d6e7eae..24add95ecb65e 100644
+--- a/arch/loongarch/mm/tlb.c
++++ b/arch/loongarch/mm/tlb.c
+@@ -260,7 +260,6 @@ static void output_pgtable_bits_defines(void)
+ #ifdef CONFIG_NUMA
+ unsigned long pcpu_handlers[NR_CPUS];
+ #endif
+-extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+
+ static void setup_tlb_handler(int cpu)
+ {
+--
+2.51.0
+
--- /dev/null
+From 72a6b4b2b477d57e47fa950e1f124a1bfd40e6fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 15:19:19 +0800
+Subject: LoongArch: Remove unnecessary checks for ORC unwinder
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 4cd641a79e69270a062777f64a0dd330abb9044a ]
+
+According to the following function definitions, __kernel_text_address()
+already checks __module_text_address(), so it should remove the check of
+__module_text_address() in bt_address() at least.
+
+int __kernel_text_address(unsigned long addr)
+{
+ if (kernel_text_address(addr))
+ return 1;
+ ...
+ return 0;
+}
+
+int kernel_text_address(unsigned long addr)
+{
+ bool no_rcu;
+ int ret = 1;
+ ...
+ if (is_module_text_address(addr))
+ goto out;
+ ...
+ return ret;
+}
+
+bool is_module_text_address(unsigned long addr)
+{
+ guard(rcu)();
+ return __module_text_address(addr) != NULL;
+}
+
+Furthermore, there are two checks of __kernel_text_address(), one is in
+bt_address() and the other is after calling bt_address(), it looks like
+redundant.
+
+Handle the exception address first and then use __kernel_text_address()
+to validate the calculated address for exception or the normal address
+in bt_address(), then it can remove the check of __kernel_text_address()
+after calling bt_address().
+
+Just remove unnecessary checks, no functional changes intended.
+
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Stable-dep-of: 055c7e75190e ("LoongArch: Handle percpu handler address for ORC unwinder")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index e410048489c66..b67f065905256 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -360,12 +360,6 @@ static inline unsigned long bt_address(unsigned long ra)
+ {
+ extern unsigned long eentry;
+
+- if (__kernel_text_address(ra))
+- return ra;
+-
+- if (__module_text_address(ra))
+- return ra;
+-
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+@@ -383,10 +377,13 @@ static inline unsigned long bt_address(unsigned long ra)
+ break;
+ }
+
+- return func + offset;
++ ra = func + offset;
+ }
+
+- return ra;
++ if (__kernel_text_address(ra))
++ return ra;
++
++ return 0;
+ }
+
+ bool unwind_next_frame(struct unwind_state *state)
+@@ -512,9 +509,6 @@ bool unwind_next_frame(struct unwind_state *state)
+ goto err;
+ }
+
+- if (!__kernel_text_address(state->pc))
+- goto err;
+-
+ return true;
+
+ err:
+--
+2.51.0
+
--- /dev/null
+From a3cd4a59139ceef0dd9e3c8ed11be45a9acf7724 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Dec 2025 12:24:09 +0530
+Subject: media: iris: Add missing platform data entries for SM8750
+
+From: Dikshita Agarwal <dikshita.agarwal@oss.qualcomm.com>
+
+[ Upstream commit bbef55f414100853d5bcea56a41f8b171bac8fcb ]
+
+Two platform-data fields for SM8750 were missed:
+
+ - get_vpu_buffer_size = iris_vpu33_buf_size
+ Without this, the driver fails to allocate the required internal
+ buffers, leading to basic decode/encode failures during session
+ bring-up.
+
+ - max_core_mbps = ((7680 * 4320) / 256) * 60
+ Without this capability exposed, capability checks are incomplete and
+ v4l2-compliance for encoder fails.
+
+Fixes: a5925a2ce077 ("media: iris: add VPU33 specific encoding buffer calculation")
+Fixes: a6882431a138 ("media: iris: Add support for ENUM_FRAMESIZES/FRAMEINTERVALS for encoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dikshita Agarwal <dikshita.agarwal@oss.qualcomm.com>
+Reviewed-by: Vikash Garodia <vikash.garodia@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Bryan O'Donoghue <bod@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/iris/iris_platform_gen2.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/media/platform/qcom/iris/iris_platform_gen2.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
+index 36d69cc73986b..85beb80476de8 100644
+--- a/drivers/media/platform/qcom/iris/iris_platform_gen2.c
++++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
+@@ -916,6 +916,7 @@ struct iris_platform_data sm8750_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
++ .get_vpu_buffer_size = iris_vpu33_buf_size,
+ .vpu_ops = &iris_vpu35_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+@@ -946,6 +947,7 @@ struct iris_platform_data sm8750_data = {
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = NUM_MBS_8K * 2,
++ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .dec_input_config_params_default_size =
+--
+2.51.0
+
--- /dev/null
+From 157d5caa863a7b45e7605ef9801ac94c7d45286b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 09:10:19 +0530
+Subject: media: iris: remove v4l2_m2m_ioctl_{de,en}coder_cmd API usage during
+ STOP handling
+
+From: Dikshita Agarwal <dikshita.agarwal@oss.qualcomm.com>
+
+[ Upstream commit 8fc707d13df517222db12b465af4aa9df05c99e1 ]
+
+Currently v4l2_m2m_ioctl_{de,enc}coder_cmd is being invoked during STOP
+command handling. However, this is not required as the iris driver has
+its own drain and stop handling mechanism in place.
+
+Using the m2m command API in this context leads to incorrect behavior,
+where the LAST flag is prematurely attached to a capture buffer,
+when there are no buffers in m2m source queue. But, in this scenario
+even though the source buffers are returned to client, hardware might
+still need to process the pending capture buffers.
+
+Attaching LAST flag prematurely can result in the capture buffer being
+removed from the destination queue before the hardware has finished
+processing it, causing issues when the buffer is eventually returned by
+the hardware.
+
+To prevent this, remove the m2m API usage in stop handling.
+
+Fixes: d09100763bed ("media: iris: add support for drain sequence")
+Fixes: 75db90ae067d ("media: iris: Add support for drain sequence in encoder video device")
+Signed-off-by: Dikshita Agarwal <dikshita.agarwal@oss.qualcomm.com>
+Reviewed-by: Vikash Garodia <vikash.garodia@oss.qualcomm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Bryan O'Donoghue <bod@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/iris/iris_vidc.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
+index d38d0f6961cd5..07682400de690 100644
+--- a/drivers/media/platform/qcom/iris/iris_vidc.c
++++ b/drivers/media/platform/qcom/iris/iris_vidc.c
+@@ -572,9 +572,10 @@ static int iris_dec_cmd(struct file *filp, void *fh,
+
+ mutex_lock(&inst->lock);
+
+- ret = v4l2_m2m_ioctl_decoder_cmd(filp, fh, dec);
+- if (ret)
++ if (dec->cmd != V4L2_DEC_CMD_STOP && dec->cmd != V4L2_DEC_CMD_START) {
++ ret = -EINVAL;
+ goto unlock;
++ }
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+@@ -605,9 +606,10 @@ static int iris_enc_cmd(struct file *filp, void *fh,
+
+ mutex_lock(&inst->lock);
+
+- ret = v4l2_m2m_ioctl_encoder_cmd(filp, fh, enc);
+- if (ret)
++ if (enc->cmd != V4L2_ENC_CMD_STOP && enc->cmd != V4L2_ENC_CMD_START) {
++ ret = -EINVAL;
+ goto unlock;
++ }
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+--
+2.51.0
+
--- /dev/null
+From 0a9b7f9c15f548002346e2876cad2aa155da21f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 09:12:57 +0000
+Subject: media: tegra-video: Fix memory leak in __tegra_channel_try_format()
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 43e5302d22334f1183dec3e0d5d8007eefe2817c ]
+
+The state object allocated by __v4l2_subdev_state_alloc() must be freed
+with __v4l2_subdev_state_free() when it is no longer needed.
+
+In __tegra_channel_try_format(), two error paths return directly after
+v4l2_subdev_call() fails, without freeing the allocated 'sd_state'
+object. This violates the requirement and causes a memory leak.
+
+Fix this by introducing a cleanup label and using goto statements in the
+error paths to ensure that __v4l2_subdev_state_free() is always called
+before the function returns.
+
+Fixes: 56f64b82356b7 ("media: tegra-video: Use zero crop settings if subdev has no get_selection")
+Fixes: 1ebaeb09830f3 ("media: tegra-video: Add support for external sensor capture")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index c9276ff76157f..14b327afe045e 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -438,7 +438,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *try_crop;
+- int ret;
++ int ret = 0;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+@@ -482,8 +482,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+- if (ret)
+- return -EINVAL;
++ if (ret) {
++ ret = -EINVAL;
++ goto out_free;
++ }
+
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
+@@ -495,14 +497,15 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ if (ret < 0)
+- return ret;
++ goto out_free;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
+
++out_free:
+ __v4l2_subdev_state_free(sd_state);
+
+- return 0;
++ return ret;
+ }
+
+ static int tegra_channel_try_format(struct file *file, void *fh,
+--
+2.51.0
+
--- /dev/null
+From 1ebef733ac0afb8a72d3eedeb422767be5fa452d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:24 +0800
+Subject: media: v4l2-mem2mem: Add a kref to the v4l2_m2m_dev structure
+
+From: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+
+[ Upstream commit db6b97a4f8041e479be9ef4b8b07022636c96f50 ]
+
+Adding a reference count to the v4l2_m2m_dev structure allow safely
+sharing it across multiple hardware nodes. This can be used to prevent
+running jobs concurrently on m2m cores that have some internal resource
+sharing.
+
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[hverkuil: fix typos in v4l2_m2m_put documentation]
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-mem2mem.c | 23 +++++++++++++++++++++++
+ include/media/v4l2-mem2mem.h | 21 +++++++++++++++++++++
+ 2 files changed, 44 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 21acd9bc86071..bc8218d1cab9f 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -90,6 +90,7 @@ static const char * const m2m_entity_name[] = {
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
+ * @m2m_ops: driver callbacks
++ * @kref: device reference count
+ */
+ struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+@@ -109,6 +110,8 @@ struct v4l2_m2m_dev {
+ unsigned long job_queue_flags;
+
+ const struct v4l2_m2m_ops *m2m_ops;
++
++ struct kref kref;
+ };
+
+ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+@@ -1206,6 +1209,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
++ kref_init(&m2m_dev->kref);
+
+ return m2m_dev;
+ }
+@@ -1217,6 +1221,25 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_get(&m2m_dev->kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_get);
++
++static void v4l2_m2m_release_from_kref(struct kref *kref)
++{
++ struct v4l2_m2m_dev *m2m_dev = container_of(kref, struct v4l2_m2m_dev, kref);
++
++ v4l2_m2m_release(m2m_dev);
++}
++
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_put(&m2m_dev->kref, v4l2_m2m_release_from_kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_put);
++
+ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index 500f81f399dfa..08e3c9c4f1e9d 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -544,6 +544,27 @@ v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ */
+ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+
++/**
++ * v4l2_m2m_get() - take a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * This is used to share the M2M device across multiple devices. This
++ * can be used to avoid scheduling two hardware nodes concurrently.
++ */
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);
++
++/**
++ * v4l2_m2m_put() - remove a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * Once the M2M device has no more references, v4l2_m2m_release() will be
++ * called automatically. Users of this method should never call
++ * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
++ */
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);
++
+ /**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+--
+2.51.0
+
--- /dev/null
+From a5c324444d578d055bcc015e6efe420c204a374d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:26 +0800
+Subject: media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit e0203ddf9af7c8e170e1e99ce83b4dc07f0cd765 ]
+
+For the i.MX8MQ platform, there is a hardware limitation: the g1 VPU and
+g2 VPU cannot decode simultaneously; otherwise, it will cause below bus
+error and produce corrupted pictures, even potentially lead to system hang.
+
+[ 110.527986] hantro-vpu 38310000.video-codec: frame decode timed out.
+[ 110.583517] hantro-vpu 38310000.video-codec: bus error detected.
+
+Therefore, it is necessary to ensure that g1 and g2 operate alternately.
+This allows for successful multi-instance decoding of H.264 and HEVC.
+
+To achieve this, g1 and g2 share the same v4l2_m2m_dev, and then the
+v4l2_m2m_dev can handle the scheduling.
+
+Fixes: cb5dd5a0fa518 ("media: hantro: Introduce G2/HEVC decoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Co-developed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/verisilicon/hantro.h | 2 +
+ .../media/platform/verisilicon/hantro_drv.c | 42 +++++++++++++++++--
+ .../media/platform/verisilicon/imx8m_vpu_hw.c | 8 ++++
+ 3 files changed, 49 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
+index e0fdc4535b2d7..0353de154a1ec 100644
+--- a/drivers/media/platform/verisilicon/hantro.h
++++ b/drivers/media/platform/verisilicon/hantro.h
+@@ -77,6 +77,7 @@ struct hantro_irq {
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
++ * @shared_devices: an array of device ids that cannot run concurrently
+ */
+ struct hantro_variant {
+ unsigned int enc_offset;
+@@ -101,6 +102,7 @@ struct hantro_variant {
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
++ const struct of_device_id *shared_devices;
+ };
+
+ /**
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index e0c11fe8b55ca..418cfe3a14146 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/clk.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -1035,6 +1036,41 @@ static int hantro_disable_multicore(struct hantro_dev *vpu)
+ return 0;
+ }
+
++static struct v4l2_m2m_dev *hantro_get_v4l2_m2m_dev(struct hantro_dev *vpu)
++{
++ struct device_node *node;
++ struct hantro_dev *shared_vpu;
++
++ if (!vpu->variant || !vpu->variant->shared_devices)
++ goto init_new_m2m_dev;
++
++ for_each_matching_node(node, vpu->variant->shared_devices) {
++ struct platform_device *pdev;
++ struct v4l2_m2m_dev *m2m_dev;
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev)
++ continue;
++
++ shared_vpu = platform_get_drvdata(pdev);
++ if (IS_ERR_OR_NULL(shared_vpu) || shared_vpu == vpu) {
++ platform_device_put(pdev);
++ continue;
++ }
++
++ v4l2_m2m_get(shared_vpu->m2m_dev);
++ m2m_dev = shared_vpu->m2m_dev;
++ platform_device_put(pdev);
++
++ of_node_put(node);
++
++ return m2m_dev;
++ }
++
++init_new_m2m_dev:
++ return v4l2_m2m_init(&vpu_m2m_ops);
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -1186,7 +1222,7 @@ static int hantro_probe(struct platform_device *pdev)
+ }
+ platform_set_drvdata(pdev, vpu);
+
+- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
++ vpu->m2m_dev = hantro_get_v4l2_m2m_dev(vpu);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+@@ -1225,7 +1261,7 @@ static int hantro_probe(struct platform_device *pdev)
+ hantro_remove_enc_func(vpu);
+ err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ err_clk_unprepare:
+@@ -1248,7 +1284,7 @@ static void hantro_remove(struct platform_device *pdev)
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+index 5be0e2e76882f..6f8e43b7f1575 100644
+--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+@@ -343,6 +343,12 @@ const struct hantro_variant imx8mq_vpu_variant = {
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+ };
+
++static const struct of_device_id imx8mq_vpu_shared_resources[] __initconst = {
++ { .compatible = "nxp,imx8mq-vpu-g1", },
++ { .compatible = "nxp,imx8mq-vpu-g2", },
++ { /* sentinel */ }
++};
++
+ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+@@ -356,6 +362,7 @@ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mq_vpu_g2_variant = {
+@@ -371,6 +378,7 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mm_vpu_g1_variant = {
+--
+2.51.0
+
--- /dev/null
+From d29fbd2f133031eab995752ec55692e8bc2c6708 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index dd6150d200e89..3609bfd3c64be 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -685,6 +685,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From a3a989e34084fa8e54dc72f398a942121343b753 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:22 +0100
+Subject: memory: mtk-smi: fix device leaks on common probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 6cfa038bddd710f544076ea2ef7792fc82fbedd6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during common probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: 47404757702e ("memory: mtk-smi: Add device link for smi-sub-common")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 5.16: 038ae37c510f
+Cc: stable@vger.kernel.org # 5.16
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-2-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 733e22f695ab7..dd6150d200e89 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -674,6 +674,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
++ put_device(larb->smi_common_dev);
+ return ret;
+ }
+
+@@ -917,6 +918,7 @@ static void mtk_smi_common_remove(struct platform_device *pdev)
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
++ put_device(common->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From a44ecc48dd0a651b400da6943f669d2e5dc3e228 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 17:19:00 +0900
+Subject: mm/slab: use prandom if !allow_spin
+
+From: Harry Yoo <harry.yoo@oracle.com>
+
+[ Upstream commit a1e244a9f177894969c6cd5ebbc6d72c19fc4a7a ]
+
+When CONFIG_SLAB_FREELIST_RANDOM is enabled and get_random_u32()
+is called in an NMI context, lockdep complains because it acquires
+a local_lock:
+
+ ================================
+ WARNING: inconsistent lock state
+ 6.19.0-rc5-slab-for-next+ #325 Tainted: G N
+ --------------------------------
+ inconsistent {INITIAL USE} -> {IN-NMI} usage.
+ kunit_try_catch/8312 [HC2[2]:SC0[0]:HE0:SE1] takes:
+ ffff88a02ec49cc0 (batched_entropy_u32.lock){-.-.}-{3:3}, at: get_random_u32+0x7f/0x2e0
+ {INITIAL USE} state was registered at:
+ lock_acquire+0xd9/0x2f0
+ get_random_u32+0x93/0x2e0
+ __get_random_u32_below+0x17/0x70
+ cache_random_seq_create+0x121/0x1c0
+ init_cache_random_seq+0x5d/0x110
+ do_kmem_cache_create+0x1e0/0xa30
+ __kmem_cache_create_args+0x4ec/0x830
+ create_kmalloc_caches+0xe6/0x130
+ kmem_cache_init+0x1b1/0x660
+ mm_core_init+0x1d8/0x4b0
+ start_kernel+0x620/0xcd0
+ x86_64_start_reservations+0x18/0x30
+ x86_64_start_kernel+0xf3/0x140
+ common_startup_64+0x13e/0x148
+ irq event stamp: 76
+ hardirqs last enabled at (75): [<ffffffff8298b77a>] exc_nmi+0x11a/0x240
+ hardirqs last disabled at (76): [<ffffffff8298b991>] sysvec_irq_work+0x11/0x110
+ softirqs last enabled at (0): [<ffffffff813b2dda>] copy_process+0xc7a/0x2350
+ softirqs last disabled at (0): [<0000000000000000>] 0x0
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(batched_entropy_u32.lock);
+ <Interrupt>
+ lock(batched_entropy_u32.lock);
+
+ *** DEADLOCK ***
+
+Fix this by using pseudo-random number generator if !allow_spin.
+This means kmalloc_nolock() users won't get truly random numbers,
+but there is not much we can do about it.
+
+Note that an NMI handler might interrupt prandom_u32_state() and
+change the random state, but that's safe.
+
+Link: https://lore.kernel.org/all/0c33bdee-6de8-4d9f-92ca-4f72c1b6fb9f@suse.cz
+Fixes: af92793e52c3 ("slab: Introduce kmalloc_nolock() and kfree_nolock().")
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
+Link: https://patch.msgid.link/20260210081900.329447-3-harry.yoo@oracle.com
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/slub.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 4db84fbc71bab..870b8e00a938b 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -41,6 +41,7 @@
+ #include <linux/prefetch.h>
+ #include <linux/memcontrol.h>
+ #include <linux/random.h>
++#include <linux/prandom.h>
+ #include <kunit/test.h>
+ #include <kunit/test-bug.h>
+ #include <linux/sort.h>
+@@ -3176,8 +3177,11 @@ static void *next_freelist_entry(struct kmem_cache *s,
+ return (char *)start + idx;
+ }
+
++static DEFINE_PER_CPU(struct rnd_state, slab_rnd_state);
++
+ /* Shuffle the single linked freelist based on a random pre-computed sequence */
+-static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
++static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
++ bool allow_spin)
+ {
+ void *start;
+ void *cur;
+@@ -3188,7 +3192,19 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
+ return false;
+
+ freelist_count = oo_objects(s->oo);
+- pos = get_random_u32_below(freelist_count);
++ if (allow_spin) {
++ pos = get_random_u32_below(freelist_count);
++ } else {
++ struct rnd_state *state;
++
++ /*
++ * An interrupt or NMI handler might interrupt and change
++ * the state in the middle, but that's safe.
++ */
++ state = &get_cpu_var(slab_rnd_state);
++ pos = prandom_u32_state(state) % freelist_count;
++ put_cpu_var(slab_rnd_state);
++ }
+
+ page_limit = slab->objects * s->size;
+ start = fixup_red_left(s, slab_address(slab));
+@@ -3215,7 +3231,8 @@ static inline int init_cache_random_seq(struct kmem_cache *s)
+ return 0;
+ }
+ static inline void init_freelist_randomization(void) { }
+-static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
++static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
++ bool allow_spin)
+ {
+ return false;
+ }
+@@ -3300,7 +3317,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+
+ setup_slab_debug(s, slab, start);
+
+- shuffle = shuffle_freelist(s, slab);
++ shuffle = shuffle_freelist(s, slab, allow_spin);
+
+ if (!shuffle) {
+ start = fixup_red_left(s, start);
+@@ -8511,6 +8528,9 @@ void __init kmem_cache_init_late(void)
+ {
+ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+ WARN_ON(!flushwq);
++#ifdef CONFIG_SLAB_FREELIST_RANDOM
++ prandom_init_once(&slab_rnd_state);
++#endif
+ }
+
+ struct kmem_cache *
+--
+2.51.0
+
--- /dev/null
+From a9dc65be8e768c3a58023b806f560f4e426bbcf0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index 0472bcdff1307..b5729d6c0b47c 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -115,6 +115,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -140,7 +142,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -347,6 +349,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From 9684c57bef5e48fb32329af7cb72c12aec4cbc2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Dec 2025 22:21:44 +0530
+Subject: net: qrtr: Drop the MHI auto_queue feature for IPCR DL channels
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+[ Upstream commit 51731792a25cb312ca94cdccfa139eb46de1b2ef ]
+
+MHI stack offers the 'auto_queue' feature, which allows the MHI stack to
+auto queue the buffers for the RX path (DL channel). Though this feature
+simplifies the client driver design, it introduces race between the client
+drivers and the MHI stack. For instance, with auto_queue, the 'dl_callback'
+for the DL channel may get called before the client driver is fully probed.
+This means, by the time the dl_callback gets called, the client driver's
+structures might not be initialized, leading to NULL ptr dereference.
+
+Currently, the drivers have to workaround this issue by initializing the
+internal structures before calling mhi_prepare_for_transfer_autoqueue().
+But even so, there is a chance that the client driver's internal code path
+may call the MHI queue APIs before mhi_prepare_for_transfer_autoqueue() is
+called, leading to similar NULL ptr dereference. This issue has been
+reported on the Qcom X1E80100 CRD machines affecting boot.
+
+So to properly fix all these races, drop the MHI 'auto_queue' feature
+altogether and let the client driver (QRTR) manage the RX buffers manually.
+In the QRTR driver, queue the RX buffers based on the ring length during
+probe and recycle the buffers in 'dl_callback' once they are consumed. This
+also warrants removing the setting of 'auto_queue' flag from controller
+drivers.
+
+Currently, this 'auto_queue' feature is only enabled for IPCR DL channel.
+So only the QRTR client driver requires the modification.
+
+Fixes: 227fee5fc99e ("bus: mhi: core: Add an API for auto queueing buffers for DL channel")
+Fixes: 68a838b84eff ("net: qrtr: start MHI channel after endpoit creation")
+Reported-by: Johan Hovold <johan@kernel.org>
+Closes: https://lore.kernel.org/linux-arm-msm/ZyTtVdkCCES0lkl4@hovoldconsulting.com
+Suggested-by: Chris Lew <quic_clew@quicinc.com>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Reviewed-by: Loic Poulain <loic.poulain@oss.qualcomm.com>
+Acked-by: Jeff Johnson <jjohnson@kernel.org> # drivers/net/wireless/ath/...
+Acked-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251218-qrtr-fix-v2-1-c7499bfcfbe0@oss.qualcomm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/qaic/mhi_controller.c | 44 -----------------
+ drivers/bus/mhi/host/pci_generic.c | 20 +-------
+ drivers/net/wireless/ath/ath11k/mhi.c | 4 --
+ drivers/net/wireless/ath/ath12k/mhi.c | 4 --
+ net/qrtr/mhi.c | 69 ++++++++++++++++++++++-----
+ 5 files changed, 60 insertions(+), 81 deletions(-)
+
+diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
+index 13a14c6c61689..4d787f77ce419 100644
+--- a/drivers/accel/qaic/mhi_controller.c
++++ b/drivers/accel/qaic/mhi_controller.c
+@@ -39,7 +39,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -55,7 +54,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -71,7 +69,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -87,7 +84,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -103,7 +99,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -119,7 +114,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -135,7 +129,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -151,7 +144,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -167,7 +159,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -183,7 +174,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -199,7 +189,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -215,7 +204,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -231,7 +219,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -247,7 +234,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -263,7 +249,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -279,7 +264,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -295,7 +279,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -311,7 +294,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -327,7 +309,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -343,7 +324,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -359,7 +339,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -375,7 +354,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -391,7 +369,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -407,7 +384,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -423,7 +399,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -439,7 +414,6 @@ static const struct mhi_channel_config aic100_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ .wake_capable = false,
+ },
+ };
+@@ -458,7 +432,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -474,7 +447,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -490,7 +462,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -506,7 +477,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -522,7 +492,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -538,7 +507,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -554,7 +522,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -570,7 +537,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -586,7 +552,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -602,7 +567,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -618,7 +582,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -634,7 +597,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -650,7 +612,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -666,7 +627,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -682,7 +642,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -698,7 +657,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -714,7 +672,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+@@ -730,7 +687,6 @@ static const struct mhi_channel_config aic200_channels[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ .wake_capable = false,
+ },
+ };
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index 3d8c9729fcfc5..21f0522d9ff29 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -94,22 +94,6 @@ struct mhi_pci_dev_info {
+ .doorbell_mode_switch = false, \
+ }
+
+-#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+- { \
+- .num = ch_num, \
+- .name = ch_name, \
+- .num_elements = el_count, \
+- .event_ring = ev_ring, \
+- .dir = DMA_FROM_DEVICE, \
+- .ee_mask = BIT(MHI_EE_AMSS), \
+- .pollcfg = 0, \
+- .doorbell = MHI_DB_BRST_DISABLE, \
+- .lpm_notify = false, \
+- .offload_channel = false, \
+- .doorbell_mode_switch = false, \
+- .auto_queue = true, \
+- }
+-
+ #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+@@ -329,7 +313,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
+- MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
++ MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
+@@ -751,7 +735,7 @@ static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+- MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
++ MHI_CHANNEL_CONFIG_DL(21, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+ };
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index acd76e9392d31..d2c44f7f9b622 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -34,7 +34,6 @@ static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ },
+ {
+ .num = 21,
+@@ -48,7 +47,6 @@ static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ },
+ };
+
+@@ -99,7 +97,6 @@ static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ },
+ {
+ .num = 21,
+@@ -113,7 +110,6 @@ static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ },
+ };
+
+diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
+index 08f44baf182a5..2dbdb95ae7bea 100644
+--- a/drivers/net/wireless/ath/ath12k/mhi.c
++++ b/drivers/net/wireless/ath/ath12k/mhi.c
+@@ -31,7 +31,6 @@ static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ },
+ {
+ .num = 21,
+@@ -45,7 +44,6 @@ static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ },
+ };
+
+@@ -96,7 +94,6 @@ static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = false,
+ },
+ {
+ .num = 21,
+@@ -110,7 +107,6 @@ static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+- .auto_queue = true,
+ },
+ };
+
+diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
+index 69f53625a049d..80e341d2f8a45 100644
+--- a/net/qrtr/mhi.c
++++ b/net/qrtr/mhi.c
+@@ -24,13 +24,25 @@ static void qcom_mhi_qrtr_dl_callback(struct mhi_device *mhi_dev,
+ struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+ int rc;
+
+- if (!qdev || mhi_res->transaction_status)
++ if (!qdev || (mhi_res->transaction_status && mhi_res->transaction_status != -ENOTCONN))
+ return;
+
++ /* Channel got reset. So just free the buffer */
++ if (mhi_res->transaction_status == -ENOTCONN) {
++ devm_kfree(&mhi_dev->dev, mhi_res->buf_addr);
++ return;
++ }
++
+ rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr,
+ mhi_res->bytes_xferd);
+ if (rc == -EINVAL)
+ dev_err(qdev->dev, "invalid ipcrouter packet\n");
++
++ /* Done with the buffer, now recycle it for future use */
++ rc = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, mhi_res->buf_addr,
++ mhi_dev->mhi_cntrl->buffer_len, MHI_EOT);
++ if (rc)
++ dev_err(&mhi_dev->dev, "Failed to recycle the buffer: %d\n", rc);
+ }
+
+ /* From QRTR to MHI */
+@@ -72,6 +84,29 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ return rc;
+ }
+
++static int qcom_mhi_qrtr_queue_dl_buffers(struct mhi_device *mhi_dev)
++{
++ u32 free_desc;
++ void *buf;
++ int ret;
++
++ free_desc = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
++ while (free_desc--) {
++ buf = devm_kmalloc(&mhi_dev->dev, mhi_dev->mhi_cntrl->buffer_len, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, mhi_dev->mhi_cntrl->buffer_len,
++ MHI_EOT);
++ if (ret) {
++ dev_err(&mhi_dev->dev, "Failed to queue buffer: %d\n", ret);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
+ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+ {
+@@ -87,20 +122,30 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
+ qdev->ep.xmit = qcom_mhi_qrtr_send;
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+- rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+- if (rc)
+- return rc;
+
+ /* start channels */
+- rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+- if (rc) {
+- qrtr_endpoint_unregister(&qdev->ep);
++ rc = mhi_prepare_for_transfer(mhi_dev);
++ if (rc)
+ return rc;
+- }
++
++ rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
++ if (rc)
++ goto err_unprepare;
++
++ rc = qcom_mhi_qrtr_queue_dl_buffers(mhi_dev);
++ if (rc)
++ goto err_unregister;
+
+ dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
+
+ return 0;
++
++err_unregister:
++ qrtr_endpoint_unregister(&qdev->ep);
++err_unprepare:
++ mhi_unprepare_from_transfer(mhi_dev);
++
++ return rc;
+ }
+
+ static void qcom_mhi_qrtr_remove(struct mhi_device *mhi_dev)
+@@ -151,11 +196,13 @@ static int __maybe_unused qcom_mhi_qrtr_pm_resume_early(struct device *dev)
+ if (state == MHI_STATE_M3)
+ return 0;
+
+- rc = mhi_prepare_for_transfer_autoqueue(mhi_dev);
+- if (rc)
++ rc = mhi_prepare_for_transfer(mhi_dev);
++ if (rc) {
+ dev_err(dev, "failed to prepare for autoqueue transfer %d\n", rc);
++ return rc;
++ }
+
+- return rc;
++ return qcom_mhi_qrtr_queue_dl_buffers(mhi_dev);
+ }
+
+ static const struct dev_pm_ops qcom_mhi_qrtr_pm_ops = {
+--
+2.51.0
+
--- /dev/null
+From 5d7b2012aea108a8b37e167b069741a9ee4a26be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Nov 2025 22:59:40 -0800
+Subject: PCI: Add preceding capability position support in PCI_FIND_NEXT_*_CAP
+ macros
+
+From: Qiang Yu <qiang.yu@oss.qualcomm.com>
+
+[ Upstream commit a2582e05e39adf9ab82a02561cd6f70738540ae0 ]
+
+Add support for finding the preceding capability position in PCI
+capability list by extending the capability finding macros with an
+additional parameter. This functionality is essential for modifying PCI
+capability list, as it provides the necessary information to update the
+"next" pointer of the predecessor capability when removing entries.
+
+Modify two macros to accept a new 'prev_ptr' parameter:
+- PCI_FIND_NEXT_CAP - Now accepts 'prev_ptr' parameter for standard
+ capabilities
+- PCI_FIND_NEXT_EXT_CAP - Now accepts 'prev_ptr' parameter for extended
+ capabilities
+
+When a capability is found, these macros:
+- Store the position of the preceding capability in *prev_ptr
+ (if prev_ptr != NULL)
+- Maintain all existing functionality when prev_ptr is NULL
+
+Update current callers to accommodate this API change by passing NULL to
+'prev_ptr' argument if they do not care about the preceding capability
+position.
+
+No functional changes to driver behavior result from this commit as it
+maintains the existing capability finding functionality while adding the
+infrastructure for future capability removal operations.
+
+Signed-off-by: Qiang Yu <qiang.yu@oss.qualcomm.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/20251109-remove_cap-v1-1-2208f46f4dc2@oss.qualcomm.com
+Stable-dep-of: 43d67ec26b32 ("PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/cadence/pcie-cadence.c | 4 ++--
+ .../pci/controller/dwc/pcie-designware-ep.c | 2 +-
+ drivers/pci/controller/dwc/pcie-designware.c | 6 ++---
+ drivers/pci/pci.c | 8 +++----
+ drivers/pci/pci.h | 23 +++++++++++++++----
+ 5 files changed, 29 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
+index bd683d0fecb22..d614452861f77 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.c
++++ b/drivers/pci/controller/cadence/pcie-cadence.c
+@@ -13,13 +13,13 @@
+ u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
+ {
+ return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
+- cap, pcie);
++ cap, NULL, pcie);
+ }
+ EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
+
+ u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
+ {
+- return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
++ return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, NULL, pcie);
+ }
+ EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 6d3beec92b54e..7f10d764f52b0 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
+ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+ {
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+- cap, ep, func_no);
++ cap, NULL, ep, func_no);
+ }
+
+ /**
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 75fc8b767fccf..5d7a7e6f5724e 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -226,13 +226,13 @@ void dw_pcie_version_detect(struct dw_pcie *pci)
+ u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+ {
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+- pci);
++ NULL, pci);
+ }
+ EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+
+ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+ {
+- return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
++ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, NULL, pci);
+ }
+ EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+@@ -246,7 +246,7 @@ static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ return 0;
+
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+- PCI_EXT_CAP_ID_VNDR, pci))) {
++ PCI_EXT_CAP_ID_VNDR, NULL, pci))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index d4e70570d09f2..e128696d5b761 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -426,7 +426,7 @@ static int pci_dev_str_match(struct pci_dev *dev, const char *p,
+ static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap)
+ {
+- return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
++ return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, NULL, bus, devfn);
+ }
+
+ u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+@@ -531,7 +531,7 @@ u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
+ return 0;
+
+ return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
+- dev->bus, dev->devfn);
++ NULL, dev->bus, dev->devfn);
+ }
+ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
+
+@@ -600,7 +600,7 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
+ mask = HT_5BIT_CAP_MASK;
+
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
+- PCI_CAP_ID_HT, dev->bus, dev->devfn);
++ PCI_CAP_ID_HT, NULL, dev->bus, dev->devfn);
+ while (pos) {
+ rc = pci_read_config_byte(dev, pos + 3, &cap);
+ if (rc != PCIBIOS_SUCCESSFUL)
+@@ -611,7 +611,7 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
+
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
+ pos + PCI_CAP_LIST_NEXT,
+- PCI_CAP_ID_HT, dev->bus,
++ PCI_CAP_ID_HT, NULL, dev->bus,
+ dev->devfn);
+ }
+
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 36cf1ffb2023c..5510c103be2d3 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -106,17 +106,21 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+ * @read_cfg: Function pointer for reading PCI config space
+ * @start: Starting position to begin search
+ * @cap: Capability ID to find
++ * @prev_ptr: Pointer to store position of preceding capability (optional)
+ * @args: Arguments to pass to read_cfg function
+ *
+- * Search the capability list in PCI config space to find @cap.
++ * Search the capability list in PCI config space to find @cap. If
++ * found, update *prev_ptr with the position of the preceding capability
++ * (if prev_ptr != NULL)
+ * Implements TTL (time-to-live) protection against infinite loops.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+-#define PCI_FIND_NEXT_CAP(read_cfg, start, cap, args...) \
++#define PCI_FIND_NEXT_CAP(read_cfg, start, cap, prev_ptr, args...) \
+ ({ \
+ int __ttl = PCI_FIND_CAP_TTL; \
+- u8 __id, __found_pos = 0; \
++ u8 __id, __found_pos = 0; \
++ u8 __prev_pos = (start); \
+ u8 __pos = (start); \
+ u16 __ent; \
+ \
+@@ -135,9 +139,12 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+ \
+ if (__id == (cap)) { \
+ __found_pos = __pos; \
++ if (prev_ptr != NULL) \
++ *(u8 *)prev_ptr = __prev_pos; \
+ break; \
+ } \
+ \
++ __prev_pos = __pos; \
+ __pos = FIELD_GET(PCI_CAP_LIST_NEXT_MASK, __ent); \
+ } \
+ __found_pos; \
+@@ -149,21 +156,26 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+ * @read_cfg: Function pointer for reading PCI config space
+ * @start: Starting position to begin search (0 for initial search)
+ * @cap: Extended capability ID to find
++ * @prev_ptr: Pointer to store position of preceding capability (optional)
+ * @args: Arguments to pass to read_cfg function
+ *
+ * Search the extended capability list in PCI config space to find @cap.
++ * If found, update *prev_ptr with the position of the preceding capability
++ * (if prev_ptr != NULL)
+ * Implements TTL protection against infinite loops using a calculated
+ * maximum search count.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+-#define PCI_FIND_NEXT_EXT_CAP(read_cfg, start, cap, args...) \
++#define PCI_FIND_NEXT_EXT_CAP(read_cfg, start, cap, prev_ptr, args...) \
+ ({ \
+ u16 __pos = (start) ?: PCI_CFG_SPACE_SIZE; \
+ u16 __found_pos = 0; \
++ u16 __prev_pos; \
+ int __ttl, __ret; \
+ u32 __header; \
+ \
++ __prev_pos = __pos; \
+ __ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; \
+ while (__ttl-- > 0 && __pos >= PCI_CFG_SPACE_SIZE) { \
+ __ret = read_cfg##_dword(args, __pos, &__header); \
+@@ -175,9 +187,12 @@ bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+ \
+ if (PCI_EXT_CAP_ID(__header) == (cap) && __pos != start) {\
+ __found_pos = __pos; \
++ if (prev_ptr != NULL) \
++ *(u16 *)prev_ptr = __prev_pos; \
+ break; \
+ } \
+ \
++ __prev_pos = __pos; \
+ __pos = PCI_EXT_CAP_NEXT(__header); \
+ } \
+ __found_pos; \
+--
+2.51.0
+
--- /dev/null
+From 51b503660239c3b10d044872f211a19a1ab65518 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 09:33:25 +0800
+Subject: PCI: dw-rockchip: Change get_ltssm() to provide L1 Substates info
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit f994bb8f1c94726e0124356ccd31c3c23a8a69f4 ]
+
+Rename rockchip_pcie_get_ltssm() to rockchip_pcie_get_ltssm_reg() and add
+rockchip_pcie_get_ltssm() to get_ltssm() callback in order to show the
+proper L1 Substates. The PCIE_CLIENT_LTSSM_STATUS[5:0] register returns
+the same LTSSM layout as enum dw_pcie_ltssm. So the driver just need to
+convey L1 PM Substates by returning the proper value defined in
+pcie-designware.h.
+
+ cat /sys/kernel/debug/dwc_pcie_a40000000.pcie/ltssm_status
+ L1_2 (0x142)
+
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/1765503205-22184-2-git-send-email-shawn.lin@rock-chips.com
+Stable-dep-of: 180c3cfe3678 ("Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up IRQ"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 29 ++++++++++++++++---
+ 1 file changed, 25 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 85999fc316c9f..0a5d1cfb88437 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -68,6 +68,11 @@
+ #define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0)
+ #define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1)
+
++/* RASDES TBA information */
++#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
++#define PCIE_CLIENT_CDM_RASDES_TBA_L1_1 BIT(4)
++#define PCIE_CLIENT_CDM_RASDES_TBA_L1_2 BIT(5)
++
+ /* Hot Reset Control Register */
+ #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+ #define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+@@ -184,11 +189,26 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+ return 0;
+ }
+
+-static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
++static u32 rockchip_pcie_get_ltssm_reg(struct rockchip_pcie *rockchip)
+ {
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ }
+
++static enum dw_pcie_ltssm rockchip_pcie_get_ltssm(struct dw_pcie *pci)
++{
++ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
++ u32 val = rockchip_pcie_readl_apb(rockchip,
++ PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
++
++ if (val & PCIE_CLIENT_CDM_RASDES_TBA_L1_1)
++ return DW_PCIE_LTSSM_L1_1;
++
++ if (val & PCIE_CLIENT_CDM_RASDES_TBA_L1_2)
++ return DW_PCIE_LTSSM_L1_2;
++
++ return rockchip_pcie_get_ltssm_reg(rockchip) & PCIE_LTSSM_STATUS_MASK;
++}
++
+ static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+ {
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+@@ -204,7 +224,7 @@ static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+ static bool rockchip_pcie_link_up(struct dw_pcie *pci)
+ {
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+- u32 val = rockchip_pcie_get_ltssm(rockchip);
++ u32 val = rockchip_pcie_get_ltssm_reg(rockchip);
+
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+ }
+@@ -494,6 +514,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
++ .get_ltssm = rockchip_pcie_get_ltssm,
+ };
+
+ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+@@ -508,7 +529,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+- dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
++ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm_reg(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+@@ -535,7 +556,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+- dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
++ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm_reg(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+--
+2.51.0
+
--- /dev/null
+From 85a7f30976d7d7041e93140e48b0863ce7eda096 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Nov 2025 15:42:17 -0600
+Subject: PCI: dw-rockchip: Configure L1SS support
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit b5e719f26107f4a7f82946dc5be92dceb9b443cb ]
+
+L1 PM Substates for RC mode require support in the dw-rockchip driver
+including proper handling of the CLKREQ# sideband signal. It is mostly
+handled by hardware, but software still needs to set the clkreq fields
+in the PCIE_CLIENT_POWER_CON register to match the hardware implementation.
+
+For more details, see section '18.6.6.4 L1 Substate' in the RK3568 TRM 1.1
+Part 2, or section '11.6.6.4 L1 Substate' in the RK3588 TRM 1.0 Part2.
+
+[bhelgaas: set pci->l1ss_support so DWC core preserves L1SS Capability bits;
+drop corresponding code here, include updates from
+https://lore.kernel.org/r/aRRG8wv13HxOCqgA@ryzen]
+
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Link: https://patch.msgid.link/1761187883-150120-1-git-send-email-shawn.lin@rock-chips.com
+Link: https://patch.msgid.link/20251118214312.2598220-4-helgaas@kernel.org
+Stable-dep-of: 180c3cfe3678 ("Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up IRQ"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 40 +++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 7be6351686e21..85999fc316c9f 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -62,6 +62,12 @@
+ /* Interrupt Mask Register Related to Miscellaneous Operation */
+ #define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
++/* Power Management Control Register */
++#define PCIE_CLIENT_POWER_CON 0x2c
++#define PCIE_CLKREQ_READY FIELD_PREP_WM16(BIT(0), 1)
++#define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0)
++#define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1)
++
+ /* Hot Reset Control Register */
+ #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+ #define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+@@ -87,6 +93,7 @@ struct rockchip_pcie {
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
++ bool supports_clkreq;
+ };
+
+ struct rockchip_pcie_of_data {
+@@ -202,6 +209,35 @@ static bool rockchip_pcie_link_up(struct dw_pcie *pci)
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+ }
+
++/*
++ * See e.g. section '11.6.6.4 L1 Substate' in the RK3588 TRM V1.0 for the steps
++ * needed to support L1 substates. Currently, just enable L1 substates for RC
++ * mode if CLKREQ# is properly connected and supports-clkreq is present in DT.
++ * For EP mode, there are more things should be done to actually save power in
++ * L1 substates, so disable L1 substates until there is proper support.
++ */
++static void rockchip_pcie_configure_l1ss(struct dw_pcie *pci)
++{
++ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
++
++ /* Enable L1 substates if CLKREQ# is properly connected */
++ if (rockchip->supports_clkreq) {
++ rockchip_pcie_writel_apb(rockchip, PCIE_CLKREQ_READY,
++ PCIE_CLIENT_POWER_CON);
++ pci->l1ss_support = true;
++ return;
++ }
++
++ /*
++ * Otherwise, assert CLKREQ# unconditionally. Since
++ * pci->l1ss_support is not set, the DWC core will prevent L1
++ * Substates support from being advertised.
++ */
++ rockchip_pcie_writel_apb(rockchip,
++ PCIE_CLKREQ_PULL_DOWN | PCIE_CLKREQ_NOT_READY,
++ PCIE_CLIENT_POWER_CON);
++}
++
+ static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+ {
+ u32 cap, lnkcap;
+@@ -268,6 +304,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
+ rockchip);
+
++ rockchip_pcie_configure_l1ss(pci);
+ rockchip_pcie_enable_l0s(pci);
+
+ /* Disable Root Ports BAR0 and BAR1 as they report bogus size */
+@@ -420,6 +457,9 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
++ rockchip->supports_clkreq = of_property_read_bool(pdev->dev.of_node,
++ "supports-clkreq");
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7fc49a88bac1b81385a7e8e4595e6688ab9f591a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Dec 2025 09:33:24 +0800
+Subject: PCI: dwc: Add L1 Substates context to ltssm_status of debugfs
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit 679ec639f29cbdaf36bd79bf3e98240fffa335ee ]
+
+DWC core couldn't distinguish LTSSM state among L1.0, L1.1 and L1.2. But
+the vendor glue driver may implement additional logic to convey this
+information. So add two pseudo definitions for vendor glue drivers to
+translate their internal L1 Substates for debugfs to show.
+
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/1765503205-22184-1-git-send-email-shawn.lin@rock-chips.com
+Stable-dep-of: 180c3cfe3678 ("Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up IRQ"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-debugfs.c | 2 ++
+ drivers/pci/controller/dwc/pcie-designware.h | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+index 0fbf86c0b97e0..df98fee69892b 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c
++++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+@@ -485,6 +485,8 @@ static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
++ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_1);
++ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_2);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 82336a204569f..6c04ac0196794 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -380,6 +380,10 @@ enum dw_pcie_ltssm {
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
+
++ /* Vendor glue drivers provide pseudo L1 substates from get_ltssm() */
++ DW_PCIE_LTSSM_L1_1 = 0x141,
++ DW_PCIE_LTSSM_L1_2 = 0x142,
++
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 9d64864db26fef72054f7b03026fdda7da01ee03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Nov 2025 22:59:41 -0800
+Subject: PCI: dwc: Add new APIs to remove standard and extended Capability
+
+From: Qiang Yu <qiang.yu@oss.qualcomm.com>
+
+[ Upstream commit 0183562f1e824c0ca6c918309a0978e9a269af3e ]
+
+On some platforms, certain PCIe Capabilities may be present in hardware
+but are not fully implemented as defined in PCIe spec. These incomplete
+capabilities should be hidden from the PCI framework to prevent unexpected
+behavior.
+
+Introduce two APIs to remove a specific PCIe Capability and Extended
+Capability by updating the previous capability's next offset field to skip
+over the unwanted capability. These APIs allow RC drivers to easily hide
+unsupported or partially implemented capabilities from software.
+
+Co-developed-by: Wenbin Yao <wenbin.yao@oss.qualcomm.com>
+Signed-off-by: Wenbin Yao <wenbin.yao@oss.qualcomm.com>
+Signed-off-by: Qiang Yu <qiang.yu@oss.qualcomm.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Link: https://patch.msgid.link/20251109-remove_cap-v1-2-2208f46f4dc2@oss.qualcomm.com
+Stable-dep-of: 43d67ec26b32 ("PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-designware.c | 53 ++++++++++++++++++++
+ drivers/pci/controller/dwc/pcie-designware.h | 2 +
+ 2 files changed, 55 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 5d7a7e6f5724e..345365ea97c74 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -236,6 +236,59 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+ }
+ EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
++void dw_pcie_remove_capability(struct dw_pcie *pci, u8 cap)
++{
++ u8 cap_pos, pre_pos, next_pos;
++ u16 reg;
++
++ cap_pos = PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
++ &pre_pos, pci);
++ if (!cap_pos)
++ return;
++
++ reg = dw_pcie_readw_dbi(pci, cap_pos);
++ next_pos = (reg & 0xff00) >> 8;
++
++ dw_pcie_dbi_ro_wr_en(pci);
++ if (pre_pos == PCI_CAPABILITY_LIST)
++ dw_pcie_writeb_dbi(pci, PCI_CAPABILITY_LIST, next_pos);
++ else
++ dw_pcie_writeb_dbi(pci, pre_pos + 1, next_pos);
++ dw_pcie_dbi_ro_wr_dis(pci);
++}
++EXPORT_SYMBOL_GPL(dw_pcie_remove_capability);
++
++void dw_pcie_remove_ext_capability(struct dw_pcie *pci, u8 cap)
++{
++ int cap_pos, next_pos, pre_pos;
++ u32 pre_header, header;
++
++ cap_pos = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, &pre_pos, pci);
++ if (!cap_pos)
++ return;
++
++ header = dw_pcie_readl_dbi(pci, cap_pos);
++ /*
++ * If the first cap at offset PCI_CFG_SPACE_SIZE is removed,
++ * only set it's capid to zero as it cannot be skipped.
++ */
++ if (cap_pos == PCI_CFG_SPACE_SIZE) {
++ dw_pcie_dbi_ro_wr_en(pci);
++ dw_pcie_writel_dbi(pci, cap_pos, header & 0xffff0000);
++ dw_pcie_dbi_ro_wr_dis(pci);
++ return;
++ }
++
++ pre_header = dw_pcie_readl_dbi(pci, pre_pos);
++ next_pos = PCI_EXT_CAP_NEXT(header);
++
++ dw_pcie_dbi_ro_wr_en(pci);
++ dw_pcie_writel_dbi(pci, pre_pos,
++ (pre_header & 0xfffff) | (next_pos << 20));
++ dw_pcie_dbi_ro_wr_dis(pci);
++}
++EXPORT_SYMBOL_GPL(dw_pcie_remove_ext_capability);
++
+ static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+ {
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 6c04ac0196794..a59ea4078cca8 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -557,6 +557,8 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
+
+ u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
++void dw_pcie_remove_capability(struct dw_pcie *pci, u8 cap);
++void dw_pcie_remove_ext_capability(struct dw_pcie *pci, u8 cap);
+ u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+ u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
+
+--
+2.51.0
+
--- /dev/null
+From 4280d566495c3f7f0d19a2e1dbeff94b54614ab3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Nov 2025 15:42:15 -0600
+Subject: PCI: dwc: Advertise L1 PM Substates only if driver requests it
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+[ Upstream commit a00bba406b5a682764ecb507e580ca8159196aa3 ]
+
+L1 PM Substates require the CLKREQ# signal and may also require
+device-specific support. If CLKREQ# is not supported or driver support is
+lacking, enabling L1.1 or L1.2 may cause errors when accessing devices,
+e.g.,
+
+ nvme nvme0: controller is down; will reset: CSTS=0xffffffff, PCI_STATUS=0x10
+
+If the kernel is built with CONFIG_PCIEASPM_POWER_SUPERSAVE=y or users
+enable L1.x via sysfs, users may trip over these errors even if L1
+Substates haven't been enabled by firmware or the driver.
+
+To prevent such errors, disable advertising the L1 PM Substates unless the
+driver sets "dw_pcie.l1ss_support" to indicate that it knows CLKREQ# is
+present and any device-specific configuration has been done.
+
+Set "dw_pcie.l1ss_support" in tegra194 (if DT includes the
+"supports-clkreq' property) and qcom (for cfg_2_7_0, cfg_1_9_0, cfg_1_34_0,
+and cfg_sc8280xp controllers) so they can continue to use L1 Substates.
+
+Based on Niklas's patch:
+https://patch.msgid.link/20251017163252.598812-2-cassel@kernel.org
+
+[bhelgaas: drop hiding for endpoints]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Link: https://patch.msgid.link/20251118214312.2598220-2-helgaas@kernel.org
+Stable-dep-of: 180c3cfe3678 ("Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up IRQ"")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pci/controller/dwc/pcie-designware-host.c | 2 ++
+ drivers/pci/controller/dwc/pcie-designware.c | 24 +++++++++++++++++++
+ drivers/pci/controller/dwc/pcie-designware.h | 2 ++
+ drivers/pci/controller/dwc/pcie-qcom.c | 2 ++
+ drivers/pci/controller/dwc/pcie-tegra194.c | 3 +++
+ 5 files changed, 33 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 925d5f818f12b..894bf23529df5 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -1081,6 +1081,8 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+ dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
++ dw_pcie_hide_unsupported_l1ss(pci);
++
+ dw_pcie_config_presets(pp);
+ /*
+ * If the platform provides its own child bus config accesses, it means
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 06eca858eb1b3..75fc8b767fccf 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -1083,6 +1083,30 @@ void dw_pcie_edma_remove(struct dw_pcie *pci)
+ dw_edma_remove(&pci->edma);
+ }
+
++void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci)
++{
++ u16 l1ss;
++ u32 l1ss_cap;
++
++ if (pci->l1ss_support)
++ return;
++
++ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
++ if (!l1ss)
++ return;
++
++ /*
++ * Unless the driver claims "l1ss_support", don't advertise L1 PM
++ * Substates because they require CLKREQ# and possibly other
++ * device-specific configuration.
++ */
++ l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
++ l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 |
++ PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 |
++ PCI_L1SS_CAP_L1_PM_SS);
++ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap);
++}
++
+ void dw_pcie_setup(struct dw_pcie *pci)
+ {
+ u32 val;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 96e89046614da..82336a204569f 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -516,6 +516,7 @@ struct dw_pcie {
+ int max_link_speed;
+ u8 n_fts[2];
+ struct dw_edma_chip edma;
++ bool l1ss_support; /* L1 PM Substates support */
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+@@ -573,6 +574,7 @@ int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
+ void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
++void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci);
+ void dw_pcie_setup(struct dw_pcie *pci);
+ void dw_pcie_iatu_detect(struct dw_pcie *pci);
+ int dw_pcie_edma_detect(struct dw_pcie *pci);
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 5311cd5d96372..789cc0e3c10da 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1005,6 +1005,8 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PARF_PM_CTRL);
+
++ pci->l1ss_support = true;
++
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 10e74458e667b..3934757baa30c 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -703,6 +703,9 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
+ val |= (pcie->aspm_pwr_on_t << 19);
+ dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+
++ if (pcie->supports_clkreq)
++ pci->l1ss_support = true;
++
+ /* Program L0s and L1 entrance latencies */
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+--
+2.51.0
+
--- /dev/null
+From 98dec0f637b6faf434715fa859276c1372b6b0f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 17:25:14 +0530
+Subject: PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations
+
+From: Aksh Garg <a-garg7@ti.com>
+
+[ Upstream commit 43d67ec26b329f8aea34ba9dff23d69b84a8e564 ]
+
+The resizable BAR support added by the commit 3a3d4cabe681 ("PCI: dwc: ep:
+Allow EPF drivers to configure the size of Resizable BARs") incorrectly
+configures the resizable BARs only for the first Physical Function (PF0)
+in EP mode.
+
+The resizable BAR configuration functions use generic dw_pcie_*_dbi()
+operations instead of physical function specific dw_pcie_ep_*_dbi()
+operations. This causes resizable BAR configuration to always target
+PF0 regardless of the requested function number.
+
+Additionally, dw_pcie_ep_init_non_sticky_registers() only initializes
+resizable BAR registers for PF0, leaving other PFs unconfigured during
+the execution of this function.
+
+Fix this by using physical function specific configuration space access
+operations throughout the resizable BAR code path and initializing
+registers for all the physical functions that support resizable BARs.
+
+Fixes: 3a3d4cabe681 ("PCI: dwc: ep: Allow EPF drivers to configure the size of Resizable BARs")
+Signed-off-by: Aksh Garg <a-garg7@ti.com>
+[mani: added stable tag]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260130115516.515082-2-a-garg7@ti.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pci/controller/dwc/pcie-designware-ep.c | 48 ++++++++++++-------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index b6cee9eaa1165..e2e18beb2951d 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -75,6 +75,13 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+ cap, NULL, ep, func_no);
+ }
+
++static u16 dw_pcie_ep_find_ext_capability(struct dw_pcie_ep *ep,
++ u8 func_no, u8 cap)
++{
++ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_ep_read_cfg, 0,
++ cap, NULL, ep, func_no);
++}
++
+ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *hdr)
+ {
+@@ -178,22 +185,22 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ ep->bar_to_atu[bar] = 0;
+ }
+
+-static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
++static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_barno bar)
+ {
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+- offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
++ offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
++ reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
++ reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
+ if (bar_index == bar)
+ return offset;
+@@ -214,7 +221,7 @@ static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+- rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
++ rebar_offset = dw_pcie_ep_get_rebar_offset(ep, func_no, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+@@ -244,16 +251,16 @@ static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+- rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
++ rebar_ctrl = dw_pcie_ep_readl_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+- dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
++ dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+- dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
++ dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+@@ -799,20 +806,17 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+ }
+ EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
+
+-static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
++static void dw_pcie_ep_init_rebar_registers(struct dw_pcie_ep *ep, u8 func_no)
+ {
+- struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
+
+- offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+-
+- dw_pcie_dbi_ro_wr_en(pci);
++ offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR);
+
+ if (offset) {
+- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
++ reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ /*
+@@ -833,16 +837,28 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+- val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
++ val = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
++ dw_pcie_ep_writel_dbi(ep, func_no, offset + PCI_REBAR_CAP, val);
+ }
+ }
++}
++
++static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
++{
++ struct dw_pcie_ep *ep = &pci->ep;
++ u8 funcs = ep->epc->max_functions;
++ u8 func_no;
++
++ dw_pcie_dbi_ro_wr_en(pci);
++
++ for (func_no = 0; func_no < funcs; func_no++)
++ dw_pcie_ep_init_rebar_registers(ep, func_no);
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+--
+2.51.0
+
--- /dev/null
+From f28ae495e6b748a39157fa749a43a67bc7c42bfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Dec 2025 02:10:46 -0800
+Subject: PCI: dwc: Remove duplicate dw_pcie_ep_hide_ext_capability() function
+
+From: Qiang Yu <qiang.yu@oss.qualcomm.com>
+
+[ Upstream commit 86291f774fe8524178446cb2c792939640b4970c ]
+
+Remove dw_pcie_ep_hide_ext_capability() and replace its usage with
+dw_pcie_remove_ext_capability(). Both functions serve the same purpose
+of hiding PCIe extended capabilities, but dw_pcie_remove_ext_capability()
+provides a cleaner API that doesn't require the caller to specify the
+previous capability ID.
+
+Suggested-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Qiang Yu <qiang.yu@oss.qualcomm.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Tested-by: Niklas Cassel <cassel@kernel.org>
+Link: https://patch.msgid.link/20251224-remove_dw_pcie_ep_hide_ext_capability-v1-1-4302c9cdc316@oss.qualcomm.com
+Stable-dep-of: 43d67ec26b32 ("PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../pci/controller/dwc/pcie-designware-ep.c | 39 -------------------
+ drivers/pci/controller/dwc/pcie-designware.h | 7 ----
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 4 +-
+ 3 files changed, 1 insertion(+), 49 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 7f10d764f52b0..b6cee9eaa1165 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -75,45 +75,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+ cap, NULL, ep, func_no);
+ }
+
+-/**
+- * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+- * @pci: DWC PCI device
+- * @prev_cap: Capability preceding the capability that should be hidden
+- * @cap: Capability that should be hidden
+- *
+- * Return: 0 if success, errno otherwise.
+- */
+-int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+-{
+- u16 prev_cap_offset, cap_offset;
+- u32 prev_cap_header, cap_header;
+-
+- prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+- if (!prev_cap_offset)
+- return -EINVAL;
+-
+- prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+- cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+- cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+-
+- /* cap must immediately follow prev_cap. */
+- if (PCI_EXT_CAP_ID(cap_header) != cap)
+- return -EINVAL;
+-
+- /* Clear next ptr. */
+- prev_cap_header &= ~GENMASK(31, 20);
+-
+- /* Set next ptr to next ptr of cap. */
+- prev_cap_header |= cap_header & GENMASK(31, 20);
+-
+- dw_pcie_dbi_ro_wr_en(pci);
+- dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+- dw_pcie_dbi_ro_wr_dis(pci);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
+-
+ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *hdr)
+ {
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index a59ea4078cca8..d32ee1fa3cf85 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -888,7 +888,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
+ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+-int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
+ struct dw_pcie_ep_func *
+ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
+ #else
+@@ -946,12 +945,6 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+ {
+ }
+
+-static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+- u8 prev_cap, u8 cap)
+-{
+- return 0;
+-}
+-
+ static inline struct dw_pcie_ep_func *
+ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+ {
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index b5442ee2920e5..c2c36290be061 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -356,9 +356,7 @@ static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+- if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+- PCI_EXT_CAP_ID_ATS))
+- dev_err(dev, "failed to hide ATS capability\n");
++ dw_pcie_remove_ext_capability(pci, PCI_EXT_CAP_ID_ATS);
+ }
+
+ static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+--
+2.51.0
+
--- /dev/null
+From 58fa8880dd6bee34b1a5e7b2a5d39db69f8d8af8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Nov 2025 17:02:06 +0530
+Subject: PCI: j721e: Add config guards for Cadence Host and Endpoint library
+ APIs
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+[ Upstream commit 4b361b1e92be255ff923453fe8db74086cc7cf66 ]
+
+Commit under Fixes enabled loadable module support for the driver under
+the assumption that it shall be the sole user of the Cadence Host and
+Endpoint library APIs. This assumption guarantees that we won't end up
+in a case where the driver is built-in and the library support is built
+as a loadable module.
+
+With the introduction of [1], this assumption is no longer valid. The
+SG2042 driver could be built as a loadable module, implying that the
+Cadence Host library is also selected as a loadable module. However, the
+pci-j721e.c driver could be built-in as indicated by CONFIG_PCI_J721E=y
+due to which the Cadence Endpoint library is built-in. Despite the
+library drivers being built as specified by their respective consumers,
+since the 'pci-j721e.c' driver has references to the Cadence Host
+library APIs as well, we run into a build error as reported at [0].
+
+Fix this by adding config guards as a temporary workaround. The proper
+fix is to split the 'pci-j721e.c' driver into independent Host and
+Endpoint drivers as aligned at [2].
+
+[0]: https://lore.kernel.org/r/202511111705.MZ7ls8Hm-lkp@intel.com/
+[1]: commit 1c72774df028 ("PCI: sg2042: Add Sophgo SG2042 PCIe driver")
+[2]: https://lore.kernel.org/r/37f6f8ce-12b2-44ee-a94c-f21b29c98821@app.fastmail.com/
+
+Fixes: a2790bf81f0f ("PCI: j721e: Add support to build as a loadable module")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202511111705.MZ7ls8Hm-lkp@intel.com/
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251117113246.1460644-1-s-vadapalli@ti.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/cadence/pci-j721e.c | 41 +++++++++++++---------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index a88b2e52fd782..0413d163cfea0 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -621,9 +621,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+- ret = cdns_pcie_host_setup(rc);
+- if (ret < 0)
+- goto err_pcie_setup;
++ if (IS_ENABLED(CONFIG_PCI_J721E_HOST)) {
++ ret = cdns_pcie_host_setup(rc);
++ if (ret < 0)
++ goto err_pcie_setup;
++ }
+
+ break;
+ case PCI_MODE_EP:
+@@ -633,9 +635,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ goto err_get_sync;
+ }
+
+- ret = cdns_pcie_ep_setup(ep);
+- if (ret < 0)
+- goto err_pcie_setup;
++ if (IS_ENABLED(CONFIG_PCI_J721E_EP)) {
++ ret = cdns_pcie_ep_setup(ep);
++ if (ret < 0)
++ goto err_pcie_setup;
++ }
+
+ break;
+ }
+@@ -660,10 +664,11 @@ static void j721e_pcie_remove(struct platform_device *pdev)
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+- if (pcie->mode == PCI_MODE_RC) {
++ if (IS_ENABLED(CONFIG_PCI_J721E_HOST) &&
++ pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+- } else {
++ } else if (IS_ENABLED(CONFIG_PCI_J721E_EP)) {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+@@ -729,10 +734,12 @@ static int j721e_pcie_resume_noirq(struct device *dev)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+- ret = cdns_pcie_host_link_setup(rc);
+- if (ret < 0) {
+- clk_disable_unprepare(pcie->refclk);
+- return ret;
++ if (IS_ENABLED(CONFIG_PCI_J721E_HOST)) {
++ ret = cdns_pcie_host_link_setup(rc);
++ if (ret < 0) {
++ clk_disable_unprepare(pcie->refclk);
++ return ret;
++ }
+ }
+
+ /*
+@@ -742,10 +749,12 @@ static int j721e_pcie_resume_noirq(struct device *dev)
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+- ret = cdns_pcie_host_init(rc);
+- if (ret) {
+- clk_disable_unprepare(pcie->refclk);
+- return ret;
++ if (IS_ENABLED(CONFIG_PCI_J721E_HOST)) {
++ ret = cdns_pcie_host_init(rc);
++ if (ret) {
++ clk_disable_unprepare(pcie->refclk);
++ return ret;
++ }
+ }
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5761a44cd2337404b70824c5102276fccd2728c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Oct 2025 21:12:23 +0530
+Subject: PCI: j721e: Use devm_clk_get_optional_enabled() to get and enable the
+ clock
+
+From: Anand Moon <linux.amoon@gmail.com>
+
+[ Upstream commit 6fad11c61d0dbf87601ab9e2e37cba7a9a427f7b ]
+
+Use devm_clk_get_optional_enabled() helper instead of calling
+devm_clk_get_optional() and then clk_prepare_enable().
+
+Assign the result of devm_clk_get_optional_enabled() directly to
+pcie->refclk to avoid using a local 'clk' variable.
+
+Signed-off-by: Anand Moon <linux.amoon@gmail.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Link: https://patch.msgid.link/20251028154229.6774-2-linux.amoon@gmail.com
+Stable-dep-of: 4b361b1e92be ("PCI: j721e: Add config guards for Cadence Host and Endpoint library APIs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/cadence/pci-j721e.c | 20 +++++---------------
+ 1 file changed, 5 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index 5bc5ab20aa6d9..a88b2e52fd782 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -479,7 +479,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ struct cdns_pcie_ep *ep = NULL;
+ struct gpio_desc *gpiod;
+ void __iomem *base;
+- struct clk *clk;
+ u32 num_lanes;
+ u32 mode;
+ int ret;
+@@ -603,19 +602,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ goto err_get_sync;
+ }
+
+- clk = devm_clk_get_optional(dev, "pcie_refclk");
+- if (IS_ERR(clk)) {
+- ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
++ pcie->refclk = devm_clk_get_optional_enabled(dev, "pcie_refclk");
++ if (IS_ERR(pcie->refclk)) {
++ ret = dev_err_probe(dev, PTR_ERR(pcie->refclk),
++ "failed to enable pcie_refclk\n");
+ goto err_pcie_setup;
+ }
+
+- ret = clk_prepare_enable(clk);
+- if (ret) {
+- dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
+- goto err_pcie_setup;
+- }
+- pcie->refclk = clk;
+-
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+@@ -629,10 +622,8 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+- if (ret < 0) {
+- clk_disable_unprepare(pcie->refclk);
++ if (ret < 0)
+ goto err_pcie_setup;
+- }
+
+ break;
+ case PCI_MODE_EP:
+@@ -679,7 +670,6 @@ static void j721e_pcie_remove(struct platform_device *pdev)
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+- clk_disable_unprepare(pcie->refclk);
+ cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
+ pm_runtime_put(dev);
+--
+2.51.0
+
--- /dev/null
+From 7683dcf3566ed885dacd5ccc360f33abc8f483a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 13:25:09 +0100
+Subject: PM: sleep: core: Avoid bit field races related to work_in_progress
+
+From: Xuewen Yan <xuewen.yan@unisoc.com>
+
+[ Upstream commit 0491f3f9f664e7e0131eb4d2a8b19c49562e5c64 ]
+
+In all of the system suspend transition phases, the async processing of
+a device may be carried out in parallel with power.work_in_progress
+updates for the device's parent or suppliers and if it touches bit
+fields from the same group (for example, power.must_resume or
+power.wakeup_path), bit field corruption is possible.
+
+To avoid that, turn work_in_progress in struct dev_pm_info into a proper
+bool field and relocate it to save space.
+
+Fixes: aa7a9275ab81 ("PM: sleep: Suspend async parents after suspending children")
+Fixes: 443046d1ad66 ("PM: sleep: Make suspend of devices more asynchronous")
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Closes: https://lore.kernel.org/linux-pm/20260203063459.12808-1-xuewen.yan@unisoc.com/
+Cc: All applicable <stable@vger.kernel.org>
+[ rjw: Added subject and changelog ]
+Link: https://patch.msgid.link/CAB8ipk_VX2VPm706Jwa1=8NSA7_btWL2ieXmBgHr2JcULEP76g@mail.gmail.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/pm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index cc7b2dc28574c..12782f775a17e 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -677,10 +677,10 @@ struct dev_pm_info {
+ struct list_head entry;
+ struct completion completion;
+ struct wakeup_source *wakeup;
++ bool work_in_progress; /* Owned by the PM core */
+ bool wakeup_path:1;
+ bool syscore:1;
+ bool no_pm_callbacks:1; /* Owned by the PM core */
+- bool work_in_progress:1; /* Owned by the PM core */
+ bool smart_suspend:1; /* Owned by the PM core */
+ bool must_resume:1; /* Owned by the PM core */
+ bool may_skip_resume:1; /* Set by subsystems */
+--
+2.51.0
+
--- /dev/null
+From c27ec4776eeb05b338489842fcdd25ab7264997c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Dec 2025 07:42:09 +0100
+Subject: Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up
+ IRQ"
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit 180c3cfe36786d261a55da52a161f9e279b19a6f ]
+
+This reverts commit 0e0b45ab5d770a748487ba0ae8f77d1fb0f0de3e.
+
+While this fake hotplugging was a nice idea, it has shown that this feature
+does not handle PCIe switches correctly:
+pci_bus 0004:43: busn_res: can not insert [bus 43-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:43: busn_res: [bus 43-41] end is updated to 43
+pci_bus 0004:43: busn_res: can not insert [bus 43] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:00.0: devices behind bridge are unusable because [bus 43] cannot be assigned for them
+pci_bus 0004:44: busn_res: can not insert [bus 44-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:44: busn_res: [bus 44-41] end is updated to 44
+pci_bus 0004:44: busn_res: can not insert [bus 44] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:02.0: devices behind bridge are unusable because [bus 44] cannot be assigned for them
+pci_bus 0004:45: busn_res: can not insert [bus 45-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:45: busn_res: [bus 45-41] end is updated to 45
+pci_bus 0004:45: busn_res: can not insert [bus 45] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:06.0: devices behind bridge are unusable because [bus 45] cannot be assigned for them
+pci_bus 0004:46: busn_res: can not insert [bus 46-41] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci_bus 0004:46: busn_res: [bus 46-41] end is updated to 46
+pci_bus 0004:46: busn_res: can not insert [bus 46] under [bus 42-41] (conflicts with (null) [bus 42-41])
+pci 0004:42:0e.0: devices behind bridge are unusable because [bus 46] cannot be assigned for them
+pci_bus 0004:42: busn_res: [bus 42-41] end is updated to 46
+pci_bus 0004:42: busn_res: can not insert [bus 42-46] under [bus 41] (conflicts with (null) [bus 41])
+pci 0004:41:00.0: devices behind bridge are unusable because [bus 42-46] cannot be assigned for them
+pcieport 0004:40:00.0: bridge has subordinate 41 but max busn 46
+
+During the initial scan, PCI core doesn't see the switch and since the Root
+Port is not hot plug capable, the secondary bus number gets assigned as the
+subordinate bus number. This means, the PCI core assumes that only one bus
+will appear behind the Root Port since the Root Port is not hot plug
+capable.
+
+This works perfectly fine for PCIe endpoints connected to the Root Port,
+since they don't extend the bus. However, if a PCIe switch is connected,
+then there is a problem when the downstream busses starts showing up and
+the PCI core doesn't extend the subordinate bus number and bridge resources
+after initial scan during boot.
+
+The long term plan is to migrate this driver to the upcoming pwrctrl APIs
+that are supposed to handle this problem elegantly.
+
+Suggested-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Tested-by: Shawn Lin <shawn.lin@rock-chips.com>
+Acked-by: Shawn Lin <shawn.lin@rock-chips.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20251222064207.3246632-10-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-dw-rockchip.c | 59 +------------------
+ 1 file changed, 3 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+index 0a5d1cfb88437..b5442ee2920e5 100644
+--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
++++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+@@ -517,34 +517,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
+ .get_ltssm = rockchip_pcie_get_ltssm,
+ };
+
+-static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+-{
+- struct rockchip_pcie *rockchip = arg;
+- struct dw_pcie *pci = &rockchip->pci;
+- struct dw_pcie_rp *pp = &pci->pp;
+- struct device *dev = pci->dev;
+- u32 reg;
+-
+- reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+- rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+-
+- dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+- dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm_reg(rockchip));
+-
+- if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+- if (rockchip_pcie_link_up(pci)) {
+- msleep(PCIE_RESET_CONFIG_WAIT_MS);
+- dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+- /* Rescan the bus to enumerate endpoint devices */
+- pci_lock_rescan_remove();
+- pci_rescan_bus(pp->bridge->bus);
+- pci_unlock_rescan_remove();
+- }
+- }
+-
+- return IRQ_HANDLED;
+-}
+-
+ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+ {
+ struct rockchip_pcie *rockchip = arg;
+@@ -577,29 +549,14 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+ return IRQ_HANDLED;
+ }
+
+-static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+- struct rockchip_pcie *rockchip)
++static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+ {
+- struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+- int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+- irq = platform_get_irq_byname(pdev, "sys");
+- if (irq < 0)
+- return irq;
+-
+- ret = devm_request_threaded_irq(dev, irq, NULL,
+- rockchip_pcie_rc_sys_irq_thread,
+- IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+- if (ret) {
+- dev_err(dev, "failed to request PCIe sys IRQ\n");
+- return ret;
+- }
+-
+ /* LTSSM enable control mode */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+@@ -611,17 +568,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+- ret = dw_pcie_host_init(pp);
+- if (ret) {
+- dev_err(dev, "failed to initialize host\n");
+- return ret;
+- }
+-
+- /* unmask DLL up/down indicator */
+- val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
+- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+-
+- return ret;
++ return dw_pcie_host_init(pp);
+ }
+
+ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+@@ -747,7 +694,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
+
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+- ret = rockchip_pcie_configure_rc(pdev, rockchip);
++ ret = rockchip_pcie_configure_rc(rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+--
+2.51.0
+
bpf-add-bitwise-tracking-for-bpf_end.patch
bpf-introduce-tnum_step-to-step-through-tnum-s-membe.patch
bpf-improve-bounds-when-tnum-has-a-single-possible-v.patch
+x86-acpi-boot-correct-acpi_is_processor_usable-check.patch
+memory-mtk-smi-fix-device-leaks-on-common-probe.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+pci-j721e-use-devm_clk_get_optional_enabled-to-get-a.patch
+pci-j721e-add-config-guards-for-cadence-host-and-end.patch
+pci-dwc-advertise-l1-pm-substates-only-if-driver-req.patch
+pci-dw-rockchip-configure-l1ss-support.patch
+pci-dwc-add-l1-substates-context-to-ltssm_status-of-.patch
+pci-dw-rockchip-change-get_ltssm-to-provide-l1-subst.patch
+revert-pci-dw-rockchip-enumerate-endpoints-based-on-.patch
+net-qrtr-drop-the-mhi-auto_queue-feature-for-ipcr-dl.patch
+media-v4l2-mem2mem-add-a-kref-to-the-v4l2_m2m_dev-st.patch
+media-verisilicon-avoid-g2-bus-error-while-decoding-.patch
+usb-gadget-u_ether-add-gether_opts-for-config-cachin.patch
+usb-gadget-u_ether-add-auto-cleanup-helper-for-freei.patch
+usb-gadget-f_ncm-align-net_device-lifecycle-with-bin.patch
+accel-rocket-fix-unwinding-in-error-path-in-rocket_c.patch
+accel-rocket-fix-unwinding-in-error-path-in-rocket_p.patch
+media-tegra-video-fix-memory-leak-in-__tegra_channel.patch
+kvm-x86-ignore-ebusy-when-checking-nested-events-fro.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+unwind-simplify-unwind_user_next_fp-alignment-check.patch
+unwind-implement-compat-fp-unwind.patch
+unwind_user-x86-enable-frame-pointer-unwinding-on-x8.patch
+unwind_user-x86-teach-fp-unwind-about-start-of-funct.patch
+x86-uprobes-fix-xol-allocation-failure-for-32-bit-ta.patch
+ext4-correct-the-comments-place-for-ext4_ext_may_zer.patch
+ext4-don-t-set-ext4_get_blocks_convert-when-splittin.patch
+media-iris-remove-v4l2_m2m_ioctl_-de-en-coder_cmd-ap.patch
+media-iris-add-missing-platform-data-entries-for-sm8.patch
+input-synaptics_i2c-replace-use-of-system_wq-with-sy.patch
+input-synaptics_i2c-guard-polling-restart-in-resume.patch
+iommu-vt-d-skip-dev-iotlb-flush-for-inaccessible-pci.patch
+arm64-dts-rockchip-fix-rk356x-pcie-range-mappings.patch
+arm64-dts-rockchip-fix-rk3588-pcie-range-mappings.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+acpi-apei-ghes-add-helper-for-cper-cxl-protocol-erro.patch
+acpi-apei-ghes-disable-kasan-instrumentation-when-co.patch
+arm-dts-imx53-usbarmory-replace-license-text-comment.patch
+pci-add-preceding-capability-position-support-in-pci.patch
+pci-dwc-add-new-apis-to-remove-standard-and-extended.patch
+pci-dwc-remove-duplicate-dw_pcie_ep_hide_ext_capabil.patch
+pci-dwc-ep-fix-resizable-bar-support-for-multi-pf-co.patch
+kvm-x86-add-x2apic-features-to-control-eoi-broadcast.patch
+btrfs-define-the-auto_kfree-auto_kvfree-helper-macro.patch
+btrfs-zoned-fixup-last-alloc-pointer-after-extent-re.patch
+pm-sleep-core-avoid-bit-field-races-related-to-work_.patch
+drm-amd-fix-hang-on-amdgpu-unload-by-using-pci_dev_i.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+slub-remove-config_slub_tiny-specific-code-paths.patch
+mm-slab-use-prandom-if-allow_spin.patch
+loongarch-remove-unnecessary-checks-for-orc-unwinder.patch
+loongarch-handle-percpu-handler-address-for-orc-unwi.patch
+loongarch-remove-some-extern-variables-in-source-fil.patch
+drm-i915-dp-fail-state-computation-for-invalid-dsc-s.patch
+drm-i915-dp-fix-pipe-bpp-clamping-due-to-hdr.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
--- /dev/null
+From 577f6e38655b16a590be71d5d9150dec476cece3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Nov 2025 10:05:32 +0100
+Subject: slub: remove CONFIG_SLUB_TINY specific code paths
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+[ Upstream commit 31e0886fd57d426d18a239dd55e176032c9c1cb0 ]
+
+CONFIG_SLUB_TINY minimizes the SLUB's memory overhead in multiple ways,
+mainly by avoiding percpu caching of slabs and objects. It also reduces
+code size by replacing some code paths with simplified ones through
+ifdefs, but the benefits of that are smaller and would complicate the
+upcoming changes.
+
+Thus remove these code paths and associated ifdefs and simplify the code
+base.
+
+Link: https://patch.msgid.link/20251105-sheaves-cleanups-v1-4-b8218e1ac7ef@suse.cz
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Stable-dep-of: a1e244a9f177 ("mm/slab: use prandom if !allow_spin")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/slab.h | 2 -
+ mm/slub.c | 107 ++----------------------------------------------------
+ 2 files changed, 4 insertions(+), 105 deletions(-)
+
+diff --git a/mm/slab.h b/mm/slab.h
+index bf9d8940b8f21..36893299fa67c 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -236,10 +236,8 @@ struct kmem_cache_order_objects {
+ * Slab cache management.
+ */
+ struct kmem_cache {
+-#ifndef CONFIG_SLUB_TINY
+ struct kmem_cache_cpu __percpu *cpu_slab;
+ struct lock_class_key lock_key;
+-#endif
+ struct slub_percpu_sheaves __percpu *cpu_sheaves;
+ /* Used for retrieving partial slabs, etc. */
+ slab_flags_t flags;
+diff --git a/mm/slub.c b/mm/slub.c
+index 4e2a3f7656099..4db84fbc71bab 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -410,7 +410,6 @@ enum stat_item {
+ NR_SLUB_STAT_ITEMS
+ };
+
+-#ifndef CONFIG_SLUB_TINY
+ /*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+@@ -432,7 +431,6 @@ struct kmem_cache_cpu {
+ unsigned int stat[NR_SLUB_STAT_ITEMS];
+ #endif
+ };
+-#endif /* CONFIG_SLUB_TINY */
+
+ static inline void stat(const struct kmem_cache *s, enum stat_item si)
+ {
+@@ -594,12 +592,10 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
+ return freelist_ptr_decode(s, p, ptr_addr);
+ }
+
+-#ifndef CONFIG_SLUB_TINY
+ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+ {
+ prefetchw(object + s->offset);
+ }
+-#endif
+
+ /*
+ * When running under KMSAN, get_freepointer_safe() may return an uninitialized
+@@ -711,10 +707,12 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
+ return s->cpu_partial_slabs;
+ }
+ #else
++#ifdef SLAB_SUPPORTS_SYSFS
+ static inline void
+ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
+ {
+ }
++#endif
+
+ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
+ {
+@@ -2023,13 +2021,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
+ int objects) {}
+ static inline void dec_slabs_node(struct kmem_cache *s, int node,
+ int objects) {}
+-#ifndef CONFIG_SLUB_TINY
+ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
+ void **freelist, void *nextfree)
+ {
+ return false;
+ }
+-#endif
+ #endif /* CONFIG_SLUB_DEBUG */
+
+ #ifdef CONFIG_SLAB_OBJ_EXT
+@@ -3673,8 +3669,6 @@ static struct slab *get_partial(struct kmem_cache *s, int node,
+ return get_any_partial(s, pc);
+ }
+
+-#ifndef CONFIG_SLUB_TINY
+-
+ #ifdef CONFIG_PREEMPTION
+ /*
+ * Calculate the next globally unique transaction for disambiguation
+@@ -4074,12 +4068,6 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
+ return c->slab || slub_percpu_partial(c);
+ }
+
+-#else /* CONFIG_SLUB_TINY */
+-static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
+-static inline bool has_cpu_slab(int cpu, struct kmem_cache *s) { return false; }
+-static inline void flush_this_cpu_slab(struct kmem_cache *s) { }
+-#endif /* CONFIG_SLUB_TINY */
+-
+ static bool has_pcs_used(int cpu, struct kmem_cache *s)
+ {
+ struct slub_percpu_sheaves *pcs;
+@@ -4425,7 +4413,6 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
+ return true;
+ }
+
+-#ifndef CONFIG_SLUB_TINY
+ static inline bool
+ __update_cpu_freelist_fast(struct kmem_cache *s,
+ void *freelist_old, void *freelist_new,
+@@ -4689,7 +4676,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ pc.orig_size = orig_size;
+ slab = get_partial(s, node, &pc);
+ if (slab) {
+- if (kmem_cache_debug(s)) {
++ if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
+ freelist = pc.object;
+ /*
+ * For debug caches here we had to go through
+@@ -4727,7 +4714,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+
+ stat(s, ALLOC_SLAB);
+
+- if (kmem_cache_debug(s)) {
++ if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
+ freelist = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
+
+ if (unlikely(!freelist)) {
+@@ -4939,32 +4926,6 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
+
+ return object;
+ }
+-#else /* CONFIG_SLUB_TINY */
+-static void *__slab_alloc_node(struct kmem_cache *s,
+- gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
+-{
+- struct partial_context pc;
+- struct slab *slab;
+- void *object;
+-
+- pc.flags = gfpflags;
+- pc.orig_size = orig_size;
+- slab = get_partial(s, node, &pc);
+-
+- if (slab)
+- return pc.object;
+-
+- slab = new_slab(s, gfpflags, node);
+- if (unlikely(!slab)) {
+- slab_out_of_memory(s, gfpflags, node);
+- return NULL;
+- }
+-
+- object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
+-
+- return object;
+-}
+-#endif /* CONFIG_SLUB_TINY */
+
+ /*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+@@ -5787,9 +5748,7 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node)
+ * it did local_lock_irqsave(&s->cpu_slab->lock, flags).
+ * In this case fast path with __update_cpu_freelist_fast() is not safe.
+ */
+-#ifndef CONFIG_SLUB_TINY
+ if (!in_nmi() || !local_lock_is_locked(&s->cpu_slab->lock))
+-#endif
+ ret = __slab_alloc_node(s, alloc_gfp, node, _RET_IP_, size);
+
+ if (PTR_ERR(ret) == -EBUSY) {
+@@ -6571,14 +6530,10 @@ static void free_deferred_objects(struct irq_work *work)
+ llist_for_each_safe(pos, t, llnode) {
+ struct slab *slab = container_of(pos, struct slab, llnode);
+
+-#ifdef CONFIG_SLUB_TINY
+- free_slab(slab->slab_cache, slab);
+-#else
+ if (slab->frozen)
+ deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+ else
+ free_slab(slab->slab_cache, slab);
+-#endif
+ }
+ }
+
+@@ -6616,7 +6571,6 @@ void defer_free_barrier(void)
+ irq_work_sync(&per_cpu_ptr(&defer_free_objects, cpu)->work);
+ }
+
+-#ifndef CONFIG_SLUB_TINY
+ /*
+ * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
+ * can perform fastpath freeing without additional function calls.
+@@ -6709,14 +6663,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
+ }
+ stat_add(s, FREE_FASTPATH, cnt);
+ }
+-#else /* CONFIG_SLUB_TINY */
+-static void do_slab_free(struct kmem_cache *s,
+- struct slab *slab, void *head, void *tail,
+- int cnt, unsigned long addr)
+-{
+- __slab_free(s, slab, head, tail, cnt, addr);
+-}
+-#endif /* CONFIG_SLUB_TINY */
+
+ static __fastpath_inline
+ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
+@@ -6997,11 +6943,7 @@ void kfree_nolock(const void *object)
+ * since kasan quarantine takes locks and not supported from NMI.
+ */
+ kasan_slab_free(s, x, false, false, /* skip quarantine */true);
+-#ifndef CONFIG_SLUB_TINY
+ do_slab_free(s, slab, x, x, 0, _RET_IP_);
+-#else
+- defer_free(s, x);
+-#endif
+ }
+ EXPORT_SYMBOL_GPL(kfree_nolock);
+
+@@ -7451,7 +7393,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ }
+ EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+-#ifndef CONFIG_SLUB_TINY
+ static inline
+ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ void **p)
+@@ -7522,35 +7463,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ return 0;
+
+ }
+-#else /* CONFIG_SLUB_TINY */
+-static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
+- size_t size, void **p)
+-{
+- int i;
+-
+- for (i = 0; i < size; i++) {
+- void *object = kfence_alloc(s, s->object_size, flags);
+-
+- if (unlikely(object)) {
+- p[i] = object;
+- continue;
+- }
+-
+- p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
+- _RET_IP_, s->object_size);
+- if (unlikely(!p[i]))
+- goto error;
+-
+- maybe_wipe_obj_freeptr(s, p[i]);
+- }
+-
+- return i;
+-
+-error:
+- __kmem_cache_free_bulk(s, i, p);
+- return 0;
+-}
+-#endif /* CONFIG_SLUB_TINY */
+
+ /* Note that interrupts must be enabled when calling this function. */
+ int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -7741,7 +7653,6 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct node_barn *barn)
+ barn_init(barn);
+ }
+
+-#ifndef CONFIG_SLUB_TINY
+ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
+ {
+ BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
+@@ -7762,12 +7673,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
+
+ return 1;
+ }
+-#else
+-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
+-{
+- return 1;
+-}
+-#endif /* CONFIG_SLUB_TINY */
+
+ static int init_percpu_sheaves(struct kmem_cache *s)
+ {
+@@ -7857,13 +7762,11 @@ void __kmem_cache_release(struct kmem_cache *s)
+ cache_random_seq_destroy(s);
+ if (s->cpu_sheaves)
+ pcs_destroy(s);
+-#ifndef CONFIG_SLUB_TINY
+ #ifdef CONFIG_PREEMPT_RT
+ if (s->cpu_slab)
+ lockdep_unregister_key(&s->lock_key);
+ #endif
+ free_percpu(s->cpu_slab);
+-#endif
+ free_kmem_cache_nodes(s);
+ }
+
+@@ -8606,10 +8509,8 @@ void __init kmem_cache_init(void)
+
+ void __init kmem_cache_init_late(void)
+ {
+-#ifndef CONFIG_SLUB_TINY
+ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+ WARN_ON(!flushwq);
+-#endif
+ }
+
+ struct kmem_cache *
+--
+2.51.0
+
--- /dev/null
+From 71e3980bf52174fea151aebec25f8a02a6aa1e05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 13:27:34 +0200
+Subject: unwind: Implement compat fp unwind
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c79dd946e370af3537edb854f210cba3a94b4516 ]
+
+It is important to be able to unwind compat tasks too.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20250924080119.613695709@infradead.org
+Stable-dep-of: d55c571e4333 ("x86/uprobes: Fix XOL allocation failure for 32-bit tasks")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/unwind_user_types.h | 1 +
+ kernel/unwind/user.c | 40 ++++++++++++++++++++++---------
+ 2 files changed, 30 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/unwind_user_types.h b/include/linux/unwind_user_types.h
+index a449f15be8902..938f7e6233327 100644
+--- a/include/linux/unwind_user_types.h
++++ b/include/linux/unwind_user_types.h
+@@ -36,6 +36,7 @@ struct unwind_user_state {
+ unsigned long ip;
+ unsigned long sp;
+ unsigned long fp;
++ unsigned int ws;
+ enum unwind_user_type current_type;
+ unsigned int available_types;
+ bool done;
+diff --git a/kernel/unwind/user.c b/kernel/unwind/user.c
+index 9dcde797b5d93..642871527a132 100644
+--- a/kernel/unwind/user.c
++++ b/kernel/unwind/user.c
+@@ -8,19 +8,32 @@
+ #include <linux/unwind_user.h>
+ #include <linux/uaccess.h>
+
+-static const struct unwind_user_frame fp_frame = {
+- ARCH_INIT_USER_FP_FRAME
+-};
+-
+ #define for_each_user_frame(state) \
+ for (unwind_user_start(state); !(state)->done; unwind_user_next(state))
+
++static inline int
++get_user_word(unsigned long *word, unsigned long base, int off, unsigned int ws)
++{
++ unsigned long __user *addr = (void __user *)base + off;
++#ifdef CONFIG_COMPAT
++ if (ws == sizeof(int)) {
++ unsigned int data;
++ int ret = get_user(data, (unsigned int __user *)addr);
++ *word = data;
++ return ret;
++ }
++#endif
++ return get_user(*word, addr);
++}
++
+ static int unwind_user_next_fp(struct unwind_user_state *state)
+ {
+- const struct unwind_user_frame *frame = &fp_frame;
++ const struct unwind_user_frame frame = {
++ ARCH_INIT_USER_FP_FRAME(state->ws)
++ };
+ unsigned long cfa, fp, ra;
+
+- if (frame->use_fp) {
++ if (frame.use_fp) {
+ if (state->fp < state->sp)
+ return -EINVAL;
+ cfa = state->fp;
+@@ -29,26 +42,26 @@ static int unwind_user_next_fp(struct unwind_user_state *state)
+ }
+
+ /* Get the Canonical Frame Address (CFA) */
+- cfa += frame->cfa_off;
++ cfa += frame.cfa_off;
+
+ /* stack going in wrong direction? */
+ if (cfa <= state->sp)
+ return -EINVAL;
+
+ /* Make sure that the address is word aligned */
+- if (cfa & (sizeof(long) - 1))
++ if (cfa & (state->ws - 1))
+ return -EINVAL;
+
+ /* Find the Return Address (RA) */
+- if (get_user(ra, (unsigned long *)(cfa + frame->ra_off)))
++ if (get_user_word(&ra, cfa, frame.ra_off, state->ws))
+ return -EINVAL;
+
+- if (frame->fp_off && get_user(fp, (unsigned long __user *)(cfa + frame->fp_off)))
++ if (frame.fp_off && get_user_word(&fp, cfa, frame.fp_off, state->ws))
+ return -EINVAL;
+
+ state->ip = ra;
+ state->sp = cfa;
+- if (frame->fp_off)
++ if (frame.fp_off)
+ state->fp = fp;
+ return 0;
+ }
+@@ -100,6 +113,11 @@ static int unwind_user_start(struct unwind_user_state *state)
+ state->ip = instruction_pointer(regs);
+ state->sp = user_stack_pointer(regs);
+ state->fp = frame_pointer(regs);
++ state->ws = unwind_user_word_size(regs);
++ if (!state->ws) {
++ state->done = true;
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 1a1e76ca0d75b06dcea0d053483f7d815e688015 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Sep 2025 13:04:09 +0200
+Subject: unwind: Simplify unwind_user_next_fp() alignment check
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 5578534e4b92350995a20068f2e6ea3186c62d7f ]
+
+ 2^log_2(n) == n
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20250924080119.497867836@infradead.org
+Stable-dep-of: d55c571e4333 ("x86/uprobes: Fix XOL allocation failure for 32-bit tasks")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/unwind/user.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/unwind/user.c b/kernel/unwind/user.c
+index 97a8415e3216f..9dcde797b5d93 100644
+--- a/kernel/unwind/user.c
++++ b/kernel/unwind/user.c
+@@ -19,7 +19,6 @@ static int unwind_user_next_fp(struct unwind_user_state *state)
+ {
+ const struct unwind_user_frame *frame = &fp_frame;
+ unsigned long cfa, fp, ra;
+- unsigned int shift;
+
+ if (frame->use_fp) {
+ if (state->fp < state->sp)
+@@ -37,8 +36,7 @@ static int unwind_user_next_fp(struct unwind_user_state *state)
+ return -EINVAL;
+
+ /* Make sure that the address is word aligned */
+- shift = sizeof(long) == 4 ? 2 : 3;
+- if (cfa & ((1 << shift) - 1))
++ if (cfa & (sizeof(long) - 1))
+ return -EINVAL;
+
+ /* Find the Return Address (RA) */
+--
+2.51.0
+
--- /dev/null
+From 9e99d78cebcd5bd8f7e8df31aa07fc22ce941a97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Aug 2025 15:36:45 -0400
+Subject: unwind_user/x86: Enable frame pointer unwinding on x86
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+[ Upstream commit 49cf34c0815f93fb2ea3ab5cfbac1124bd9b45d0 ]
+
+Use ARCH_INIT_USER_FP_FRAME to describe how frame pointers are unwound
+on x86, and enable CONFIG_HAVE_UNWIND_USER_FP accordingly so the
+unwind_user interfaces can be used.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20250827193828.347397433@kernel.org
+Stable-dep-of: d55c571e4333 ("x86/uprobes: Fix XOL allocation failure for 32-bit tasks")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/include/asm/unwind_user.h | 25 +++++++++++++++++++++++++
+ 2 files changed, 26 insertions(+)
+ create mode 100644 arch/x86/include/asm/unwind_user.h
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index a3700766a8c08..ee41af778a9dc 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -298,6 +298,7 @@ config X86
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
+ select HAVE_UNSTABLE_SCHED_CLOCK
++ select HAVE_UNWIND_USER_FP if X86_64
+ select HAVE_USER_RETURN_NOTIFIER
+ select HAVE_GENERIC_VDSO
+ select VDSO_GETRANDOM if X86_64
+diff --git a/arch/x86/include/asm/unwind_user.h b/arch/x86/include/asm/unwind_user.h
+new file mode 100644
+index 0000000000000..b166e102d4445
+--- /dev/null
++++ b/arch/x86/include/asm/unwind_user.h
+@@ -0,0 +1,25 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_UNWIND_USER_H
++#define _ASM_X86_UNWIND_USER_H
++
++#include <asm/ptrace.h>
++
++#define ARCH_INIT_USER_FP_FRAME(ws) \
++ .cfa_off = 2*(ws), \
++ .ra_off = -1*(ws), \
++ .fp_off = -2*(ws), \
++ .use_fp = true,
++
++static inline int unwind_user_word_size(struct pt_regs *regs)
++{
++ /* We can't unwind VM86 stacks */
++ if (regs->flags & X86_VM_MASK)
++ return 0;
++#ifdef CONFIG_X86_64
++ if (!user_64bit_mode(regs))
++ return sizeof(int);
++#endif
++ return sizeof(long);
++}
++
++#endif /* _ASM_X86_UNWIND_USER_H */
+--
+2.51.0
+
--- /dev/null
+From 8f061a579749ecfad0499d95fe656b0c81489f38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Oct 2025 12:31:10 +0200
+Subject: unwind_user/x86: Teach FP unwind about start of function
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ae25884ad749e7f6e0c3565513bdc8aa2554a425 ]
+
+When userspace is interrupted at the start of a function, before we
+get a chance to complete the frame, unwind will miss one caller.
+
+X86 has a uprobe specific fixup for this, add bits to the generic
+unwinder to support this.
+
+Suggested-by: Jens Remus <jremus@linux.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20251024145156.GM4068168@noisy.programming.kicks-ass.net
+Stable-dep-of: d55c571e4333 ("x86/uprobes: Fix XOL allocation failure for 32-bit tasks")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/core.c | 40 ------------------------------
+ arch/x86/include/asm/unwind_user.h | 12 +++++++++
+ arch/x86/include/asm/uprobes.h | 9 +++++++
+ arch/x86/kernel/uprobes.c | 32 ++++++++++++++++++++++++
+ include/linux/unwind_user_types.h | 1 +
+ kernel/unwind/user.c | 39 ++++++++++++++++++++++-------
+ 6 files changed, 84 insertions(+), 49 deletions(-)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 56df4855f38e9..64e2bf2d4a615 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2846,46 +2846,6 @@ static unsigned long get_segment_base(unsigned int segment)
+ return get_desc_base(desc);
+ }
+
+-#ifdef CONFIG_UPROBES
+-/*
+- * Heuristic-based check if uprobe is installed at the function entry.
+- *
+- * Under assumption of user code being compiled with frame pointers,
+- * `push %rbp/%ebp` is a good indicator that we indeed are.
+- *
+- * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
+- * If we get this wrong, captured stack trace might have one extra bogus
+- * entry, but the rest of stack trace will still be meaningful.
+- */
+-static bool is_uprobe_at_func_entry(struct pt_regs *regs)
+-{
+- struct arch_uprobe *auprobe;
+-
+- if (!current->utask)
+- return false;
+-
+- auprobe = current->utask->auprobe;
+- if (!auprobe)
+- return false;
+-
+- /* push %rbp/%ebp */
+- if (auprobe->insn[0] == 0x55)
+- return true;
+-
+- /* endbr64 (64-bit only) */
+- if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
+- return true;
+-
+- return false;
+-}
+-
+-#else
+-static bool is_uprobe_at_func_entry(struct pt_regs *regs)
+-{
+- return false;
+-}
+-#endif /* CONFIG_UPROBES */
+-
+ #ifdef CONFIG_IA32_EMULATION
+
+ #include <linux/compat.h>
+diff --git a/arch/x86/include/asm/unwind_user.h b/arch/x86/include/asm/unwind_user.h
+index b166e102d4445..c4f1ff8874d67 100644
+--- a/arch/x86/include/asm/unwind_user.h
++++ b/arch/x86/include/asm/unwind_user.h
+@@ -3,6 +3,7 @@
+ #define _ASM_X86_UNWIND_USER_H
+
+ #include <asm/ptrace.h>
++#include <asm/uprobes.h>
+
+ #define ARCH_INIT_USER_FP_FRAME(ws) \
+ .cfa_off = 2*(ws), \
+@@ -10,6 +11,12 @@
+ .fp_off = -2*(ws), \
+ .use_fp = true,
+
++#define ARCH_INIT_USER_FP_ENTRY_FRAME(ws) \
++ .cfa_off = 1*(ws), \
++ .ra_off = -1*(ws), \
++ .fp_off = 0, \
++ .use_fp = false,
++
+ static inline int unwind_user_word_size(struct pt_regs *regs)
+ {
+ /* We can't unwind VM86 stacks */
+@@ -22,4 +29,9 @@ static inline int unwind_user_word_size(struct pt_regs *regs)
+ return sizeof(long);
+ }
+
++static inline bool unwind_user_at_function_start(struct pt_regs *regs)
++{
++ return is_uprobe_at_func_entry(regs);
++}
++
+ #endif /* _ASM_X86_UNWIND_USER_H */
+diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
+index 1ee2e5115955c..362210c799987 100644
+--- a/arch/x86/include/asm/uprobes.h
++++ b/arch/x86/include/asm/uprobes.h
+@@ -62,4 +62,13 @@ struct arch_uprobe_task {
+ unsigned int saved_tf;
+ };
+
++#ifdef CONFIG_UPROBES
++extern bool is_uprobe_at_func_entry(struct pt_regs *regs);
++#else
++static bool is_uprobe_at_func_entry(struct pt_regs *regs)
++{
++ return false;
++}
++#endif /* CONFIG_UPROBES */
++
+ #endif /* _ASM_UPROBES_H */
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 845aeaf36b8d2..7ef0535fcd547 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -1819,3 +1819,35 @@ bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ else
+ return regs->sp <= ret->stack;
+ }
++
++/*
++ * Heuristic-based check if uprobe is installed at the function entry.
++ *
++ * Under assumption of user code being compiled with frame pointers,
++ * `push %rbp/%ebp` is a good indicator that we indeed are.
++ *
++ * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern.
++ * If we get this wrong, captured stack trace might have one extra bogus
++ * entry, but the rest of stack trace will still be meaningful.
++ */
++bool is_uprobe_at_func_entry(struct pt_regs *regs)
++{
++ struct arch_uprobe *auprobe;
++
++ if (!current->utask)
++ return false;
++
++ auprobe = current->utask->auprobe;
++ if (!auprobe)
++ return false;
++
++ /* push %rbp/%ebp */
++ if (auprobe->insn[0] == 0x55)
++ return true;
++
++ /* endbr64 (64-bit only) */
++ if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
++ return true;
++
++ return false;
++}
+diff --git a/include/linux/unwind_user_types.h b/include/linux/unwind_user_types.h
+index 938f7e6233327..412729a269bcc 100644
+--- a/include/linux/unwind_user_types.h
++++ b/include/linux/unwind_user_types.h
+@@ -39,6 +39,7 @@ struct unwind_user_state {
+ unsigned int ws;
+ enum unwind_user_type current_type;
+ unsigned int available_types;
++ bool topmost;
+ bool done;
+ };
+
+diff --git a/kernel/unwind/user.c b/kernel/unwind/user.c
+index 642871527a132..39e2707894447 100644
+--- a/kernel/unwind/user.c
++++ b/kernel/unwind/user.c
+@@ -26,14 +26,12 @@ get_user_word(unsigned long *word, unsigned long base, int off, unsigned int ws)
+ return get_user(*word, addr);
+ }
+
+-static int unwind_user_next_fp(struct unwind_user_state *state)
++static int unwind_user_next_common(struct unwind_user_state *state,
++ const struct unwind_user_frame *frame)
+ {
+- const struct unwind_user_frame frame = {
+- ARCH_INIT_USER_FP_FRAME(state->ws)
+- };
+ unsigned long cfa, fp, ra;
+
+- if (frame.use_fp) {
++ if (frame->use_fp) {
+ if (state->fp < state->sp)
+ return -EINVAL;
+ cfa = state->fp;
+@@ -42,7 +40,7 @@ static int unwind_user_next_fp(struct unwind_user_state *state)
+ }
+
+ /* Get the Canonical Frame Address (CFA) */
+- cfa += frame.cfa_off;
++ cfa += frame->cfa_off;
+
+ /* stack going in wrong direction? */
+ if (cfa <= state->sp)
+@@ -53,19 +51,41 @@ static int unwind_user_next_fp(struct unwind_user_state *state)
+ return -EINVAL;
+
+ /* Find the Return Address (RA) */
+- if (get_user_word(&ra, cfa, frame.ra_off, state->ws))
++ if (get_user_word(&ra, cfa, frame->ra_off, state->ws))
+ return -EINVAL;
+
+- if (frame.fp_off && get_user_word(&fp, cfa, frame.fp_off, state->ws))
++ if (frame->fp_off && get_user_word(&fp, cfa, frame->fp_off, state->ws))
+ return -EINVAL;
+
+ state->ip = ra;
+ state->sp = cfa;
+- if (frame.fp_off)
++ if (frame->fp_off)
+ state->fp = fp;
++ state->topmost = false;
+ return 0;
+ }
+
++static int unwind_user_next_fp(struct unwind_user_state *state)
++{
++#ifdef CONFIG_HAVE_UNWIND_USER_FP
++ struct pt_regs *regs = task_pt_regs(current);
++
++ if (state->topmost && unwind_user_at_function_start(regs)) {
++ const struct unwind_user_frame fp_entry_frame = {
++ ARCH_INIT_USER_FP_ENTRY_FRAME(state->ws)
++ };
++ return unwind_user_next_common(state, &fp_entry_frame);
++ }
++
++ const struct unwind_user_frame fp_frame = {
++ ARCH_INIT_USER_FP_FRAME(state->ws)
++ };
++ return unwind_user_next_common(state, &fp_frame);
++#else
++ return -EINVAL;
++#endif
++}
++
+ static int unwind_user_next(struct unwind_user_state *state)
+ {
+ unsigned long iter_mask = state->available_types;
+@@ -118,6 +138,7 @@ static int unwind_user_start(struct unwind_user_state *state)
+ state->done = true;
+ return -EINVAL;
+ }
++ state->topmost = true;
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From 49a3bfb8cff512477ab21b2ebc65569e4353ec98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:16 +0800
+Subject: usb: gadget: f_ncm: align net_device lifecycle with bind/unbind
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 56a512a9b4107079f68701e7d55da8507eb963d9 ]
+
+Currently, the net_device is allocated in ncm_alloc_inst() and freed in
+ncm_free_inst(). This ties the network interface's lifetime to the
+configuration instance rather than the USB connection (bind/unbind).
+
+This decoupling causes issues when the USB gadget is disconnected where
+the underlying gadget device is removed. The net_device can outlive its
+parent, leading to dangling sysfs links and NULL pointer dereferences
+when accessing the freed gadget device.
+
+Problem 1: NULL pointer dereference on disconnect
+ Unable to handle kernel NULL pointer dereference at virtual address
+ 0000000000000000
+ Call trace:
+ __pi_strlen+0x14/0x150
+ rtnl_fill_ifinfo+0x6b4/0x708
+ rtmsg_ifinfo_build_skb+0xd8/0x13c
+ rtmsg_ifinfo+0x50/0xa0
+ __dev_notify_flags+0x4c/0x1f0
+ dev_change_flags+0x54/0x70
+ do_setlink+0x390/0xebc
+ rtnl_newlink+0x7d0/0xac8
+ rtnetlink_rcv_msg+0x27c/0x410
+ netlink_rcv_skb+0x134/0x150
+ rtnetlink_rcv+0x18/0x28
+ netlink_unicast+0x254/0x3f0
+ netlink_sendmsg+0x2e0/0x3d4
+
+Problem 2: Dangling sysfs symlinks
+ console:/ # ls -l /sys/class/net/ncm0
+ lrwxrwxrwx ... /sys/class/net/ncm0 ->
+ /sys/devices/platform/.../gadget.0/net/ncm0
+ console:/ # ls -l /sys/devices/platform/.../gadget.0/net/ncm0
+ ls: .../gadget.0/net/ncm0: No such file or directory
+
+Move the net_device allocation to ncm_bind() and deallocation to
+ncm_unbind(). This ensures the network interface exists only when the
+gadget function is actually bound to a configuration.
+
+To support pre-bind configuration (e.g., setting interface name or MAC
+address via configfs), cache user-provided options in f_ncm_opts
+using the gether_opts structure. Apply these cached settings to the
+net_device upon creation in ncm_bind().
+
+Preserve the use-after-free fix from commit 6334b8e4553c ("usb: gadget:
+f_ncm: Fix UAF ncm object at re-bind after usb ep transport error").
+Check opts->net in ncm_set_alt() and ncm_disable() to ensure
+gether_disconnect() runs only if a connection was established.
+
+Fixes: 40d133d7f542 ("usb: gadget: f_ncm: convert to new function interface with backward compatibility")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-3-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/f_ncm.c | 128 ++++++++++++++--------------
+ drivers/usb/gadget/function/u_ncm.h | 4 +-
+ 2 files changed, 66 insertions(+), 66 deletions(-)
+
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 0e38330271d5a..e23adc132f886 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -83,6 +83,11 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+ return container_of(f, struct f_ncm, port.func);
+ }
+
++static inline struct f_ncm_opts *func_to_ncm_opts(struct usb_function *f)
++{
++ return container_of(f->fi, struct f_ncm_opts, func_inst);
++}
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -859,6 +864,7 @@ static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ /* Control interface has only altsetting 0 */
+@@ -881,12 +887,13 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ if (alt > 1)
+ goto fail;
+
+- if (ncm->netdev) {
+- DBG(cdev, "reset ncm\n");
+- ncm->netdev = NULL;
+- gether_disconnect(&ncm->port);
+- ncm_reset_values(ncm);
+- }
++ scoped_guard(mutex, &opts->lock)
++ if (opts->net) {
++ DBG(cdev, "reset ncm\n");
++ opts->net = NULL;
++ gether_disconnect(&ncm->port);
++ ncm_reset_values(ncm);
++ }
+
+ /*
+ * CDC Network only sends data in non-default altsettings.
+@@ -919,7 +926,8 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ net = gether_connect(&ncm->port);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+- ncm->netdev = net;
++ scoped_guard(mutex, &opts->lock)
++ opts->net = net;
+ }
+
+ spin_lock(&ncm->lock);
+@@ -1366,14 +1374,16 @@ static int ncm_unwrap_ntb(struct gether *port,
+ static void ncm_disable(struct usb_function *f)
+ {
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ DBG(cdev, "ncm deactivated\n");
+
+- if (ncm->netdev) {
+- ncm->netdev = NULL;
+- gether_disconnect(&ncm->port);
+- }
++ scoped_guard(mutex, &opts->lock)
++ if (opts->net) {
++ opts->net = NULL;
++ gether_disconnect(&ncm->port);
++ }
+
+ if (ncm->notify->enabled) {
+ usb_ep_disable(ncm->notify);
+@@ -1433,39 +1443,44 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *ncm_opts = func_to_ncm_opts(f);
+ struct usb_string *us;
+ int status = 0;
+ struct usb_ep *ep;
+- struct f_ncm_opts *ncm_opts;
+
+ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
++ struct net_device *netdev __free(free_gether_netdev) = NULL;
+ struct usb_request *request __free(free_usb_request) = NULL;
+
+ if (!can_support_ecm(cdev->gadget))
+ return -EINVAL;
+
+- ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+-
+ if (cdev->use_os_string) {
+ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
+ if (!os_desc_table)
+ return -ENOMEM;
+ }
+
+- mutex_lock(&ncm_opts->lock);
+- gether_set_gadget(ncm_opts->net, cdev->gadget);
+- if (!ncm_opts->bound) {
+- ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
+- status = gether_register_netdev(ncm_opts->net);
++ netdev = gether_setup_default();
++ if (IS_ERR(netdev))
++ return -ENOMEM;
++
++ scoped_guard(mutex, &ncm_opts->lock) {
++ gether_apply_opts(netdev, &ncm_opts->net_opts);
++ netdev->mtu = ncm_opts->max_segment_size - ETH_HLEN;
+ }
+- mutex_unlock(&ncm_opts->lock);
+
++ gether_set_gadget(netdev, cdev->gadget);
++ status = gether_register_netdev(netdev);
+ if (status)
+ return status;
+
+- ncm_opts->bound = true;
+-
+- ncm_string_defs[1].s = ncm->ethaddr;
++ /* export host's Ethernet address in CDC format */
++ status = gether_get_host_addr_cdc(netdev, ncm->ethaddr,
++ sizeof(ncm->ethaddr));
++ if (status < 12)
++ return -EINVAL;
++ ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
+
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+@@ -1563,6 +1578,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ f->os_desc_n = 1;
+ }
+ ncm->notify_req = no_free_ptr(request);
++ ncm->netdev = no_free_ptr(netdev);
++ ncm->port.ioport = netdev_priv(ncm->netdev);
+
+ DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
+ ncm->port.in_ep->name, ncm->port.out_ep->name,
+@@ -1577,19 +1594,19 @@ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
+ }
+
+ /* f_ncm_item_ops */
+-USB_ETHERNET_CONFIGFS_ITEM(ncm);
++USB_ETHER_OPTS_ITEM(ncm);
+
+ /* f_ncm_opts_dev_addr */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);
++USB_ETHER_OPTS_ATTR_DEV_ADDR(ncm);
+
+ /* f_ncm_opts_host_addr */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);
++USB_ETHER_OPTS_ATTR_HOST_ADDR(ncm);
+
+ /* f_ncm_opts_qmult */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
++USB_ETHER_OPTS_ATTR_QMULT(ncm);
+
+ /* f_ncm_opts_ifname */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
++USB_ETHER_OPTS_ATTR_IFNAME(ncm);
+
+ static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
+ char *page)
+@@ -1655,34 +1672,27 @@ static void ncm_free_inst(struct usb_function_instance *f)
+ struct f_ncm_opts *opts;
+
+ opts = container_of(f, struct f_ncm_opts, func_inst);
+- if (opts->bound)
+- gether_cleanup(netdev_priv(opts->net));
+- else
+- free_netdev(opts->net);
+ kfree(opts->ncm_interf_group);
+ kfree(opts);
+ }
+
+ static struct usb_function_instance *ncm_alloc_inst(void)
+ {
+- struct f_ncm_opts *opts;
++ struct usb_function_instance *ret;
+ struct usb_os_desc *descs[1];
+ char *names[1];
+ struct config_group *ncm_interf_group;
+
+- opts = kzalloc(sizeof(*opts), GFP_KERNEL);
++ struct f_ncm_opts *opts __free(kfree) = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
++
++ opts->net = NULL;
+ opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;
++ gether_setup_opts_default(&opts->net_opts, "usb");
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = ncm_free_inst;
+- opts->net = gether_setup_default();
+- if (IS_ERR(opts->net)) {
+- struct net_device *net = opts->net;
+- kfree(opts);
+- return ERR_CAST(net);
+- }
+ opts->max_segment_size = ETH_FRAME_LEN;
+ INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
+
+@@ -1693,26 +1703,22 @@ static struct usb_function_instance *ncm_alloc_inst(void)
+ ncm_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+- if (IS_ERR(ncm_interf_group)) {
+- ncm_free_inst(&opts->func_inst);
++ if (IS_ERR(ncm_interf_group))
+ return ERR_CAST(ncm_interf_group);
+- }
+ opts->ncm_interf_group = ncm_interf_group;
+
+- return &opts->func_inst;
++ ret = &opts->func_inst;
++ retain_and_null_ptr(opts);
++ return ret;
+ }
+
+ static void ncm_free(struct usb_function *f)
+ {
+- struct f_ncm *ncm;
+- struct f_ncm_opts *opts;
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+
+- ncm = func_to_ncm(f);
+- opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+- kfree(ncm);
+- mutex_lock(&opts->lock);
+- opts->refcnt--;
+- mutex_unlock(&opts->lock);
++ scoped_guard(mutex, &opts->lock)
++ opts->refcnt--;
++ kfree(func_to_ncm(f));
+ }
+
+ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+@@ -1736,13 +1742,15 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ kfree(ncm->notify_req->buf);
+ usb_ep_free_request(ncm->notify, ncm->notify_req);
++
++ ncm->port.ioport = NULL;
++ gether_cleanup(netdev_priv(ncm->netdev));
+ }
+
+ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+ {
+ struct f_ncm *ncm;
+ struct f_ncm_opts *opts;
+- int status;
+
+ /* allocate and initialize one new instance */
+ ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
+@@ -1750,22 +1758,12 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+ return ERR_PTR(-ENOMEM);
+
+ opts = container_of(fi, struct f_ncm_opts, func_inst);
+- mutex_lock(&opts->lock);
+- opts->refcnt++;
+
+- /* export host's Ethernet address in CDC format */
+- status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
+- sizeof(ncm->ethaddr));
+- if (status < 12) { /* strlen("01234567890a") */
+- kfree(ncm);
+- mutex_unlock(&opts->lock);
+- return ERR_PTR(-EINVAL);
+- }
++ scoped_guard(mutex, &opts->lock)
++ opts->refcnt++;
+
+ spin_lock_init(&ncm->lock);
+ ncm_reset_values(ncm);
+- ncm->port.ioport = netdev_priv(opts->net);
+- mutex_unlock(&opts->lock);
+ ncm->port.is_fixed = true;
+ ncm->port.supports_multi_frame = true;
+
+diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
+index 49ec095cdb4b6..d99330fe31e88 100644
+--- a/drivers/usb/gadget/function/u_ncm.h
++++ b/drivers/usb/gadget/function/u_ncm.h
+@@ -15,11 +15,13 @@
+
+ #include <linux/usb/composite.h>
+
++#include "u_ether.h"
++
+ struct f_ncm_opts {
+ struct usb_function_instance func_inst;
+ struct net_device *net;
+- bool bound;
+
++ struct gether_opts net_opts;
+ struct config_group *ncm_interf_group;
+ struct usb_os_desc ncm_os_desc;
+ char ncm_ext_compat_id[16];
+--
+2.51.0
+
--- /dev/null
+From f672eabfdea16c2818c80b21dae3e656182e2402 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:15 +0800
+Subject: usb: gadget: u_ether: Add auto-cleanup helper for freeing net_device
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 0c0981126b99288ed354d3d414c8a5fd42ac9e25 ]
+
+The net_device in the u_ether framework currently requires explicit
+calls to unregister and free the device.
+
+Introduce gether_unregister_free_netdev() and the corresponding
+auto-cleanup macro. This ensures that if a net_device is registered, it
+is properly unregistered and the associated work queue is flushed before
+the memory is freed.
+
+This is a preparatory patch to simplify error handling paths in gadget
+drivers by removing the need for explicit goto labels for net_device
+cleanup.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-2-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 56a512a9b410 ("usb: gadget: f_ncm: align net_device lifecycle with bind/unbind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/u_ether.c | 15 +++++++++++++++
+ drivers/usb/gadget/function/u_ether.h | 2 ++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 745ed2c212e3a..6c32665538cc0 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1125,6 +1125,21 @@ void gether_cleanup(struct eth_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(gether_cleanup);
+
++void gether_unregister_free_netdev(struct net_device *net)
++{
++ if (!net)
++ return;
++
++ struct eth_dev *dev = netdev_priv(net);
++
++ if (net->reg_state == NETREG_REGISTERED) {
++ unregister_netdev(net);
++ flush_work(&dev->work);
++ }
++ free_netdev(net);
++}
++EXPORT_SYMBOL_GPL(gether_unregister_free_netdev);
++
+ /**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
+index 63a0240df4d74..a212a8ec5eb1b 100644
+--- a/drivers/usb/gadget/function/u_ether.h
++++ b/drivers/usb/gadget/function/u_ether.h
+@@ -283,6 +283,8 @@ int gether_get_ifname(struct net_device *net, char *name, int len);
+ int gether_set_ifname(struct net_device *net, const char *name, int len);
+
+ void gether_cleanup(struct eth_dev *dev);
++void gether_unregister_free_netdev(struct net_device *net);
++DEFINE_FREE(free_gether_netdev, struct net_device *, gether_unregister_free_netdev(_T));
+
+ void gether_setup_opts_default(struct gether_opts *opts, const char *name);
+ void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
+--
+2.51.0
+
--- /dev/null
+From ff0ecd4c79b0a528c43884742ecdfc43bd004b76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:14 +0800
+Subject: usb: gadget: u_ether: add gether_opts for config caching
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit e065c6a7e46c2ee9c677fdbf50035323d2de1215 ]
+
+Currently, the net_device is allocated when the function instance is
+created (e.g., in ncm_alloc_inst()). While this allows userspace to
+configure the device early, it decouples the net_device lifecycle from
+the actual USB connection state (bind/unbind). The goal is to defer
+net_device creation to the bind callback to properly align the lifecycle
+with its parent gadget device.
+
+However, deferring net_device allocation would prevent userspace from
+configuring parameters (like interface name or MAC address) before the
+net_device exists.
+
+Introduce a new structure, struct gether_opts, associated with the
+usb_function_instance, to cache settings independently of the
+net_device. These settings include the interface name pattern, MAC
+addresses (device and host), queue multiplier, and address assignment
+type.
+
+New helper functions are added:
+- gether_setup_opts_default(): Initializes struct gether_opts with
+ defaults, including random MAC addresses.
+- gether_apply_opts(): Applies the cached options from a struct
+ gether_opts to a valid net_device.
+
+To expose these options to userspace, new configfs macros
+(USB_ETHER_OPTS_ITEM and USB_ETHER_OPTS_ATTR_*) are defined in
+u_ether_configfs.h. These attributes are part of the function
+instance's configfs group.
+
+This refactoring is a preparatory step. It allows the subsequent patch
+to safely move the net_device allocation from the instance creation
+phase to the bind phase without losing the ability to pre-configure
+the interface via configfs.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-1-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 56a512a9b410 ("usb: gadget: f_ncm: align net_device lifecycle with bind/unbind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/u_ether.c | 30 +++
+ drivers/usb/gadget/function/u_ether.h | 28 +++
+ .../usb/gadget/function/u_ether_configfs.h | 176 ++++++++++++++++++
+ 3 files changed, 234 insertions(+)
+
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index f58590bf5e02f..745ed2c212e3a 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1039,6 +1039,36 @@ int gether_set_ifname(struct net_device *net, const char *name, int len)
+ }
+ EXPORT_SYMBOL_GPL(gether_set_ifname);
+
++void gether_setup_opts_default(struct gether_opts *opts, const char *name)
++{
++ opts->qmult = QMULT_DEFAULT;
++ snprintf(opts->name, sizeof(opts->name), "%s%%d", name);
++ eth_random_addr(opts->dev_mac);
++ opts->addr_assign_type = NET_ADDR_RANDOM;
++ eth_random_addr(opts->host_mac);
++}
++EXPORT_SYMBOL_GPL(gether_setup_opts_default);
++
++void gether_apply_opts(struct net_device *net, struct gether_opts *opts)
++{
++ struct eth_dev *dev = netdev_priv(net);
++
++ dev->qmult = opts->qmult;
++
++ if (opts->ifname_set) {
++ strscpy(net->name, opts->name, sizeof(net->name));
++ dev->ifname_set = true;
++ }
++
++ memcpy(dev->host_mac, opts->host_mac, sizeof(dev->host_mac));
++
++ if (opts->addr_assign_type == NET_ADDR_SET) {
++ memcpy(dev->dev_mac, opts->dev_mac, sizeof(dev->dev_mac));
++ net->addr_assign_type = opts->addr_assign_type;
++ }
++}
++EXPORT_SYMBOL_GPL(gether_apply_opts);
++
+ void gether_suspend(struct gether *link)
+ {
+ struct eth_dev *dev = link->ioport;
+diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
+index 34be220cef77c..63a0240df4d74 100644
+--- a/drivers/usb/gadget/function/u_ether.h
++++ b/drivers/usb/gadget/function/u_ether.h
+@@ -38,6 +38,31 @@
+
+ struct eth_dev;
+
++/**
++ * struct gether_opts - Options for Ethernet gadget function instances
++ * @name: Pattern for the network interface name (e.g., "usb%d").
++ * Used to generate the net device name.
++ * @qmult: Queue length multiplier for high/super speed.
++ * @host_mac: The MAC address to be used by the host side.
++ * @dev_mac: The MAC address to be used by the device side.
++ * @ifname_set: True if the interface name pattern has been set by userspace.
++ * @addr_assign_type: The method used for assigning the device MAC address
++ * (e.g., NET_ADDR_RANDOM, NET_ADDR_SET).
++ *
++ * This structure caches network-related settings provided through configfs
++ * before the net_device is fully instantiated. This allows for early
++ * configuration while deferring net_device allocation until the function
++ * is bound.
++ */
++struct gether_opts {
++ char name[IFNAMSIZ];
++ unsigned int qmult;
++ u8 host_mac[ETH_ALEN];
++ u8 dev_mac[ETH_ALEN];
++ bool ifname_set;
++ unsigned char addr_assign_type;
++};
++
+ /*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+@@ -259,6 +284,9 @@ int gether_set_ifname(struct net_device *net, const char *name, int len);
+
+ void gether_cleanup(struct eth_dev *dev);
+
++void gether_setup_opts_default(struct gether_opts *opts, const char *name);
++void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
++
+ void gether_suspend(struct gether *link);
+ void gether_resume(struct gether *link);
+
+diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
+index f558c3139ebe5..a3696797e074a 100644
+--- a/drivers/usb/gadget/function/u_ether_configfs.h
++++ b/drivers/usb/gadget/function/u_ether_configfs.h
+@@ -13,6 +13,12 @@
+ #ifndef __U_ETHER_CONFIGFS_H
+ #define __U_ETHER_CONFIGFS_H
+
++#include <linux/cleanup.h>
++#include <linux/if_ether.h>
++#include <linux/mutex.h>
++#include <linux/netdevice.h>
++#include <linux/rtnetlink.h>
++
+ #define USB_ETHERNET_CONFIGFS_ITEM(_f_) \
+ static void _f_##_attr_release(struct config_item *item) \
+ { \
+@@ -197,4 +203,174 @@ out: \
+ \
+ CONFIGFS_ATTR(_f_##_opts_, _n_)
+
++#define USB_ETHER_OPTS_ITEM(_f_) \
++ static void _f_##_attr_release(struct config_item *item) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ usb_put_function_instance(&opts->func_inst); \
++ } \
++ \
++ static struct configfs_item_operations _f_##_item_ops = { \
++ .release = _f_##_attr_release, \
++ }
++
++#define USB_ETHER_OPTS_ATTR_DEV_ADDR(_f_) \
++ static ssize_t _f_##_opts_dev_addr_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%pM\n", opts->net_opts.dev_mac); \
++ } \
++ \
++ static ssize_t _f_##_opts_dev_addr_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u8 new_addr[ETH_ALEN]; \
++ const char *p = page; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ for (int i = 0; i < ETH_ALEN; i++) { \
++ unsigned char num; \
++ if ((*p == '.') || (*p == ':')) \
++ p++; \
++ num = hex_to_bin(*p++) << 4; \
++ num |= hex_to_bin(*p++); \
++ new_addr[i] = num; \
++ } \
++ if (!is_valid_ether_addr(new_addr)) \
++ return -EINVAL; \
++ memcpy(opts->net_opts.dev_mac, new_addr, ETH_ALEN); \
++ opts->net_opts.addr_assign_type = NET_ADDR_SET; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, dev_addr)
++
++#define USB_ETHER_OPTS_ATTR_HOST_ADDR(_f_) \
++ static ssize_t _f_##_opts_host_addr_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%pM\n", opts->net_opts.host_mac); \
++ } \
++ \
++ static ssize_t _f_##_opts_host_addr_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u8 new_addr[ETH_ALEN]; \
++ const char *p = page; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ for (int i = 0; i < ETH_ALEN; i++) { \
++ unsigned char num; \
++ if ((*p == '.') || (*p == ':')) \
++ p++; \
++ num = hex_to_bin(*p++) << 4; \
++ num |= hex_to_bin(*p++); \
++ new_addr[i] = num; \
++ } \
++ if (!is_valid_ether_addr(new_addr)) \
++ return -EINVAL; \
++ memcpy(opts->net_opts.host_mac, new_addr, ETH_ALEN); \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, host_addr)
++
++#define USB_ETHER_OPTS_ATTR_QMULT(_f_) \
++ static ssize_t _f_##_opts_qmult_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%u\n", opts->net_opts.qmult); \
++ } \
++ \
++ static ssize_t _f_##_opts_qmult_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u32 val; \
++ int ret; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ ret = kstrtou32(page, 0, &val); \
++ if (ret) \
++ return ret; \
++ \
++ opts->net_opts.qmult = val; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, qmult)
++
++#define USB_ETHER_OPTS_ATTR_IFNAME(_f_) \
++ static ssize_t _f_##_opts_ifname_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ const char *name; \
++ \
++ guard(mutex)(&opts->lock); \
++ rtnl_lock(); \
++ if (opts->net_opts.ifname_set) \
++ name = opts->net_opts.name; \
++ else if (opts->net) \
++ name = netdev_name(opts->net); \
++ else \
++ name = "(inactive net_device)"; \
++ rtnl_unlock(); \
++ return sysfs_emit(page, "%s\n", name); \
++ } \
++ \
++ static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ char tmp[IFNAMSIZ]; \
++ const char *p; \
++ size_t c_len = len; \
++ \
++ if (c_len > 0 && page[c_len - 1] == '\n') \
++ c_len--; \
++ \
++ if (c_len >= sizeof(tmp)) \
++ return -E2BIG; \
++ \
++ strscpy(tmp, page, c_len + 1); \
++ if (!dev_valid_name(tmp)) \
++ return -EINVAL; \
++ \
++ /* Require exactly one %d */ \
++ p = strchr(tmp, '%'); \
++ if (!p || p[1] != 'd' || strchr(p + 2, '%')) \
++ return -EINVAL; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ strscpy(opts->net_opts.name, tmp, sizeof(opts->net_opts.name)); \
++ opts->net_opts.ifname_set = true; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, ifname)
++
+ #endif /* __U_ETHER_CONFIGFS_H */
+--
+2.51.0
+
--- /dev/null
+From 1edf92befd1b3156f2fc7375165318d7fab32401 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Nov 2025 14:53:57 +0000
+Subject: x86/acpi/boot: Correct acpi_is_processor_usable() check again
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+[ Upstream commit adbf61cc47cb72b102682e690ad323e1eda652c2 ]
+
+ACPI v6.3 defined a new "Online Capable" MADT LAPIC flag. This bit is
+used in conjunction with the "Enabled" MADT LAPIC flag to determine if
+a CPU can be enabled/hotplugged by the OS after boot.
+
+Before the new bit was defined, the "Enabled" bit was explicitly
+described like this (ACPI v6.0 wording provided):
+
+ "If zero, this processor is unusable, and the operating system
+ support will not attempt to use it"
+
+This means that CPU hotplug (based on MADT) is not possible. Many BIOS
+implementations follow this guidance. They may include LAPIC entries in
+MADT for unavailable CPUs, but since these entries are marked with
+"Enabled=0" it is expected that the OS will completely ignore these
+entries.
+
+However, QEMU will do the same (include entries with "Enabled=0") for
+the purpose of allowing CPU hotplug within the guest.
+
+Comment from QEMU function pc_madt_cpu_entry():
+
+ /* ACPI spec says that LAPIC entry for non present
+ * CPU may be omitted from MADT or it must be marked
+ * as disabled. However omitting non present CPU from
+ * MADT breaks hotplug on linux. So possible CPUs
+ * should be put in MADT but kept disabled.
+ */
+
+Recent Linux topology changes broke the QEMU use case. A following fix
+for the QEMU use case broke bare metal topology enumeration.
+
+Rework the Linux MADT LAPIC flags check to allow the QEMU use case only
+for guests and to maintain the ACPI spec behavior for bare metal.
+
+Remove an unnecessary check added to fix a bare metal case introduced by
+the QEMU "fix".
+
+ [ bp: Change logic as Michal suggested. ]
+ [ mingo: Removed misapplied -stable tag. ]
+
+Fixes: fed8d8773b8e ("x86/acpi/boot: Correct acpi_is_processor_usable() check")
+Fixes: f0551af02130 ("x86/topology: Ignore non-present APIC IDs in a present package")
+Closes: https://lore.kernel.org/r/20251024204658.3da9bf3f.michal.pecio@gmail.com
+Reported-by: Michal Pecio <michal.pecio@gmail.com>
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Michal Pecio <michal.pecio@gmail.com>
+Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Link: https://lore.kernel.org/20251111145357.4031846-1-yazen.ghannam@amd.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/acpi/boot.c | 12 ++++++++----
+ arch/x86/kernel/cpu/topology.c | 15 ---------------
+ 2 files changed, 8 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 9fa321a95eb33..d6138b2b633a3 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -35,6 +35,7 @@
+ #include <asm/smp.h>
+ #include <asm/i8259.h>
+ #include <asm/setup.h>
++#include <asm/hypervisor.h>
+
+ #include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
+ static int __initdata acpi_force = 0;
+@@ -164,11 +165,14 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
+ if (lapic_flags & ACPI_MADT_ENABLED)
+ return true;
+
+- if (!acpi_support_online_capable ||
+- (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+- return true;
++ if (acpi_support_online_capable)
++ return lapic_flags & ACPI_MADT_ONLINE_CAPABLE;
+
+- return false;
++ /*
++ * QEMU expects legacy "Enabled=0" LAPIC entries to be counted as usable
++ * in order to support CPU hotplug in guests.
++ */
++ return !hypervisor_is_type(X86_HYPER_NATIVE);
+ }
+
+ static int __init
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index 6073a16628f9e..425404e7b7b42 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -27,7 +27,6 @@
+ #include <xen/xen.h>
+
+ #include <asm/apic.h>
+-#include <asm/hypervisor.h>
+ #include <asm/io_apic.h>
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+@@ -240,20 +239,6 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
+ cpuid_to_apicid[cpu] = apic_id;
+ topo_set_cpuids(cpu, apic_id, acpi_id);
+ } else {
+- u32 pkgid = topo_apicid(apic_id, TOPO_PKG_DOMAIN);
+-
+- /*
+- * Check for present APICs in the same package when running
+- * on bare metal. Allow the bogosity in a guest.
+- */
+- if (hypervisor_is_type(X86_HYPER_NATIVE) &&
+- topo_unit_count(pkgid, TOPO_PKG_DOMAIN, phys_cpu_present_map)) {
+- pr_info_once("Ignoring hot-pluggable APIC ID %x in present package.\n",
+- apic_id);
+- topo_info.nr_rejected_cpus++;
+- return;
+- }
+-
+ topo_info.nr_disabled_cpus++;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From abf8fd56479f456740bbf575536341f184d22ab0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Jan 2026 16:00:37 +0100
+Subject: x86/uprobes: Fix XOL allocation failure for 32-bit tasks
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+[ Upstream commit d55c571e4333fac71826e8db3b9753fadfbead6a ]
+
+This script
+
+ #!/usr/bin/bash
+
+ echo 0 > /proc/sys/kernel/randomize_va_space
+
+ echo 'void main(void) {}' > TEST.c
+
+ # -fcf-protection to ensure that the 1st endbr32 insn can't be emulated
+ gcc -m32 -fcf-protection=branch TEST.c -o test
+
+ bpftrace -e 'uprobe:./test:main {}' -c ./test
+
+"hangs", the probed ./test task enters an endless loop.
+
+The problem is that with randomize_va_space == 0
+get_unmapped_area(TASK_SIZE - PAGE_SIZE) called by xol_add_vma() can not
+just return the "addr == TASK_SIZE - PAGE_SIZE" hint, this addr is used
+by the stack vma.
+
+arch_get_unmapped_area_topdown() doesn't take TIF_ADDR32 into account and
+in_32bit_syscall() is false, this leads to info.high_limit > TASK_SIZE.
+vm_unmapped_area() happily returns the high address > TASK_SIZE and then
+get_unmapped_area() returns -ENOMEM after the "if (addr > TASK_SIZE - len)"
+check.
+
+handle_swbp() doesn't report this failure (probably it should) and silently
+restarts the probed insn. Endless loop.
+
+I think that the right fix should change the x86 get_unmapped_area() paths
+to rely on TIF_ADDR32 rather than in_32bit_syscall(). Note also that if
+CONFIG_X86_X32_ABI=y, in_x32_syscall() falsely returns true in this case
+because ->orig_ax == -1.
+
+But we need a simple fix for -stable, so this patch just sets TS_COMPAT if
+the probed task is 32-bit to make in_ia32_syscall() true.
+
+Fixes: 1b028f784e8c ("x86/mm: Introduce mmap_compat_base() for 32-bit mmap()")
+Reported-by: Paulo Andrade <pandrade@redhat.com>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/all/aV5uldEvV7pb4RA8@redhat.com/
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/aWO7Fdxn39piQnxu@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/uprobes.c | 24 ++++++++++++++++++++++++
+ include/linux/uprobes.h | 1 +
+ kernel/events/uprobes.c | 10 +++++++---
+ 3 files changed, 32 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 7ef0535fcd547..46aed82243964 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -1851,3 +1851,27 @@ bool is_uprobe_at_func_entry(struct pt_regs *regs)
+
+ return false;
+ }
++
++#ifdef CONFIG_IA32_EMULATION
++unsigned long arch_uprobe_get_xol_area(void)
++{
++ struct thread_info *ti = current_thread_info();
++ unsigned long vaddr;
++
++ /*
++ * HACK: we are not in a syscall, but x86 get_unmapped_area() paths
++ * ignore TIF_ADDR32 and rely on in_32bit_syscall() to calculate
++ * vm_unmapped_area_info.high_limit.
++ *
++ * The #ifdef above doesn't cover the CONFIG_X86_X32_ABI=y case,
++ * but in this case in_32bit_syscall() -> in_x32_syscall() always
++ * (falsely) returns true because ->orig_ax == -1.
++ */
++ if (test_thread_flag(TIF_ADDR32))
++ ti->status |= TS_COMPAT;
++ vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
++ ti->status &= ~TS_COMPAT;
++
++ return vaddr;
++}
++#endif
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index ee3d36eda45dd..f548fea2adec8 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
+@@ -242,6 +242,7 @@ extern void arch_uprobe_clear_state(struct mm_struct *mm);
+ extern void arch_uprobe_init_state(struct mm_struct *mm);
+ extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
+ extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
++extern unsigned long arch_uprobe_get_xol_area(void);
+ #else /* !CONFIG_UPROBES */
+ struct uprobes_state {
+ };
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 4f42e7af575f5..725c5772429dc 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1694,6 +1694,12 @@ static const struct vm_special_mapping xol_mapping = {
+ .mremap = xol_mremap,
+ };
+
++unsigned long __weak arch_uprobe_get_xol_area(void)
++{
++ /* Try to map as high as possible, this is only a hint. */
++ return get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
++}
++
+ /* Slot allocation for XOL */
+ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
+ {
+@@ -1709,9 +1715,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
+ }
+
+ if (!area->vaddr) {
+- /* Try to map as high as possible, this is only a hint. */
+- area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
+- PAGE_SIZE, 0, 0);
++ area->vaddr = arch_uprobe_get_xol_area();
+ if (IS_ERR_VALUE(area->vaddr)) {
+ ret = area->vaddr;
+ goto fail;
+--
+2.51.0
+
--- /dev/null
+From c467dc3c578b3cb4ef7c3395f19e826638156032 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 17:36:14 +0100
+Subject: accel/rocket: fix unwinding in error path in rocket_core_init
+
+From: Quentin Schulz <quentin.schulz@cherry.de>
+
+[ Upstream commit f509a081f6a289f7c66856333b3becce7a33c97e ]
+
+When rocket_job_init() is called, iommu_group_get() has already been
+called, therefore we should call iommu_group_put() and make the
+iommu_group pointer NULL. This aligns with what's done in
+rocket_core_fini().
+
+If pm_runtime_resume_and_get() somehow fails, not only should
+rocket_job_fini() be called but we should also unwind everything done
+before that, that is, disable PM, put the iommu_group, NULLify it and
+then call rocket_job_fini(). This is exactly what's done in
+rocket_core_fini() so let's call that function instead of duplicating
+the code.
+
+Fixes: 0810d5ad88a1 ("accel/rocket: Add job submission IOCTL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Quentin Schulz <quentin.schulz@cherry.de>
+Reviewed-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Signed-off-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Link: https://patch.msgid.link/20251215-rocket-error-path-v1-1-eec3bf29dc3b@cherry.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/rocket/rocket_core.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
+index abe7719c1db46..b3b2fa9ba645a 100644
+--- a/drivers/accel/rocket/rocket_core.c
++++ b/drivers/accel/rocket/rocket_core.c
+@@ -59,8 +59,11 @@ int rocket_core_init(struct rocket_core *core)
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+- if (err)
++ if (err) {
++ iommu_group_put(core->iommu_group);
++ core->iommu_group = NULL;
+ return err;
++ }
+
+ pm_runtime_use_autosuspend(dev);
+
+@@ -76,7 +79,7 @@ int rocket_core_init(struct rocket_core *core)
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+- rocket_job_fini(core);
++ rocket_core_fini(core);
+ return err;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From da707e559ba139c56742425629e8f2f72f4e74ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Dec 2025 17:36:15 +0100
+Subject: accel/rocket: fix unwinding in error path in rocket_probe
+
+From: Quentin Schulz <quentin.schulz@cherry.de>
+
+[ Upstream commit 34f4495a7f72895776b81969639f527c99eb12b9 ]
+
+When rocket_core_init() fails (as could be the case with EPROBE_DEFER),
+we need to properly unwind by decrementing the counter we just
+incremented and if this is the first core we failed to probe, remove the
+rocket DRM device with rocket_device_fini() as well. This matches the
+logic in rocket_remove(). Failing to properly unwind results in
+out-of-bounds accesses.
+
+Fixes: 0810d5ad88a1 ("accel/rocket: Add job submission IOCTL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Quentin Schulz <quentin.schulz@cherry.de>
+Reviewed-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Signed-off-by: Tomeu Vizoso <tomeu@tomeuvizoso.net>
+Link: https://patch.msgid.link/20251215-rocket-error-path-v1-2-eec3bf29dc3b@cherry.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/rocket/rocket_drv.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
+index 5c0b63f0a8f00..f6ef4c7aeef11 100644
+--- a/drivers/accel/rocket/rocket_drv.c
++++ b/drivers/accel/rocket/rocket_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+
++#include "rocket_device.h"
+ #include "rocket_drv.h"
+ #include "rocket_gem.h"
+ #include "rocket_job.h"
+@@ -158,6 +159,8 @@ static const struct drm_driver rocket_drm_driver = {
+
+ static int rocket_probe(struct platform_device *pdev)
+ {
++ int ret;
++
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+@@ -177,7 +180,17 @@ static int rocket_probe(struct platform_device *pdev)
+
+ rdev->num_cores++;
+
+- return rocket_core_init(&rdev->cores[core]);
++ ret = rocket_core_init(&rdev->cores[core]);
++ if (ret) {
++ rdev->num_cores--;
++
++ if (rdev->num_cores == 0) {
++ rocket_device_fini(rdev);
++ rdev = NULL;
++ }
++ }
++
++ return ret;
+ }
+
+ static void rocket_remove(struct platform_device *pdev)
+--
+2.51.0
+
--- /dev/null
+From deb8e148f19a0e7ec43c11e6c2dae6be7b8d3193 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Jan 2026 12:56:25 +0000
+Subject: KVM: x86: Add x2APIC "features" to control EOI broadcast suppression
+
+From: Khushit Shah <khushit.shah@nutanix.com>
+
+[ Upstream commit 6517dfbcc918f970a928d9dc17586904bac06893 ]
+
+Add two flags for KVM_CAP_X2APIC_API to allow userspace to control support
+for Suppress EOI Broadcasts when using a split IRQCHIP (I/O APIC emulated
+by userspace), which KVM completely mishandles. When x2APIC support was
+first added, KVM incorrectly advertised and "enabled" Suppress EOI
+Broadcast, without fully supporting the I/O APIC side of the equation,
+i.e. without adding directed EOI to KVM's in-kernel I/O APIC.
+
+That flaw was carried over to split IRQCHIP support, i.e. KVM advertised
+support for Suppress EOI Broadcasts irrespective of whether or not the
+userspace I/O APIC implementation supported directed EOIs. Even worse,
+KVM didn't actually suppress EOI broadcasts, i.e. userspace VMMs without
+support for directed EOI came to rely on the "spurious" broadcasts.
+
+KVM "fixed" the in-kernel I/O APIC implementation by completely disabling
+support for Suppress EOI Broadcasts in commit 0bcc3fb95b97 ("KVM: lapic:
+stop advertising DIRECTED_EOI when in-kernel IOAPIC is in use"), but
+didn't do anything to remedy userspace I/O APIC implementations.
+
+KVM's bogus handling of Suppress EOI Broadcast is problematic when the
+guest relies on interrupts being masked in the I/O APIC until well after
+the initial local APIC EOI. E.g. Windows with Credential Guard enabled
+handles interrupts in the following order:
+ 1. Interrupt for L2 arrives.
+ 2. L1 APIC EOIs the interrupt.
+ 3. L1 resumes L2 and injects the interrupt.
+ 4. L2 EOIs after servicing.
+ 5. L1 performs the I/O APIC EOI.
+
+Because KVM EOIs the I/O APIC at step #2, the guest can get an interrupt
+storm, e.g. if the IRQ line is still asserted and userspace reacts to the
+EOI by re-injecting the IRQ, because the guest doesn't de-assert the line
+until step #4, and doesn't expect the interrupt to be re-enabled until
+step #5.
+
+Unfortunately, simply "fixing" the bug isn't an option, as KVM has no way
+of knowing if the userspace I/O APIC supports directed EOIs, i.e.
+suppressing EOI broadcasts would result in interrupts being stuck masked
+in the userspace I/O APIC due to step #5 being ignored by userspace. And
+fully disabling support for Suppress EOI Broadcast is also undesirable, as
+picking up the fix would require a guest reboot, *and* more importantly
+would change the virtual CPU model exposed to the guest without any buy-in
+from userspace.
+
+Add KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST and
+KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST flags to allow userspace to
+explicitly enable or disable support for Suppress EOI Broadcasts. This
+gives userspace control over the virtual CPU model exposed to the guest,
+as KVM should never have enabled support for Suppress EOI Broadcast without
+userspace opt-in. Not setting either flag will result in legacy quirky
+behavior for backward compatibility.
+
+Disallow fully enabling SUPPRESS_EOI_BROADCAST when using an in-kernel
+I/O APIC, as KVM's history/support is just as tragic. E.g. it's not clear
+that commit c806a6ad35bf ("KVM: x86: call irq notifiers with directed EOI")
+was entirely correct, i.e. it may have simply papered over the lack of
+Directed EOI emulation in the I/O APIC.
+
+Note, Suppress EOI Broadcasts is defined only in Intel's SDM, not in AMD's
+APM. But the bit is writable on some AMD CPUs, e.g. Turin, and KVM's ABI
+is to support Directed EOI (KVM's name) irrespective of guest CPU vendor.
+
+Fixes: 7543a635aa09 ("KVM: x86: Add KVM exit for IOAPIC EOIs")
+Closes: https://lore.kernel.org/kvm/7D497EF1-607D-4D37-98E7-DAF95F099342@nutanix.com
+Cc: stable@vger.kernel.org
+Suggested-by: David Woodhouse <dwmw2@infradead.org>
+Signed-off-by: Khushit Shah <khushit.shah@nutanix.com>
+Link: https://patch.msgid.link/20260123125657.3384063-1-khushit.shah@nutanix.com
+[sean: clean up minor formatting goofs and fix a comment typo]
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/virt/kvm/api.rst | 28 +++++++++++-
+ arch/x86/include/asm/kvm_host.h | 7 +++
+ arch/x86/include/uapi/asm/kvm.h | 6 ++-
+ arch/x86/kvm/ioapic.c | 2 +-
+ arch/x86/kvm/lapic.c | 76 +++++++++++++++++++++++++++++----
+ arch/x86/kvm/lapic.h | 2 +
+ arch/x86/kvm/x86.c | 21 ++++++++-
+ 7 files changed, 127 insertions(+), 15 deletions(-)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 01a3abef8abb9..f1f1d2e5dc7c9 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -7835,8 +7835,10 @@ Will return -EBUSY if a VCPU has already been created.
+
+ Valid feature flags in args[0] are::
+
+- #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+- #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++ #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
++ #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++ #define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST (1ULL << 2)
++ #define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST (1ULL << 3)
+
+ Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
+ KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
+@@ -7849,6 +7851,28 @@ as a broadcast even in x2APIC mode in order to support physical x2APIC
+ without interrupt remapping. This is undesirable in logical mode,
+ where 0xff represents CPUs 0-7 in cluster 0.
+
++Setting KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST instructs KVM to enable
++Suppress EOI Broadcasts. KVM will advertise support for Suppress EOI
++Broadcast to the guest and suppress LAPIC EOI broadcasts when the guest
++sets the Suppress EOI Broadcast bit in the SPIV register. This flag is
++supported only when using a split IRQCHIP.
++
++Setting KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST disables support for
++Suppress EOI Broadcasts entirely, i.e. instructs KVM to NOT advertise
++support to the guest.
++
++Modern VMMs should either enable KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST
++or KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST. If not, legacy quirky
++behavior will be used by KVM: in split IRQCHIP mode, KVM will advertise
++support for Suppress EOI Broadcasts but not actually suppress EOI
++broadcasts; for in-kernel IRQCHIP mode, KVM will not advertise support for
++Suppress EOI Broadcasts.
++
++Setting both KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST and
++KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST will fail with an EINVAL error,
++as will setting KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST without a split
++IRQCHIP.
++
+ 7.8 KVM_CAP_S390_USER_INSTR0
+ ----------------------------
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 5a3bfa293e8b1..c27b3e5f60c23 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1226,6 +1226,12 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
++enum kvm_suppress_eoi_broadcast_mode {
++ KVM_SUPPRESS_EOI_BROADCAST_QUIRKED, /* Legacy behavior */
++ KVM_SUPPRESS_EOI_BROADCAST_ENABLED, /* Enable Suppress EOI broadcast */
++ KVM_SUPPRESS_EOI_BROADCAST_DISABLED /* Disable Suppress EOI broadcast */
++};
++
+ struct kvm_x86_msr_filter {
+ u8 count;
+ bool default_allow:1;
+@@ -1475,6 +1481,7 @@ struct kvm_arch {
+
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
++ enum kvm_suppress_eoi_broadcast_mode suppress_eoi_broadcast_mode;
+
+ bool has_mapped_host_mmio;
+ bool guest_can_read_msr_platform_info;
+diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
+index 7ceff65836525..1208932e5cc3c 100644
+--- a/arch/x86/include/uapi/asm/kvm.h
++++ b/arch/x86/include/uapi/asm/kvm.h
+@@ -914,8 +914,10 @@ struct kvm_sev_snp_launch_finish {
+ __u64 pad1[4];
+ };
+
+-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
++#define KVM_X2APIC_API_USE_32BIT_IDS _BITULL(0)
++#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK _BITULL(1)
++#define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST _BITULL(2)
++#define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST _BITULL(3)
+
+ struct kvm_hyperv_eventfd {
+ __u32 conn_id;
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 2c2783296aedb..a26fa4222f292 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -561,7 +561,7 @@ static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
+ spin_lock(&ioapic->lock);
+
+ if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+- kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
++ kvm_lapic_suppress_eoi_broadcast(apic))
+ return;
+
+ ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 1597dd0b0cc66..9ec577b10e051 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -105,6 +105,63 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
+ apic_test_vector(vector, apic->regs + APIC_IRR);
+ }
+
++static bool kvm_lapic_advertise_suppress_eoi_broadcast(struct kvm *kvm)
++{
++ switch (kvm->arch.suppress_eoi_broadcast_mode) {
++ case KVM_SUPPRESS_EOI_BROADCAST_ENABLED:
++ return true;
++ case KVM_SUPPRESS_EOI_BROADCAST_DISABLED:
++ return false;
++ case KVM_SUPPRESS_EOI_BROADCAST_QUIRKED:
++ /*
++ * The default in-kernel I/O APIC emulates the 82093AA and does not
++ * implement an EOI register. Some guests (e.g. Windows with the
++ * Hyper-V role enabled) disable LAPIC EOI broadcast without
++ * checking the I/O APIC version, which can cause level-triggered
++ * interrupts to never be EOI'd.
++ *
++ * To avoid this, KVM doesn't advertise Suppress EOI Broadcast
++ * support when using the default in-kernel I/O APIC.
++ *
++ * Historically, in split IRQCHIP mode, KVM always advertised
++ * Suppress EOI Broadcast support but did not actually suppress
++ * EOIs, resulting in quirky behavior.
++ */
++ return !ioapic_in_kernel(kvm);
++ default:
++ WARN_ON_ONCE(1);
++ return false;
++ }
++}
++
++bool kvm_lapic_suppress_eoi_broadcast(struct kvm_lapic *apic)
++{
++ struct kvm *kvm = apic->vcpu->kvm;
++
++ if (!(kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
++ return false;
++
++ switch (kvm->arch.suppress_eoi_broadcast_mode) {
++ case KVM_SUPPRESS_EOI_BROADCAST_ENABLED:
++ return true;
++ case KVM_SUPPRESS_EOI_BROADCAST_DISABLED:
++ return false;
++ case KVM_SUPPRESS_EOI_BROADCAST_QUIRKED:
++ /*
++ * Historically, in split IRQCHIP mode, KVM ignored the suppress
++ * EOI broadcast bit set by the guest and broadcasts EOIs to the
++ * userspace I/O APIC. For In-kernel I/O APIC, the support itself
++ * is not advertised, can only be enabled via KVM_SET_APIC_STATE,
++ * and KVM's I/O APIC doesn't emulate Directed EOIs; but if the
++ * feature is enabled, it is respected (with odd behavior).
++ */
++ return ioapic_in_kernel(kvm);
++ default:
++ WARN_ON_ONCE(1);
++ return false;
++ }
++}
++
+ __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
+ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_has_noapic_vcpu);
+
+@@ -554,15 +611,9 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
+
+ v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
+
+- /*
+- * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
+- * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
+- * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
+- * version first and level-triggered interrupts never get EOIed in
+- * IOAPIC.
+- */
++
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_X2APIC) &&
+- !ioapic_in_kernel(vcpu->kvm))
++ kvm_lapic_advertise_suppress_eoi_broadcast(vcpu->kvm))
+ v |= APIC_LVR_DIRECTED_EOI;
+ kvm_lapic_set_reg(apic, APIC_LVR, v);
+ }
+@@ -1517,6 +1568,15 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
+
+ /* Request a KVM exit to inform the userspace IOAPIC. */
+ if (irqchip_split(apic->vcpu->kvm)) {
++ /*
++ * Don't exit to userspace if the guest has enabled Directed
++ * EOI, a.k.a. Suppress EOI Broadcasts, in which case the local
++ * APIC doesn't broadcast EOIs (the guest must EOI the target
++ * I/O APIC(s) directly).
++ */
++ if (kvm_lapic_suppress_eoi_broadcast(apic))
++ return;
++
+ apic->vcpu->arch.pending_ioapic_eoi = vector;
+ kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
+ return;
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 282b9b7da98cd..e5f5a222eced0 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -231,6 +231,8 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
+
+ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+
++bool kvm_lapic_suppress_eoi_broadcast(struct kvm_lapic *apic);
++
+ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
+
+ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8b12bf0774c77..0d731ce4c4e16 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -121,8 +121,10 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
+
+ #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
+
+-#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
+- KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
++#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
++ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK | \
++ KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST | \
++ KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
+@@ -4931,6 +4933,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ break;
+ case KVM_CAP_X2APIC_API:
+ r = KVM_X2APIC_API_VALID_FLAGS;
++ if (kvm && !irqchip_split(kvm))
++ r &= ~KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST;
+ break;
+ case KVM_CAP_NESTED_STATE:
+ r = kvm_x86_ops.nested_ops->get_state ?
+@@ -6748,11 +6752,24 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
+ break;
+
++ if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
++ (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST))
++ break;
++
++ if ((cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST) &&
++ !irqchip_split(kvm))
++ break;
++
+ if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
+ kvm->arch.x2apic_format = true;
+ if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
+ kvm->arch.x2apic_broadcast_quirk_disabled = true;
+
++ if (cap->args[0] & KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST)
++ kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_ENABLED;
++ if (cap->args[0] & KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST)
++ kvm->arch.suppress_eoi_broadcast_mode = KVM_SUPPRESS_EOI_BROADCAST_DISABLED;
++
+ r = 0;
+ break;
+ case KVM_CAP_X86_DISABLE_EXITS:
+--
+2.51.0
+
bpf-introduce-tnum_step-to-step-through-tnum-s-membe.patch
bpf-improve-bounds-when-tnum-has-a-single-possible-v.patch
uaccess-fix-scoped_user_read_access-for-pointer-to-c.patch
+usb-gadget-u_ether-add-gether_opts-for-config-cachin.patch
+usb-gadget-u_ether-add-auto-cleanup-helper-for-freei.patch
+usb-gadget-f_ncm-align-net_device-lifecycle-with-bin.patch
+accel-rocket-fix-unwinding-in-error-path-in-rocket_c.patch
+accel-rocket-fix-unwinding-in-error-path-in-rocket_p.patch
+kvm-x86-add-x2apic-features-to-control-eoi-broadcast.patch
--- /dev/null
+From ce6017fa4a2e43cc5448bc4d307cfc8e5698d17f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:16 +0800
+Subject: usb: gadget: f_ncm: align net_device lifecycle with bind/unbind
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 56a512a9b4107079f68701e7d55da8507eb963d9 ]
+
+Currently, the net_device is allocated in ncm_alloc_inst() and freed in
+ncm_free_inst(). This ties the network interface's lifetime to the
+configuration instance rather than the USB connection (bind/unbind).
+
+This decoupling causes issues when the USB gadget is disconnected where
+the underlying gadget device is removed. The net_device can outlive its
+parent, leading to dangling sysfs links and NULL pointer dereferences
+when accessing the freed gadget device.
+
+Problem 1: NULL pointer dereference on disconnect
+ Unable to handle kernel NULL pointer dereference at virtual address
+ 0000000000000000
+ Call trace:
+ __pi_strlen+0x14/0x150
+ rtnl_fill_ifinfo+0x6b4/0x708
+ rtmsg_ifinfo_build_skb+0xd8/0x13c
+ rtmsg_ifinfo+0x50/0xa0
+ __dev_notify_flags+0x4c/0x1f0
+ dev_change_flags+0x54/0x70
+ do_setlink+0x390/0xebc
+ rtnl_newlink+0x7d0/0xac8
+ rtnetlink_rcv_msg+0x27c/0x410
+ netlink_rcv_skb+0x134/0x150
+ rtnetlink_rcv+0x18/0x28
+ netlink_unicast+0x254/0x3f0
+ netlink_sendmsg+0x2e0/0x3d4
+
+Problem 2: Dangling sysfs symlinks
+ console:/ # ls -l /sys/class/net/ncm0
+ lrwxrwxrwx ... /sys/class/net/ncm0 ->
+ /sys/devices/platform/.../gadget.0/net/ncm0
+ console:/ # ls -l /sys/devices/platform/.../gadget.0/net/ncm0
+ ls: .../gadget.0/net/ncm0: No such file or directory
+
+Move the net_device allocation to ncm_bind() and deallocation to
+ncm_unbind(). This ensures the network interface exists only when the
+gadget function is actually bound to a configuration.
+
+To support pre-bind configuration (e.g., setting interface name or MAC
+address via configfs), cache user-provided options in f_ncm_opts
+using the gether_opts structure. Apply these cached settings to the
+net_device upon creation in ncm_bind().
+
+Preserve the use-after-free fix from commit 6334b8e4553c ("usb: gadget:
+f_ncm: Fix UAF ncm object at re-bind after usb ep transport error").
+Check opts->net in ncm_set_alt() and ncm_disable() to ensure
+gether_disconnect() runs only if a connection was established.
+
+Fixes: 40d133d7f542 ("usb: gadget: f_ncm: convert to new function interface with backward compatibility")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-3-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/f_ncm.c | 128 ++++++++++++++--------------
+ drivers/usb/gadget/function/u_ncm.h | 4 +-
+ 2 files changed, 66 insertions(+), 66 deletions(-)
+
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 0e38330271d5a..e23adc132f886 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -83,6 +83,11 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+ return container_of(f, struct f_ncm, port.func);
+ }
+
++static inline struct f_ncm_opts *func_to_ncm_opts(struct usb_function *f)
++{
++ return container_of(f->fi, struct f_ncm_opts, func_inst);
++}
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -859,6 +864,7 @@ static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ /* Control interface has only altsetting 0 */
+@@ -881,12 +887,13 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ if (alt > 1)
+ goto fail;
+
+- if (ncm->netdev) {
+- DBG(cdev, "reset ncm\n");
+- ncm->netdev = NULL;
+- gether_disconnect(&ncm->port);
+- ncm_reset_values(ncm);
+- }
++ scoped_guard(mutex, &opts->lock)
++ if (opts->net) {
++ DBG(cdev, "reset ncm\n");
++ opts->net = NULL;
++ gether_disconnect(&ncm->port);
++ ncm_reset_values(ncm);
++ }
+
+ /*
+ * CDC Network only sends data in non-default altsettings.
+@@ -919,7 +926,8 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ net = gether_connect(&ncm->port);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+- ncm->netdev = net;
++ scoped_guard(mutex, &opts->lock)
++ opts->net = net;
+ }
+
+ spin_lock(&ncm->lock);
+@@ -1366,14 +1374,16 @@ static int ncm_unwrap_ntb(struct gether *port,
+ static void ncm_disable(struct usb_function *f)
+ {
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+
+ DBG(cdev, "ncm deactivated\n");
+
+- if (ncm->netdev) {
+- ncm->netdev = NULL;
+- gether_disconnect(&ncm->port);
+- }
++ scoped_guard(mutex, &opts->lock)
++ if (opts->net) {
++ opts->net = NULL;
++ gether_disconnect(&ncm->port);
++ }
+
+ if (ncm->notify->enabled) {
+ usb_ep_disable(ncm->notify);
+@@ -1433,39 +1443,44 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_ncm *ncm = func_to_ncm(f);
++ struct f_ncm_opts *ncm_opts = func_to_ncm_opts(f);
+ struct usb_string *us;
+ int status = 0;
+ struct usb_ep *ep;
+- struct f_ncm_opts *ncm_opts;
+
+ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
++ struct net_device *netdev __free(free_gether_netdev) = NULL;
+ struct usb_request *request __free(free_usb_request) = NULL;
+
+ if (!can_support_ecm(cdev->gadget))
+ return -EINVAL;
+
+- ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+-
+ if (cdev->use_os_string) {
+ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
+ if (!os_desc_table)
+ return -ENOMEM;
+ }
+
+- mutex_lock(&ncm_opts->lock);
+- gether_set_gadget(ncm_opts->net, cdev->gadget);
+- if (!ncm_opts->bound) {
+- ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
+- status = gether_register_netdev(ncm_opts->net);
++ netdev = gether_setup_default();
++ if (IS_ERR(netdev))
++ return -ENOMEM;
++
++ scoped_guard(mutex, &ncm_opts->lock) {
++ gether_apply_opts(netdev, &ncm_opts->net_opts);
++ netdev->mtu = ncm_opts->max_segment_size - ETH_HLEN;
+ }
+- mutex_unlock(&ncm_opts->lock);
+
++ gether_set_gadget(netdev, cdev->gadget);
++ status = gether_register_netdev(netdev);
+ if (status)
+ return status;
+
+- ncm_opts->bound = true;
+-
+- ncm_string_defs[1].s = ncm->ethaddr;
++ /* export host's Ethernet address in CDC format */
++ status = gether_get_host_addr_cdc(netdev, ncm->ethaddr,
++ sizeof(ncm->ethaddr));
++ if (status < 12)
++ return -EINVAL;
++ ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
+
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+@@ -1563,6 +1578,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ f->os_desc_n = 1;
+ }
+ ncm->notify_req = no_free_ptr(request);
++ ncm->netdev = no_free_ptr(netdev);
++ ncm->port.ioport = netdev_priv(ncm->netdev);
+
+ DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
+ ncm->port.in_ep->name, ncm->port.out_ep->name,
+@@ -1577,19 +1594,19 @@ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
+ }
+
+ /* f_ncm_item_ops */
+-USB_ETHERNET_CONFIGFS_ITEM(ncm);
++USB_ETHER_OPTS_ITEM(ncm);
+
+ /* f_ncm_opts_dev_addr */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);
++USB_ETHER_OPTS_ATTR_DEV_ADDR(ncm);
+
+ /* f_ncm_opts_host_addr */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);
++USB_ETHER_OPTS_ATTR_HOST_ADDR(ncm);
+
+ /* f_ncm_opts_qmult */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
++USB_ETHER_OPTS_ATTR_QMULT(ncm);
+
+ /* f_ncm_opts_ifname */
+-USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
++USB_ETHER_OPTS_ATTR_IFNAME(ncm);
+
+ static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
+ char *page)
+@@ -1655,34 +1672,27 @@ static void ncm_free_inst(struct usb_function_instance *f)
+ struct f_ncm_opts *opts;
+
+ opts = container_of(f, struct f_ncm_opts, func_inst);
+- if (opts->bound)
+- gether_cleanup(netdev_priv(opts->net));
+- else
+- free_netdev(opts->net);
+ kfree(opts->ncm_interf_group);
+ kfree(opts);
+ }
+
+ static struct usb_function_instance *ncm_alloc_inst(void)
+ {
+- struct f_ncm_opts *opts;
++ struct usb_function_instance *ret;
+ struct usb_os_desc *descs[1];
+ char *names[1];
+ struct config_group *ncm_interf_group;
+
+- opts = kzalloc(sizeof(*opts), GFP_KERNEL);
++ struct f_ncm_opts *opts __free(kfree) = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
++
++ opts->net = NULL;
+ opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;
++ gether_setup_opts_default(&opts->net_opts, "usb");
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = ncm_free_inst;
+- opts->net = gether_setup_default();
+- if (IS_ERR(opts->net)) {
+- struct net_device *net = opts->net;
+- kfree(opts);
+- return ERR_CAST(net);
+- }
+ opts->max_segment_size = ETH_FRAME_LEN;
+ INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
+
+@@ -1693,26 +1703,22 @@ static struct usb_function_instance *ncm_alloc_inst(void)
+ ncm_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+- if (IS_ERR(ncm_interf_group)) {
+- ncm_free_inst(&opts->func_inst);
++ if (IS_ERR(ncm_interf_group))
+ return ERR_CAST(ncm_interf_group);
+- }
+ opts->ncm_interf_group = ncm_interf_group;
+
+- return &opts->func_inst;
++ ret = &opts->func_inst;
++ retain_and_null_ptr(opts);
++ return ret;
+ }
+
+ static void ncm_free(struct usb_function *f)
+ {
+- struct f_ncm *ncm;
+- struct f_ncm_opts *opts;
++ struct f_ncm_opts *opts = func_to_ncm_opts(f);
+
+- ncm = func_to_ncm(f);
+- opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+- kfree(ncm);
+- mutex_lock(&opts->lock);
+- opts->refcnt--;
+- mutex_unlock(&opts->lock);
++ scoped_guard(mutex, &opts->lock)
++ opts->refcnt--;
++ kfree(func_to_ncm(f));
+ }
+
+ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+@@ -1736,13 +1742,15 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ kfree(ncm->notify_req->buf);
+ usb_ep_free_request(ncm->notify, ncm->notify_req);
++
++ ncm->port.ioport = NULL;
++ gether_cleanup(netdev_priv(ncm->netdev));
+ }
+
+ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+ {
+ struct f_ncm *ncm;
+ struct f_ncm_opts *opts;
+- int status;
+
+ /* allocate and initialize one new instance */
+ ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
+@@ -1750,22 +1758,12 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
+ return ERR_PTR(-ENOMEM);
+
+ opts = container_of(fi, struct f_ncm_opts, func_inst);
+- mutex_lock(&opts->lock);
+- opts->refcnt++;
+
+- /* export host's Ethernet address in CDC format */
+- status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
+- sizeof(ncm->ethaddr));
+- if (status < 12) { /* strlen("01234567890a") */
+- kfree(ncm);
+- mutex_unlock(&opts->lock);
+- return ERR_PTR(-EINVAL);
+- }
++ scoped_guard(mutex, &opts->lock)
++ opts->refcnt++;
+
+ spin_lock_init(&ncm->lock);
+ ncm_reset_values(ncm);
+- ncm->port.ioport = netdev_priv(opts->net);
+- mutex_unlock(&opts->lock);
+ ncm->port.is_fixed = true;
+ ncm->port.supports_multi_frame = true;
+
+diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
+index 49ec095cdb4b6..d99330fe31e88 100644
+--- a/drivers/usb/gadget/function/u_ncm.h
++++ b/drivers/usb/gadget/function/u_ncm.h
+@@ -15,11 +15,13 @@
+
+ #include <linux/usb/composite.h>
+
++#include "u_ether.h"
++
+ struct f_ncm_opts {
+ struct usb_function_instance func_inst;
+ struct net_device *net;
+- bool bound;
+
++ struct gether_opts net_opts;
+ struct config_group *ncm_interf_group;
+ struct usb_os_desc ncm_os_desc;
+ char ncm_ext_compat_id[16];
+--
+2.51.0
+
--- /dev/null
+From 87e5173654dfc57cbb75e9a0b040e87ba03bcc5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:15 +0800
+Subject: usb: gadget: u_ether: Add auto-cleanup helper for freeing net_device
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 0c0981126b99288ed354d3d414c8a5fd42ac9e25 ]
+
+The net_device in the u_ether framework currently requires explicit
+calls to unregister and free the device.
+
+Introduce gether_unregister_free_netdev() and the corresponding
+auto-cleanup macro. This ensures that if a net_device is registered, it
+is properly unregistered and the associated work queue is flushed before
+the memory is freed.
+
+This is a preparatory patch to simplify error handling paths in gadget
+drivers by removing the need for explicit goto labels for net_device
+cleanup.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-2-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 56a512a9b410 ("usb: gadget: f_ncm: align net_device lifecycle with bind/unbind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/u_ether.c | 15 +++++++++++++++
+ drivers/usb/gadget/function/u_ether.h | 2 ++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 745ed2c212e3a..6c32665538cc0 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1125,6 +1125,21 @@ void gether_cleanup(struct eth_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(gether_cleanup);
+
++void gether_unregister_free_netdev(struct net_device *net)
++{
++ if (!net)
++ return;
++
++ struct eth_dev *dev = netdev_priv(net);
++
++ if (net->reg_state == NETREG_REGISTERED) {
++ unregister_netdev(net);
++ flush_work(&dev->work);
++ }
++ free_netdev(net);
++}
++EXPORT_SYMBOL_GPL(gether_unregister_free_netdev);
++
+ /**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
+index 63a0240df4d74..a212a8ec5eb1b 100644
+--- a/drivers/usb/gadget/function/u_ether.h
++++ b/drivers/usb/gadget/function/u_ether.h
+@@ -283,6 +283,8 @@ int gether_get_ifname(struct net_device *net, char *name, int len);
+ int gether_set_ifname(struct net_device *net, const char *name, int len);
+
+ void gether_cleanup(struct eth_dev *dev);
++void gether_unregister_free_netdev(struct net_device *net);
++DEFINE_FREE(free_gether_netdev, struct net_device *, gether_unregister_free_netdev(_T));
+
+ void gether_setup_opts_default(struct gether_opts *opts, const char *name);
+ void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
+--
+2.51.0
+
--- /dev/null
+From c22403dca8dc5f3eb32c9caa8f325f6709964d2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 18:13:14 +0800
+Subject: usb: gadget: u_ether: add gether_opts for config caching
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit e065c6a7e46c2ee9c677fdbf50035323d2de1215 ]
+
+Currently, the net_device is allocated when the function instance is
+created (e.g., in ncm_alloc_inst()). While this allows userspace to
+configure the device early, it decouples the net_device lifecycle from
+the actual USB connection state (bind/unbind). The goal is to defer
+net_device creation to the bind callback to properly align the lifecycle
+with its parent gadget device.
+
+However, deferring net_device allocation would prevent userspace from
+configuring parameters (like interface name or MAC address) before the
+net_device exists.
+
+Introduce a new structure, struct gether_opts, associated with the
+usb_function_instance, to cache settings independently of the
+net_device. These settings include the interface name pattern, MAC
+addresses (device and host), queue multiplier, and address assignment
+type.
+
+New helper functions are added:
+- gether_setup_opts_default(): Initializes struct gether_opts with
+ defaults, including random MAC addresses.
+- gether_apply_opts(): Applies the cached options from a struct
+ gether_opts to a valid net_device.
+
+To expose these options to userspace, new configfs macros
+(USB_ETHER_OPTS_ITEM and USB_ETHER_OPTS_ATTR_*) are defined in
+u_ether_configfs.h. These attributes are part of the function
+instance's configfs group.
+
+This refactoring is a preparatory step. It allows the subsequent patch
+to safely move the net_device allocation from the instance creation
+phase to the bind phase without losing the ability to pre-configure
+the interface via configfs.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://patch.msgid.link/20251230-ncm-refactor-v1-1-793e347bc7a7@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 56a512a9b410 ("usb: gadget: f_ncm: align net_device lifecycle with bind/unbind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/function/u_ether.c | 30 +++
+ drivers/usb/gadget/function/u_ether.h | 28 +++
+ .../usb/gadget/function/u_ether_configfs.h | 176 ++++++++++++++++++
+ 3 files changed, 234 insertions(+)
+
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index f58590bf5e02f..745ed2c212e3a 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -1039,6 +1039,36 @@ int gether_set_ifname(struct net_device *net, const char *name, int len)
+ }
+ EXPORT_SYMBOL_GPL(gether_set_ifname);
+
++void gether_setup_opts_default(struct gether_opts *opts, const char *name)
++{
++ opts->qmult = QMULT_DEFAULT;
++ snprintf(opts->name, sizeof(opts->name), "%s%%d", name);
++ eth_random_addr(opts->dev_mac);
++ opts->addr_assign_type = NET_ADDR_RANDOM;
++ eth_random_addr(opts->host_mac);
++}
++EXPORT_SYMBOL_GPL(gether_setup_opts_default);
++
++void gether_apply_opts(struct net_device *net, struct gether_opts *opts)
++{
++ struct eth_dev *dev = netdev_priv(net);
++
++ dev->qmult = opts->qmult;
++
++ if (opts->ifname_set) {
++ strscpy(net->name, opts->name, sizeof(net->name));
++ dev->ifname_set = true;
++ }
++
++ memcpy(dev->host_mac, opts->host_mac, sizeof(dev->host_mac));
++
++ if (opts->addr_assign_type == NET_ADDR_SET) {
++ memcpy(dev->dev_mac, opts->dev_mac, sizeof(dev->dev_mac));
++ net->addr_assign_type = opts->addr_assign_type;
++ }
++}
++EXPORT_SYMBOL_GPL(gether_apply_opts);
++
+ void gether_suspend(struct gether *link)
+ {
+ struct eth_dev *dev = link->ioport;
+diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
+index 34be220cef77c..63a0240df4d74 100644
+--- a/drivers/usb/gadget/function/u_ether.h
++++ b/drivers/usb/gadget/function/u_ether.h
+@@ -38,6 +38,31 @@
+
+ struct eth_dev;
+
++/**
++ * struct gether_opts - Options for Ethernet gadget function instances
++ * @name: Pattern for the network interface name (e.g., "usb%d").
++ * Used to generate the net device name.
++ * @qmult: Queue length multiplier for high/super speed.
++ * @host_mac: The MAC address to be used by the host side.
++ * @dev_mac: The MAC address to be used by the device side.
++ * @ifname_set: True if the interface name pattern has been set by userspace.
++ * @addr_assign_type: The method used for assigning the device MAC address
++ * (e.g., NET_ADDR_RANDOM, NET_ADDR_SET).
++ *
++ * This structure caches network-related settings provided through configfs
++ * before the net_device is fully instantiated. This allows for early
++ * configuration while deferring net_device allocation until the function
++ * is bound.
++ */
++struct gether_opts {
++ char name[IFNAMSIZ];
++ unsigned int qmult;
++ u8 host_mac[ETH_ALEN];
++ u8 dev_mac[ETH_ALEN];
++ bool ifname_set;
++ unsigned char addr_assign_type;
++};
++
+ /*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+@@ -259,6 +284,9 @@ int gether_set_ifname(struct net_device *net, const char *name, int len);
+
+ void gether_cleanup(struct eth_dev *dev);
+
++void gether_setup_opts_default(struct gether_opts *opts, const char *name);
++void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
++
+ void gether_suspend(struct gether *link);
+ void gether_resume(struct gether *link);
+
+diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
+index f558c3139ebe5..a3696797e074a 100644
+--- a/drivers/usb/gadget/function/u_ether_configfs.h
++++ b/drivers/usb/gadget/function/u_ether_configfs.h
+@@ -13,6 +13,12 @@
+ #ifndef __U_ETHER_CONFIGFS_H
+ #define __U_ETHER_CONFIGFS_H
+
++#include <linux/cleanup.h>
++#include <linux/if_ether.h>
++#include <linux/mutex.h>
++#include <linux/netdevice.h>
++#include <linux/rtnetlink.h>
++
+ #define USB_ETHERNET_CONFIGFS_ITEM(_f_) \
+ static void _f_##_attr_release(struct config_item *item) \
+ { \
+@@ -197,4 +203,174 @@ out: \
+ \
+ CONFIGFS_ATTR(_f_##_opts_, _n_)
+
++#define USB_ETHER_OPTS_ITEM(_f_) \
++ static void _f_##_attr_release(struct config_item *item) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ usb_put_function_instance(&opts->func_inst); \
++ } \
++ \
++ static struct configfs_item_operations _f_##_item_ops = { \
++ .release = _f_##_attr_release, \
++ }
++
++#define USB_ETHER_OPTS_ATTR_DEV_ADDR(_f_) \
++ static ssize_t _f_##_opts_dev_addr_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%pM\n", opts->net_opts.dev_mac); \
++ } \
++ \
++ static ssize_t _f_##_opts_dev_addr_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u8 new_addr[ETH_ALEN]; \
++ const char *p = page; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ for (int i = 0; i < ETH_ALEN; i++) { \
++ unsigned char num; \
++ if ((*p == '.') || (*p == ':')) \
++ p++; \
++ num = hex_to_bin(*p++) << 4; \
++ num |= hex_to_bin(*p++); \
++ new_addr[i] = num; \
++ } \
++ if (!is_valid_ether_addr(new_addr)) \
++ return -EINVAL; \
++ memcpy(opts->net_opts.dev_mac, new_addr, ETH_ALEN); \
++ opts->net_opts.addr_assign_type = NET_ADDR_SET; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, dev_addr)
++
++#define USB_ETHER_OPTS_ATTR_HOST_ADDR(_f_) \
++ static ssize_t _f_##_opts_host_addr_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%pM\n", opts->net_opts.host_mac); \
++ } \
++ \
++ static ssize_t _f_##_opts_host_addr_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u8 new_addr[ETH_ALEN]; \
++ const char *p = page; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ for (int i = 0; i < ETH_ALEN; i++) { \
++ unsigned char num; \
++ if ((*p == '.') || (*p == ':')) \
++ p++; \
++ num = hex_to_bin(*p++) << 4; \
++ num |= hex_to_bin(*p++); \
++ new_addr[i] = num; \
++ } \
++ if (!is_valid_ether_addr(new_addr)) \
++ return -EINVAL; \
++ memcpy(opts->net_opts.host_mac, new_addr, ETH_ALEN); \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, host_addr)
++
++#define USB_ETHER_OPTS_ATTR_QMULT(_f_) \
++ static ssize_t _f_##_opts_qmult_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ \
++ guard(mutex)(&opts->lock); \
++ return sysfs_emit(page, "%u\n", opts->net_opts.qmult); \
++ } \
++ \
++ static ssize_t _f_##_opts_qmult_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ u32 val; \
++ int ret; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ \
++ ret = kstrtou32(page, 0, &val); \
++ if (ret) \
++ return ret; \
++ \
++ opts->net_opts.qmult = val; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, qmult)
++
++#define USB_ETHER_OPTS_ATTR_IFNAME(_f_) \
++ static ssize_t _f_##_opts_ifname_show(struct config_item *item, \
++ char *page) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ const char *name; \
++ \
++ guard(mutex)(&opts->lock); \
++ rtnl_lock(); \
++ if (opts->net_opts.ifname_set) \
++ name = opts->net_opts.name; \
++ else if (opts->net) \
++ name = netdev_name(opts->net); \
++ else \
++ name = "(inactive net_device)"; \
++ rtnl_unlock(); \
++ return sysfs_emit(page, "%s\n", name); \
++ } \
++ \
++ static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
++ const char *page, size_t len) \
++ { \
++ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
++ char tmp[IFNAMSIZ]; \
++ const char *p; \
++ size_t c_len = len; \
++ \
++ if (c_len > 0 && page[c_len - 1] == '\n') \
++ c_len--; \
++ \
++ if (c_len >= sizeof(tmp)) \
++ return -E2BIG; \
++ \
++ strscpy(tmp, page, c_len + 1); \
++ if (!dev_valid_name(tmp)) \
++ return -EINVAL; \
++ \
++ /* Require exactly one %d */ \
++ p = strchr(tmp, '%'); \
++ if (!p || p[1] != 'd' || strchr(p + 2, '%')) \
++ return -EINVAL; \
++ \
++ guard(mutex)(&opts->lock); \
++ if (opts->refcnt) \
++ return -EBUSY; \
++ strscpy(opts->net_opts.name, tmp, sizeof(opts->net_opts.name)); \
++ opts->net_opts.ifname_set = true; \
++ return len; \
++ } \
++ \
++ CONFIGFS_ATTR(_f_##_opts_, ifname)
++
+ #endif /* __U_ETHER_CONFIGFS_H */
+--
+2.51.0
+
--- /dev/null
+From 90e37599db0b29e7c872e1f602a6f56864b4ec3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 14:13:17 +0100
+Subject: ALSA: hda/conexant: Add quirk for HP ZBook Studio G4
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 1585cf83e98db32463e5d54161b06a5f01fe9976 ]
+
+It was reported that we need the same quirk for HP ZBook Studio G4
+(SSID 103c:826b) as other HP models to make the mute-LED working.
+
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/64d78753-b9ff-4c64-8920-64d8d31cd20c@gmail.com
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221002
+Link: https://patch.msgid.link/20260207131324.2428030-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index fd141185ce2b9..192d13f829e19 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1085,6 +1085,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x826b, "HP ZBook Studio G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+--
+2.51.0
+
--- /dev/null
+From 0c96c2aacf05b9a2a895012cd9b43755bcb5fbc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 11:44:11 +0100
+Subject: ALSA: hda/conexant: Fix headphone jack handling on Acer Swift SF314
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7bc0df86c2384bc1e2012a2c946f82305054da64 ]
+
+Acer Swift SF314 (SSID 1025:136d) needs a bit of tweaks of the pin
+configurations for NID 0x16 and 0x19 to make the headphone / headset
+jack working. NID 0x17 can remain as is for the working speaker, and
+the built-in mic is supported via SOF.
+
+Cc: <stable@vger.kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=221086
+Link: https://patch.msgid.link/20260217104414.62911-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_conexant.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 192d13f829e19..355b26583eb4e 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -312,6 +312,7 @@ enum {
+ CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
+ CXT_FIXUP_HP_A_U,
++ CXT_FIXUP_ACER_SWIFT_HP,
+ };
+
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -1028,6 +1029,14 @@ static const struct hda_fixup cxt_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_hp_a_u,
+ },
++ [CXT_FIXUP_ACER_SWIFT_HP] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x16, 0x0321403f }, /* Headphone */
++ { 0x19, 0x40f001f0 }, /* Mic */
++ { }
++ },
++ },
+ };
+
+ static const struct hda_quirk cxt5045_fixups[] = {
+@@ -1077,6 +1086,7 @@ static const struct hda_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
++ SND_PCI_QUIRK(0x1025, 0x136d, "Acer Swift SF314", CXT_FIXUP_ACER_SWIFT_HP),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+--
+2.51.0
+
--- /dev/null
+From 677712afd4b2011924284ee77d8bea5f568f3f9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 16:15:28 +0800
+Subject: arm64: dts: rockchip: Fix rk356x PCIe range mappings
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit f63ea193a404481f080ca2958f73e9f364682db9 ]
+
+The pcie bus address should be mapped 1:1 to the cpu side MMIO address, so
+that there is no same address allocated from normal system memory. Otherwise
+it's broken if the same address is assigned to the EP for DMA purposes. Fix it to
+sync with the vendor BSP.
+
+Fixes: 568a67e742df ("arm64: dts: rockchip: Fix rk356x PCIe register and range mappings")
+Fixes: 66b51ea7d70f ("arm64: dts: rockchip: Add rk3568 PCIe2x1 controller")
+Cc: stable@vger.kernel.org
+Cc: Andrew Powers-Holmes <aholmes@omnom.net>
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Link: https://patch.msgid.link/1767600929-195341-1-git-send-email-shawn.lin@rock-chips.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3568.dtsi | 4 ++--
+ arch/arm64/boot/dts/rockchip/rk356x.dtsi | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+index f1be76a54ceb0..4305fd20b5c32 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+@@ -97,7 +97,7 @@ pcie3x1: pcie@fe270000 {
+ <0x0 0xf2000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x40000000 0x3 0x40000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X1_POWERUP>;
+ reset-names = "pipe";
+@@ -150,7 +150,7 @@ pcie3x2: pcie@fe280000 {
+ <0x0 0xf0000000 0x0 0x00100000>;
+ ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x80000000 0x3 0x80000000 0x0 0x40000000>;
+ reg-names = "dbi", "apb", "config";
+ resets = <&cru SRST_PCIE30X2_POWERUP>;
+ reset-names = "pipe";
+diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+index 2f885bc3665b5..6377f2a0b4017 100644
+--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+@@ -997,7 +997,7 @@ pcie2x1: pcie@fe260000 {
+ power-domains = <&power RK3568_PD_PIPE>;
+ ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
+ <0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
+- <0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
++ <0x03000000 0x3 0x00000000 0x3 0x00000000 0x0 0x40000000>;
+ resets = <&cru SRST_PCIE20_POWERUP>;
+ reset-names = "pipe";
+ #address-cells = <3>;
+--
+2.51.0
+
--- /dev/null
+From 57dedc95f221e2334551760f98a319d340821e97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Nov 2023 21:28:32 +0100
+Subject: bus: omap-ocp2scp: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 854f89a5b56354ba4135e0e1f0e57ab2caee59ee ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Link: https://lore.kernel.org/r/20231109202830.4124591-3-u.kleine-koenig@pengutronix.de
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Stable-dep-of: 5eb63e9bb65d ("bus: omap-ocp2scp: fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index e02d0656242b8..7d7479ba0a759 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -84,12 +84,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int omap_ocp2scp_remove(struct platform_device *pdev)
++static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+-
+- return 0;
+ }
+
+ #ifdef CONFIG_OF
+@@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+
+ static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+- .remove = omap_ocp2scp_remove,
++ .remove_new = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+--
+2.51.0
+
--- /dev/null
+From 077762453cbac2873302063986906ccaa6fd56e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:01:19 +0100
+Subject: bus: omap-ocp2scp: fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 5eb63e9bb65d88abde647ced50fe6ad40c11de1a ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251219110119.23507-1-johan@kernel.org
+Signed-off-by: Kevin Hilman <khilman@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/omap-ocp2scp.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
+index 7d7479ba0a759..87e290a3dc817 100644
+--- a/drivers/bus/omap-ocp2scp.c
++++ b/drivers/bus/omap-ocp2scp.c
+@@ -17,15 +17,6 @@
+ #define OCP2SCP_TIMING 0x18
+ #define SYNC2_MASK 0xf
+
+-static int ocp2scp_remove_devices(struct device *dev, void *c)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- platform_device_unregister(pdev);
+-
+- return 0;
+-}
+-
+ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ {
+ int ret;
+@@ -79,7 +70,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+
+ err0:
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+
+ return ret;
+ }
+@@ -87,7 +78,7 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
+ static void omap_ocp2scp_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+- device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
++ of_platform_depopulate(&pdev->dev);
+ }
+
+ #ifdef CONFIG_OF
+--
+2.51.0
+
--- /dev/null
+From ebfaaed0eba9eea6acd0d53da0ffda4011d64185 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:40:03 +0100
+Subject: clk: tegra: tegra124-emc: fix device leak on set_rate()
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit da61439c63d34ae6503d080a847f144d587e3a48 ]
+
+Make sure to drop the reference taken when looking up the EMC device and
+its driver data on first set_rate().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: 2db04f16b589 ("clk: tegra: Add EMC clock driver")
+Fixes: 6d6ef58c2470 ("clk: tegra: tegra124-emc: Fix missing put_device() call in emc_ensure_emc_driver")
+Cc: stable@vger.kernel.org # 4.2: 6d6ef58c2470
+Cc: Mikko Perttunen <mperttunen@nvidia.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/tegra/clk-tegra124-emc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
+index 0f6fb776b2298..5f1af6dfe7154 100644
+--- a/drivers/clk/tegra/clk-tegra124-emc.c
++++ b/drivers/clk/tegra/clk-tegra124-emc.c
+@@ -197,8 +197,8 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
+ tegra->emc_node = NULL;
+
+ tegra->emc = platform_get_drvdata(pdev);
++ put_device(&pdev->dev);
+ if (!tegra->emc) {
+- put_device(&pdev->dev);
+ pr_err("%s: cannot find EMC driver\n", __func__);
+ return NULL;
+ }
+--
+2.51.0
+
--- /dev/null
+From 536e66b2a26816173ee76cdb8311a95a0b05b93f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Sep 2023 14:25:57 -0500
+Subject: drm/amd: Drop special case for yellow carp without discovery
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 3ef07651a5756e7de65615e18eacbf8822c23016 ]
+
+`amdgpu_gmc_get_vbios_allocations` has a special case for how to
+bring up yellow carp when amdgpu discovery is turned off. As this ASIC
+ships with discovery turned on, it's generally dead code and worse it
+causes `adev->mman.keep_stolen_vga_memory` to not be initialized for
+yellow carp.
+
+Remove it.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 096bb75e13cc ("drm/amdgpu: keep vga memory on MacBooks with switchable graphics")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 3c24637f3d6e9..7d120d4175499 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -728,12 +728,6 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
+- case CHIP_YELLOW_CARP:
+- if (amdgpu_discovery == 0) {
+- adev->mman.stolen_reserved_offset = 0x1ffb0000;
+- adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
+- }
+- break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+--
+2.51.0
+
--- /dev/null
+From 371dfabad493e69de96281512bcc82cf2be2393b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 10:42:54 -0600
+Subject: drm/amd: Fix hang on amdgpu unload by using pci_dev_is_disconnected()
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit f7afda7fcd169a9168695247d07ad94cf7b9798f ]
+
+The commit 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise
+disconnect") introduced early KFD cleanup when drm_dev_is_unplugged()
+returns true. However, this causes hangs during normal module unload
+(rmmod amdgpu).
+
+The issue occurs because drm_dev_unplug() is called in amdgpu_pci_remove()
+for all removal scenarios, not just surprise disconnects. This was done
+intentionally in commit 39934d3ed572 ("Revert "drm/amdgpu: TA unload
+messages are not actually sent to psp when amdgpu is uninstalled"") to
+fix IGT PCI software unplug test failures. As a result,
+drm_dev_is_unplugged() returns true even during normal module unload,
+triggering the early KFD cleanup inappropriately.
+
+The correct check should distinguish between:
+- Actual surprise disconnect (eGPU unplugged): pci_dev_is_disconnected()
+ returns true
+- Normal module unload (rmmod): pci_dev_is_disconnected() returns false
+
+Replace drm_dev_is_unplugged() with pci_dev_is_disconnected() to ensure
+the early cleanup only happens during true hardware disconnect events.
+
+Cc: stable@vger.kernel.org
+Reported-by: Cal Peake <cp@absolutedigital.net>
+Closes: https://lore.kernel.org/all/b0c22deb-c0fa-3343-33cf-fd9a77d7db99@absolutedigital.net/
+Fixes: 6a23e7b4332c ("drm/amd: Clean up kfd node on surprise disconnect")
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9481d450809b5..1251303b52d21 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4034,7 +4034,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ * before ip_fini_early to prevent kfd locking refcount issues by calling
+ * amdgpu_amdkfd_suspend()
+ */
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_amdkfd_device_fini_sw(adev);
+
+ amdgpu_device_ip_fini_early(adev);
+@@ -4046,7 +4046,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+- if (drm_dev_is_unplugged(adev_to_drm(adev)))
++ if (pci_dev_is_disconnected(adev->pdev))
+ amdgpu_device_unmap_mmio(adev);
+
+ }
+--
+2.51.0
+
--- /dev/null
+From 5f298fb56171e8b4ea97b7f75a276dd0c1e57266 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 10:02:32 -0500
+Subject: drm/amdgpu: keep vga memory on MacBooks with switchable graphics
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 096bb75e13cc508d3915b7604e356bcb12b17766 ]
+
+On Intel MacBookPros with switchable graphics, when the iGPU
+is enabled, the address of VRAM gets put at 0 in the dGPU's
+virtual address space. This is non-standard and seems to cause
+issues with the cursor if it ends up at 0. We have the framework
+to reserve memory at 0 in the address space, so enable it here if
+the vram start address is 0.
+
+Reviewed-and-tested-by: Mario Kleiner <mario.kleiner.de@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4302
+Cc: stable@vger.kernel.org
+Cc: Mario Kleiner <mario.kleiner.de@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+index 7d120d4175499..8cb192636368f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+@@ -728,6 +728,16 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
++ case CHIP_POLARIS10:
++ case CHIP_POLARIS11:
++ case CHIP_POLARIS12:
++ /* MacBookPros with switchable graphics put VRAM at 0 when
++ * the iGPU is enabled which results in cursor issues if
++ * the cursor ends up at 0. Reserve vram at 0 in that case.
++ */
++ if (adev->gmc.vram_start == 0)
++ adev->mman.keep_stolen_vga_memory = true;
++ break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+--
+2.51.0
+
--- /dev/null
+From bf192aa42d427d3e6aa0cd7a89b91b8e9ae75f58 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:42:01 +0100
+Subject: drm/tegra: dsi: fix device leak on probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit bfef062695570842cf96358f2f46f4c6642c6689 ]
+
+Make sure to drop the reference taken when looking up the companion
+(ganged) device and its driver data during probe().
+
+Note that holding a reference to a device does not prevent its driver
+data from going away so there is no point in keeping the reference.
+
+Fixes: e94236cde4d5 ("drm/tegra: dsi: Add ganged mode support")
+Fixes: 221e3638feb8 ("drm/tegra: Fix reference leak in tegra_dsi_ganged_probe")
+Cc: stable@vger.kernel.org # 3.19: 221e3638feb8
+Cc: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Link: https://patch.msgid.link/20251121164201.13188-1-johan@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/dsi.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 49fc4690c63af..e98eb8d0c4d77 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1539,11 +1539,9 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+ return -EPROBE_DEFER;
+
+ dsi->slave = platform_get_drvdata(gangster);
+-
+- if (!dsi->slave) {
+- put_device(&gangster->dev);
++ put_device(&gangster->dev);
++ if (!dsi->slave)
+ return -EPROBE_DEFER;
+- }
+
+ dsi->slave->master = dsi;
+ }
+--
+2.51.0
+
--- /dev/null
+From 5b1dade2c9246ca9914b72974e41a75b4272070d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 18:28:54 +0100
+Subject: ext4: convert bd_bitmap_page to bd_bitmap_folio
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 99b150d84e4939735cfce245e32e3d29312c68ec ]
+
+There is no need to make this a multi-page folio, so leave all the
+infrastructure around it in pages. But since we're locking it, playing
+with its refcount and checking whether it's uptodate, it needs to move
+to the folio API.
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20240416172900.244637-2-willy@infradead.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 98 ++++++++++++++++++++++++-----------------------
+ fs/ext4/mballoc.h | 2 +-
+ 2 files changed, 52 insertions(+), 48 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index ade2090155c1c..b5a5b89dfc98f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1461,9 +1461,10 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ int block, pnum, poff;
+ int blocks_per_page;
+ struct page *page;
++ struct folio *folio;
+
+ e4b->bd_buddy_page = NULL;
+- e4b->bd_bitmap_page = NULL;
++ e4b->bd_bitmap_folio = NULL;
+
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
+ /*
+@@ -1474,12 +1475,13 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ block = group * 2;
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (!page)
+- return -ENOMEM;
+- BUG_ON(page->mapping != inode->i_mapping);
+- e4b->bd_bitmap_page = page;
+- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (IS_ERR(folio))
++ return PTR_ERR(folio);
++ BUG_ON(folio->mapping != inode->i_mapping);
++ e4b->bd_bitmap_folio = folio;
++ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+ if (blocks_per_page >= 2) {
+ /* buddy and bitmap are on the same page */
+@@ -1497,9 +1499,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+
+ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+ {
+- if (e4b->bd_bitmap_page) {
+- unlock_page(e4b->bd_bitmap_page);
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio) {
++ folio_unlock(e4b->bd_bitmap_folio);
++ folio_put(e4b->bd_bitmap_folio);
+ }
+ if (e4b->bd_buddy_page) {
+ unlock_page(e4b->bd_buddy_page);
+@@ -1519,6 +1521,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ struct ext4_group_info *this_grp;
+ struct ext4_buddy e4b;
+ struct page *page;
++ struct folio *folio;
+ int ret = 0;
+
+ might_sleep();
+@@ -1545,11 +1548,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+
+- page = e4b.bd_bitmap_page;
+- ret = ext4_mb_init_cache(page, NULL, gfp);
++ folio = e4b.bd_bitmap_folio;
++ ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+ if (ret)
+ goto err;
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+@@ -1591,6 +1594,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ int pnum;
+ int poff;
+ struct page *page;
++ struct folio *folio;
+ int ret;
+ struct ext4_group_info *grp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -1609,7 +1613,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ e4b->bd_sb = sb;
+ e4b->bd_group = group;
+ e4b->bd_buddy_page = NULL;
+- e4b->bd_bitmap_page = NULL;
++ e4b->bd_bitmap_folio = NULL;
+
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ /*
+@@ -1630,53 +1634,53 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+
+- /* we could use find_or_create_page(), but it locks page
+- * what we'd like to avoid in fast path ... */
+- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
+- if (page)
++ /* Avoid locking the folio in the fast path ... */
++ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
++ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (!IS_ERR(folio))
+ /*
+- * drop the page reference and try
+- * to get the page with lock. If we
++ * drop the folio reference and try
++ * to get the folio with lock. If we
+ * are not uptodate that implies
+- * somebody just created the page but
+- * is yet to initialize the same. So
++ * somebody just created the folio but
++ * is yet to initialize it. So
+ * wait for it to initialize.
+ */
+- put_page(page);
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (page) {
+- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
+- "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
++ folio_put(folio);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (!IS_ERR(folio)) {
++ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
++ "ext4: bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+- unlock_page(page);
++ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+- if (!PageUptodate(page)) {
+- ret = ext4_mb_init_cache(page, NULL, gfp);
++ if (!folio_test_uptodate(folio)) {
++ ret = ext4_mb_init_cache(&folio->page, NULL, gfp);
+ if (ret) {
+- unlock_page(page);
++ folio_unlock(folio);
+ goto err;
+ }
+- mb_cmp_bitmaps(e4b, page_address(page) +
++ mb_cmp_bitmaps(e4b, folio_address(folio) +
+ (poff * sb->s_blocksize));
+ }
+- unlock_page(page);
++ folio_unlock(folio);
+ }
+ }
+- if (page == NULL) {
+- ret = -ENOMEM;
++ if (IS_ERR(folio)) {
++ ret = PTR_ERR(folio);
+ goto err;
+ }
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Pages marked accessed already */
+- e4b->bd_bitmap_page = page;
+- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
++ e4b->bd_bitmap_folio = folio;
++ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+ block++;
+ pnum = block / blocks_per_page;
+@@ -1724,8 +1728,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ err:
+ if (page)
+ put_page(page);
+- if (e4b->bd_bitmap_page)
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio)
++ folio_put(e4b->bd_bitmap_folio);
+
+ e4b->bd_buddy = NULL;
+ e4b->bd_bitmap = NULL;
+@@ -1740,8 +1744,8 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+
+ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ {
+- if (e4b->bd_bitmap_page)
+- put_page(e4b->bd_bitmap_page);
++ if (e4b->bd_bitmap_folio)
++ folio_put(e4b->bd_bitmap_folio);
+ if (e4b->bd_buddy_page)
+ put_page(e4b->bd_buddy_page);
+ }
+@@ -2167,7 +2171,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
+ * double allocate blocks. The reference is dropped
+ * in ext4_mb_release_context
+ */
+- ac->ac_bitmap_page = e4b->bd_bitmap_page;
++ ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
+ get_page(ac->ac_bitmap_page);
+ ac->ac_buddy_page = e4b->bd_buddy_page;
+ get_page(ac->ac_buddy_page);
+@@ -3902,7 +3906,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
+ * balance refcounts from ext4_mb_free_metadata()
+ */
+ put_page(e4b.bd_buddy_page);
+- put_page(e4b.bd_bitmap_page);
++ folio_put(e4b.bd_bitmap_folio);
+ }
+ ext4_unlock_group(sb, entry->efd_group);
+ ext4_mb_unload_buddy(&e4b);
+@@ -6348,7 +6352,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ struct rb_node *parent = NULL, *new_node;
+
+ BUG_ON(!ext4_handle_valid(handle));
+- BUG_ON(e4b->bd_bitmap_page == NULL);
++ BUG_ON(e4b->bd_bitmap_folio == NULL);
+ BUG_ON(e4b->bd_buddy_page == NULL);
+
+ new_node = &new_entry->efd_node;
+@@ -6361,7 +6365,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ * on-disk bitmap and lose not-yet-available
+ * blocks */
+ get_page(e4b->bd_buddy_page);
+- get_page(e4b->bd_bitmap_page);
++ folio_get(e4b->bd_bitmap_folio);
+ }
+ while (*n) {
+ parent = *n;
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index dd16050022f52..2d0aca8dc02e8 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -218,7 +218,7 @@ struct ext4_allocation_context {
+ struct ext4_buddy {
+ struct page *bd_buddy_page;
+ void *bd_buddy;
+- struct page *bd_bitmap_page;
++ struct folio *bd_bitmap_folio;
+ void *bd_bitmap;
+ struct ext4_group_info *bd_info;
+ struct super_block *bd_sb;
+--
+2.51.0
+
--- /dev/null
+From 6a421dae040ff92f6ec5bf519ba9e199c131833d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Apr 2024 18:28:55 +0100
+Subject: ext4: convert bd_buddy_page to bd_buddy_folio
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+[ Upstream commit 5eea586b47f05b5f5518cf8f9dd9283a01a8066d ]
+
+There is no need to make this a multi-page folio, so leave all the
+infrastructure around it in pages. But since we're locking it, playing
+with its refcount and checking whether it's uptodate, it needs to move
+to the folio API.
+
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/20240416172900.244637-3-willy@infradead.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 91 +++++++++++++++++++++++------------------------
+ fs/ext4/mballoc.h | 2 +-
+ 2 files changed, 46 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index b5a5b89dfc98f..877b336c651f7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1452,7 +1452,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+ * Lock the buddy and bitmap pages. This make sure other parallel init_group
+ * on the same buddy page doesn't happen whild holding the buddy page lock.
+ * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
+- * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
++ * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
+ */
+ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
+@@ -1460,10 +1460,9 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
+ int block, pnum, poff;
+ int blocks_per_page;
+- struct page *page;
+ struct folio *folio;
+
+- e4b->bd_buddy_page = NULL;
++ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
+
+ blocks_per_page = PAGE_SIZE / sb->s_blocksize;
+@@ -1489,11 +1488,12 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ }
+
+ /* blocks_per_page == 1, hence we need another page for the buddy */
+- page = find_or_create_page(inode->i_mapping, block + 1, gfp);
+- if (!page)
+- return -ENOMEM;
+- BUG_ON(page->mapping != inode->i_mapping);
+- e4b->bd_buddy_page = page;
++ folio = __filemap_get_folio(inode->i_mapping, block + 1,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (IS_ERR(folio))
++ return PTR_ERR(folio);
++ BUG_ON(folio->mapping != inode->i_mapping);
++ e4b->bd_buddy_folio = folio;
+ return 0;
+ }
+
+@@ -1503,9 +1503,9 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
+ folio_unlock(e4b->bd_bitmap_folio);
+ folio_put(e4b->bd_bitmap_folio);
+ }
+- if (e4b->bd_buddy_page) {
+- unlock_page(e4b->bd_buddy_page);
+- put_page(e4b->bd_buddy_page);
++ if (e4b->bd_buddy_folio) {
++ folio_unlock(e4b->bd_buddy_folio);
++ folio_put(e4b->bd_buddy_folio);
+ }
+ }
+
+@@ -1520,7 +1520,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+
+ struct ext4_group_info *this_grp;
+ struct ext4_buddy e4b;
+- struct page *page;
+ struct folio *folio;
+ int ret = 0;
+
+@@ -1557,7 +1556,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+
+- if (e4b.bd_buddy_page == NULL) {
++ if (e4b.bd_buddy_folio == NULL) {
+ /*
+ * If both the bitmap and buddy are in
+ * the same page we don't need to force
+@@ -1567,11 +1566,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
+ goto err;
+ }
+ /* init buddy cache */
+- page = e4b.bd_buddy_page;
+- ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
++ folio = e4b.bd_buddy_folio;
++ ret = ext4_mb_init_cache(&folio->page, e4b.bd_bitmap, gfp);
+ if (ret)
+ goto err;
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+@@ -1593,7 +1592,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ int block;
+ int pnum;
+ int poff;
+- struct page *page;
+ struct folio *folio;
+ int ret;
+ struct ext4_group_info *grp;
+@@ -1612,7 +1610,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ e4b->bd_info = grp;
+ e4b->bd_sb = sb;
+ e4b->bd_group = group;
+- e4b->bd_buddy_page = NULL;
++ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
+
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+@@ -1678,7 +1676,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ goto err;
+ }
+
+- /* Pages marked accessed already */
++ /* Folios marked accessed already */
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
+
+@@ -1686,48 +1684,49 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+
+- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+- if (page == NULL || !PageUptodate(page)) {
+- if (page)
+- put_page(page);
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
+- if (page) {
+- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
+- "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
++ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
++ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (!IS_ERR(folio))
++ folio_put(folio);
++ folio = __filemap_get_folio(inode->i_mapping, pnum,
++ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
++ if (!IS_ERR(folio)) {
++ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
++ "ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
+ /* should never happen */
+- unlock_page(page);
++ folio_unlock(folio);
+ ret = -EINVAL;
+ goto err;
+ }
+- if (!PageUptodate(page)) {
+- ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
++ if (!folio_test_uptodate(folio)) {
++ ret = ext4_mb_init_cache(&folio->page, e4b->bd_bitmap,
+ gfp);
+ if (ret) {
+- unlock_page(page);
++ folio_unlock(folio);
+ goto err;
+ }
+ }
+- unlock_page(page);
++ folio_unlock(folio);
+ }
+ }
+- if (page == NULL) {
+- ret = -ENOMEM;
++ if (IS_ERR(folio)) {
++ ret = PTR_ERR(folio);
+ goto err;
+ }
+- if (!PageUptodate(page)) {
++ if (!folio_test_uptodate(folio)) {
+ ret = -EIO;
+ goto err;
+ }
+
+- /* Pages marked accessed already */
+- e4b->bd_buddy_page = page;
+- e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
++ /* Folios marked accessed already */
++ e4b->bd_buddy_folio = folio;
++ e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
+
+ return 0;
+
+ err:
+- if (page)
+- put_page(page);
++ if (folio)
++ folio_put(folio);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+
+@@ -1746,8 +1745,8 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ {
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+- if (e4b->bd_buddy_page)
+- put_page(e4b->bd_buddy_page);
++ if (e4b->bd_buddy_folio)
++ folio_put(e4b->bd_buddy_folio);
+ }
+
+
+@@ -2173,7 +2172,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
+ */
+ ac->ac_bitmap_page = &e4b->bd_bitmap_folio->page;
+ get_page(ac->ac_bitmap_page);
+- ac->ac_buddy_page = e4b->bd_buddy_page;
++ ac->ac_buddy_page = &e4b->bd_buddy_folio->page;
+ get_page(ac->ac_buddy_page);
+ /* store last allocated for subsequent stream allocation */
+ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
+@@ -3905,7 +3904,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
+ /* No more items in the per group rb tree
+ * balance refcounts from ext4_mb_free_metadata()
+ */
+- put_page(e4b.bd_buddy_page);
++ folio_put(e4b.bd_buddy_folio);
+ folio_put(e4b.bd_bitmap_folio);
+ }
+ ext4_unlock_group(sb, entry->efd_group);
+@@ -6353,7 +6352,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+
+ BUG_ON(!ext4_handle_valid(handle));
+ BUG_ON(e4b->bd_bitmap_folio == NULL);
+- BUG_ON(e4b->bd_buddy_page == NULL);
++ BUG_ON(e4b->bd_buddy_folio == NULL);
+
+ new_node = &new_entry->efd_node;
+ cluster = new_entry->efd_start_cluster;
+@@ -6364,7 +6363,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+ * otherwise we'll refresh it from
+ * on-disk bitmap and lose not-yet-available
+ * blocks */
+- get_page(e4b->bd_buddy_page);
++ folio_get(e4b->bd_buddy_folio);
+ folio_get(e4b->bd_bitmap_folio);
+ }
+ while (*n) {
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 2d0aca8dc02e8..0dd6bc69ab611 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -216,7 +216,7 @@ struct ext4_allocation_context {
+ #define AC_STATUS_BREAK 3
+
+ struct ext4_buddy {
+- struct page *bd_buddy_page;
++ struct folio *bd_buddy_folio;
+ void *bd_buddy;
+ struct folio *bd_bitmap_folio;
+ void *bd_bitmap;
+--
+2.51.0
+
--- /dev/null
+From 9aace10ba5820e72145ebc6d2dbb9bf94a014ba9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Nov 2025 16:45:38 +0800
+Subject: ext4: correct the comments place for EXT4_EXT_MAY_ZEROOUT
+
+From: Yang Erkun <yangerkun@huawei.com>
+
+[ Upstream commit cc742fd1d184bb2a11bacf50587d2c85290622e4 ]
+
+Move the comments just before we set EXT4_EXT_MAY_ZEROOUT in
+ext4_split_convert_extents.
+
+Signed-off-by: Yang Erkun <yangerkun@huawei.com>
+Message-ID: <20251112084538.1658232-4-yangerkun@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 86c814bede1c5..4507e42869854 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3727,10 +3727,6 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ >> inode->i_sb->s_blocksize_bits;
+ if (eof_block < map->m_lblk + map->m_len)
+ eof_block = map->m_lblk + map->m_len;
+- /*
+- * It is safe to convert extent to initialized via explicit
+- * zeroout only if extent is fully inside i_size or new_size.
+- */
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+@@ -3741,6 +3737,10 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /*
++ * It is safe to convert extent to initialized via explicit
++ * zeroout only if extent is fully inside i_size or new_size.
++ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+--
+2.51.0
+
--- /dev/null
+From 77e2a57011020d48af0f9fbe3f4c68a88d2904ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Oct 2023 11:52:15 +0800
+Subject: ext4: delete redundant calculations in ext4_mb_get_buddy_page_lock()
+
+From: Gou Hao <gouhao@uniontech.com>
+
+[ Upstream commit f2fec3e99a32d7c14dbf63c824f8286ebc94b18d ]
+
+'blocks_per_page' is always 1 after 'if (blocks_per_page >= 2)',
+'pnum' and 'block' are equal in this case.
+
+Signed-off-by: Gou Hao <gouhao@uniontech.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20231024035215.29474-1-gouhao@uniontech.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: bdc56a9c46b2 ("ext4: fix e4b bitmap inconsistency reports")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d095c4a218a3a..ade2090155c1c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1486,9 +1486,8 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+ return 0;
+ }
+
+- block++;
+- pnum = block / blocks_per_page;
+- page = find_or_create_page(inode->i_mapping, pnum, gfp);
++ /* blocks_per_page == 1, hence we need another page for the buddy */
++ page = find_or_create_page(inode->i_mapping, block + 1, gfp);
+ if (!page)
+ return -ENOMEM;
+ BUG_ON(page->mapping != inode->i_mapping);
+--
+2.51.0
+
--- /dev/null
+From 2a2fbcb53c9180b70071bc1e06a583379c4933e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:35 +0800
+Subject: ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before
+ submitting I/O
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit feaf2a80e78f89ee8a3464126077ba8683b62791 ]
+
+When allocating blocks during within-EOF DIO and writeback with
+dioread_nolock enabled, EXT4_GET_BLOCKS_PRE_IO was set to split an
+existing large unwritten extent. However, EXT4_GET_BLOCKS_CONVERT was
+set when calling ext4_split_convert_extents(), which may potentially
+result in stale data issues.
+
+Assume we have an unwritten extent, and then DIO writes the second half.
+
+ [UUUUUUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUUUUUU] extent status tree
+ |<- ->| ----> dio write this range
+
+First, ext4_iomap_alloc() call ext4_map_blocks() with
+EXT4_GET_BLOCKS_PRE_IO, EXT4_GET_BLOCKS_UNWRIT_EXT and
+EXT4_GET_BLOCKS_CREATE flags set. ext4_map_blocks() find this extent and
+call ext4_split_convert_extents() with EXT4_GET_BLOCKS_CONVERT and the
+above flags set.
+
+Then, ext4_split_convert_extents() calls ext4_split_extent() with
+EXT4_EXT_MAY_ZEROOUT, EXT4_EXT_MARK_UNWRIT2 and EXT4_EXT_DATA_VALID2
+flags set, and it calls ext4_split_extent_at() to split the second half
+with EXT4_EXT_DATA_VALID2, EXT4_EXT_MARK_UNWRIT1, EXT4_EXT_MAY_ZEROOUT
+and EXT4_EXT_MARK_UNWRIT2 flags set. However, ext4_split_extent_at()
+failed to insert extent since a temporary lack -ENOSPC. It zeroes out
+the first half but convert the entire on-disk extent to written since
+the EXT4_EXT_DATA_VALID2 flag set, but left the second half as unwritten
+in the extent status tree.
+
+ [0000000000SSSSSS] data S: stale data, 0: zeroed
+ [WWWWWWWWWWWWWWWW] on-disk extent W: written extent
+ [WWWWWWWWWWUUUUUU] extent status tree
+
+Finally, if the DIO failed to write data to the disk, the stale data in
+the second half will be exposed once the cached extent entry is gone.
+
+Fix this issue by not passing EXT4_GET_BLOCKS_CONVERT when splitting
+an unwritten extent before submitting I/O, and make
+ext4_split_convert_extents() to zero out the entire extent range
+to zero for this case, and also mark the extent in the extent status
+tree for consistency.
+
+Fixes: b8a8684502a0 ("ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate")
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-4-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 4507e42869854..ed63260d792b1 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3735,15 +3735,19 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+- /* Convert to initialized */
+- } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
++ /* Split the existing unwritten extent */
++ } else if (flags & (EXT4_GET_BLOCKS_UNWRIT_EXT |
++ EXT4_GET_BLOCKS_CONVERT)) {
+ /*
+ * It is safe to convert extent to initialized via explicit
+ * zeroout only if extent is fully inside i_size or new_size.
+ */
+ split_flag |= ee_block + ee_len <= eof_block ?
+ EXT4_EXT_MAY_ZEROOUT : 0;
+- split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
++ split_flag |= EXT4_EXT_MARK_UNWRIT2;
++ /* Convert to initialized */
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
+@@ -3920,7 +3924,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+ path = ext4_split_convert_extents(handle, inode, map, path,
+- flags | EXT4_GET_BLOCKS_CONVERT, allocated);
++ flags, allocated);
+ if (IS_ERR(path))
+ return path;
+ /*
+--
+2.51.0
+
--- /dev/null
+From 652525e28dbed4bc15ec4d633fd3134f06075eec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:34 +0800
+Subject: ext4: don't zero the entire extent if EXT4_EXT_DATA_PARTIAL_VALID1
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 1bf6974822d1dba86cf11b5f05498581cf3488a2 ]
+
+When allocating initialized blocks from a large unwritten extent, or
+when splitting an unwritten extent during end I/O and converting it to
+initialized, there is currently a potential issue of stale data if the
+extent needs to be split in the middle.
+
+ 0 A B N
+ [UUUUUUUUUUUU] U: unwritten extent
+ [--DDDDDDDD--] D: valid data
+ |<- ->| ----> this range needs to be initialized
+
+ext4_split_extent() first try to split this extent at B with
+EXT4_EXT_DATA_ENTIRE_VALID1 and EXT4_EXT_MAY_ZEROOUT flag set, but
+ext4_split_extent_at() failed to split this extent due to temporary lack
+of space. It zeroout B to N and mark the entire extent from 0 to N
+as written.
+
+ 0 A B N
+ [WWWWWWWWWWWW] W: written extent
+ [SSDDDDDDDDZZ] Z: zeroed, S: stale data
+
+ext4_split_extent() then try to split this extent at A with
+EXT4_EXT_DATA_VALID2 flag set. This time, it split successfully and left
+a stale written extent from 0 to A.
+
+ 0 A B N
+ [WW|WWWWWWWWWW]
+ [SS|DDDDDDDDZZ]
+
+Fix this by pass EXT4_EXT_DATA_PARTIAL_VALID1 to ext4_split_extent_at()
+when splitting at B, don't convert the entire extent to written and left
+it as unwritten after zeroing out B to N. The remaining work is just
+like the standard two-part split. ext4_split_extent() will pass the
+EXT4_EXT_DATA_VALID2 flag when it calls ext4_split_extent_at() for the
+second time, allowing it to properly handle the split. If the split is
+successful, it will keep extent from 0 to A as unwritten.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-3-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 18520281e1b5f..fd9517dbf633e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3296,6 +3296,15 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ }
+
+ if (!err) {
++ /*
++ * The first half contains partially valid data, the
++ * splitting of this extent has not been completed, fix
++ * extent length and ext4_split_extent() split will the
++ * first half again.
++ */
++ if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
++ goto fix_extent_len;
++
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+@@ -3371,7 +3380,9 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
++ split_flag1 |= map->m_lblk > ee_block ?
++ EXT4_EXT_DATA_PARTIAL_VALID1 :
++ EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+--
+2.51.0
+
--- /dev/null
+From 13dc54060fea84e95f1c5136fd1f096e8502eb12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:38 +0800
+Subject: ext4: drop extent cache after doing PARTIAL_VALID1 zeroout
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 6d882ea3b0931b43530d44149b79fcd4ffc13030 ]
+
+When splitting an unwritten extent in the middle and converting it to
+initialized in ext4_split_extent() with the EXT4_EXT_MAY_ZEROOUT and
+EXT4_EXT_DATA_VALID2 flags set, it could leave a stale unwritten extent.
+
+Assume we have an unwritten file and buffered write in the middle of it
+without dioread_nolock enabled, it will allocate blocks as written
+extent.
+
+ 0 A B N
+ [UUUUUUUUUUUU] on-disk extent U: unwritten extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDD--] D: valid data
+ |<- ->| ----> this range needs to be initialized
+
+ext4_split_extent() first try to split this extent at B with
+EXT4_EXT_DATA_PARTIAL_VALID1 and EXT4_EXT_MAY_ZEROOUT flag set, but
+ext4_split_extent_at() failed to split this extent due to temporary lack
+of space. It zeroout B to N and leave the entire extent as unwritten.
+
+ 0 A B N
+ [UUUUUUUUUUUU] on-disk extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDDZZ] Z: zeroed data
+
+ext4_split_extent() then try to split this extent at A with
+EXT4_EXT_DATA_VALID2 flag set. This time, it split successfully and
+leave an written extent from A to N.
+
+ 0 A B N
+ [UUWWWWWWWWWW] on-disk extent W: written extent
+ [UUUUUUUUUUUU] extent status tree
+ [--DDDDDDDDZZ]
+
+Finally ext4_map_create_blocks() only insert extent A to B to the extent
+status tree, and leave an stale unwritten extent in the status tree.
+
+ 0 A B N
+ [UUWWWWWWWWWW] on-disk extent W: written extent
+ [UUWWWWWWWWUU] extent status tree
+ [--DDDDDDDDZZ]
+
+Fix this issue by always cached extent status entry after zeroing out
+the second part.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Message-ID: <20251129103247.686136-7-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ed63260d792b1..2818d297ce464 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3302,8 +3302,16 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * extent length and ext4_split_extent() split will the
+ * first half again.
+ */
+- if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
++ if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1) {
++ /*
++ * Drop extent cache to prevent stale unwritten
++ * extents remaining after zeroing out.
++ */
++ ext4_es_remove_extent(inode,
++ le32_to_cpu(zero_ex.ee_block),
++ ext4_ext_get_actual_len(&zero_ex));
+ goto fix_extent_len;
++ }
+
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+--
+2.51.0
+
--- /dev/null
+From 6495403661a665b0f818e9082dfd5ddbf0695266 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:39 +0800
+Subject: ext4: drop extent cache when splitting extent fails
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 79b592e8f1b435796cbc2722190368e3e8ffd7a1 ]
+
+When the split extent fails, we might leave some extents still being
+processed and return an error directly, which will result in stale
+extent entries remaining in the extent status tree. So drop all of the
+remaining potentially stale extents if the splitting fails.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Message-ID: <20251129103247.686136-8-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2818d297ce464..b7e9cbe832121 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3250,7 +3250,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return path;
++ goto out_path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3264,7 +3264,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return path;
++ goto out_path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -3341,6 +3341,10 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
++out_path:
++ if (IS_ERR(path))
++ /* Remove all remaining potentially stale extents. */
++ ext4_es_remove_extent(inode, ee_block, ee_len);
+ ext4_ext_show_leaf(inode, path);
+ return path;
+ }
+--
+2.51.0
+
--- /dev/null
+From 6035671eed36e8d2d6907a34fa462ebc92c9b521 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 17:08:20 +0800
+Subject: ext4: fix e4b bitmap inconsistency reports
+
+From: Yongjian Sun <sunyongjian1@huawei.com>
+
+[ Upstream commit bdc56a9c46b2a99c12313122b9352b619a2e719e ]
+
+A bitmap inconsistency issue was observed during stress tests under
+mixed huge-page workloads. Ext4 reported multiple e4b bitmap check
+failures like:
+
+ext4_mb_complex_scan_group:2508: group 350, 8179 free clusters as
+per group info. But got 8192 blocks
+
+Analysis and experimentation confirmed that the issue is caused by a
+race condition between page migration and bitmap modification. Although
+this timing window is extremely narrow, it is still hit in practice:
+
+folio_lock ext4_mb_load_buddy
+__migrate_folio
+ check ref count
+ folio_mc_copy __filemap_get_folio
+ folio_try_get(folio)
+ ......
+ mb_mark_used
+ ext4_mb_unload_buddy
+ __folio_migrate_mapping
+ folio_ref_freeze
+folio_unlock
+
+The root cause of this issue is that the fast path of load_buddy only
+increments the folio's reference count, which is insufficient to prevent
+concurrent folio migration. We observed that the folio migration process
+acquires the folio lock. Therefore, we can determine whether to take the
+fast path in load_buddy by checking the lock status. If the folio is
+locked, we opt for the slow path (which acquires the lock) to close this
+concurrency window.
+
+Additionally, this change addresses the following issues:
+
+When the DOUBLE_CHECK macro is enabled to inspect bitmap-related
+issues, the following error may be triggered:
+
+corruption in group 324 at byte 784(6272): f in copy != ff on
+disk/prealloc
+
+Analysis reveals that this is a false positive. There is a specific race
+window where the bitmap and the group descriptor become momentarily
+inconsistent, leading to this error report:
+
+ext4_mb_load_buddy ext4_mb_load_buddy
+ __filemap_get_folio(create|lock)
+ folio_lock
+ ext4_mb_init_cache
+ folio_mark_uptodate
+ __filemap_get_folio(no lock)
+ ......
+ mb_mark_used
+ mb_mark_used_double
+ mb_cmp_bitmaps
+ mb_set_bits(e4b->bd_bitmap)
+ folio_unlock
+
+The original logic assumed that since mb_cmp_bitmaps is called when the
+bitmap is newly loaded from disk, the folio lock would be sufficient to
+prevent concurrent access. However, this overlooks a specific race
+condition: if another process attempts to load buddy and finds the folio
+is already in an uptodate state, it will immediately begin using it without
+holding folio lock.
+
+Signed-off-by: Yongjian Sun <sunyongjian1@huawei.com>
+Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260106090820.836242-1-sunyongjian@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 877b336c651f7..d0f4e5905bf12 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1634,16 +1634,17 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+
+ /* Avoid locking the folio in the fast path ... */
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+- if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) {
++ /*
++ * folio_test_locked is employed to detect ongoing folio
++ * migrations, since concurrent migrations can lead to
++ * bitmap inconsistency. And if we are not uptodate that
++ * implies somebody just created the folio but is yet to
++ * initialize it. We can drop the folio reference and
++ * try to get the folio with lock in both cases to avoid
++ * concurrency.
++ */
+ if (!IS_ERR(folio))
+- /*
+- * drop the folio reference and try
+- * to get the folio with lock. If we
+- * are not uptodate that implies
+- * somebody just created the folio but
+- * is yet to initialize it. So
+- * wait for it to initialize.
+- */
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+@@ -1685,7 +1686,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
+ poff = block % blocks_per_page;
+
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+- if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
++ if (IS_ERR(folio) || !folio_test_uptodate(folio) || folio_test_locked(folio)) {
+ if (!IS_ERR(folio))
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+--
+2.51.0
+
--- /dev/null
+From 988eb88518baad6ae89bd848b7d64e498f71484d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:39 +0800
+Subject: ext4: get rid of ppath in ext4_convert_unwritten_extents_endio()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 8d5ad7b08f9234bc92b9567cfe52e521df5f6626 ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_convert_unwritten_extents_endio(), the
+following is done here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-20-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 43 +++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 20 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 27bacfa7d492c..8eb004700437e 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3753,12 +3753,11 @@ static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ allocated);
+ }
+
+-static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+- struct inode *inode,
+- struct ext4_map_blocks *map,
+- struct ext4_ext_path **ppath)
++static struct ext4_ext_path *
++ext4_convert_unwritten_extents_endio(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map,
++ struct ext4_ext_path *path)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_extent *ex;
+ ext4_lblk_t ee_block;
+ unsigned int ee_len;
+@@ -3788,24 +3787,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ #endif
+ path = ext4_split_convert_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT, NULL);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- return PTR_ERR(path);
+- }
++ if (IS_ERR(path))
++ return path;
+
+ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- return PTR_ERR(path);
+- }
+- *ppath = path;
++ if (IS_ERR(path))
++ return path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto out;
++ goto errout;
+ /* first mark the extent as initialized */
+ ext4_ext_mark_initialized(ex);
+
+@@ -3816,9 +3810,15 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+
+ /* Mark modified extent as dirty */
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+-out:
++ if (err)
++ goto errout;
++
+ ext4_ext_show_leaf(inode, path);
+- return err;
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ static int
+@@ -3946,10 +3946,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ }
+ /* IO end_io complete, convert the filled extent to written */
+ if (flags & EXT4_GET_BLOCKS_CONVERT) {
+- err = ext4_convert_unwritten_extents_endio(handle, inode, map,
+- ppath);
+- if (err < 0)
++ *ppath = ext4_convert_unwritten_extents_endio(handle, inode,
++ map, *ppath);
++ if (IS_ERR(*ppath)) {
++ err = PTR_ERR(*ppath);
++ *ppath = NULL;
+ goto out2;
++ }
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ goto map_out;
+ }
+--
+2.51.0
+
--- /dev/null
+From ea180d62387ca7a025804e148775e3c13ad609e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:40 +0800
+Subject: ext4: get rid of ppath in ext4_ext_convert_to_initialized()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 33c14b8bd8a9ef8b3dfde136b0ca779e68c2f576 ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_convert_to_initialized(), the following
+is done here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+ * The 'allocated' is changed from passing a value to passing an address.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-21-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 73 +++++++++++++++++++++++------------------------
+ 1 file changed, 35 insertions(+), 38 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 8eb004700437e..fe39d86d3a7e6 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3445,13 +3445,11 @@ static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
+ * that are allocated and initialized.
+ * It is guaranteed to be >= map->m_len.
+ */
+-static int ext4_ext_convert_to_initialized(handle_t *handle,
+- struct inode *inode,
+- struct ext4_map_blocks *map,
+- struct ext4_ext_path **ppath,
+- int flags)
++static struct ext4_ext_path *
++ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map, struct ext4_ext_path *path,
++ int flags, unsigned int *allocated)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_sb_info *sbi;
+ struct ext4_extent_header *eh;
+ struct ext4_map_blocks split_map;
+@@ -3461,7 +3459,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ unsigned int ee_len, depth, map_len = map->m_len;
+ int err = 0;
+ int split_flag = EXT4_EXT_DATA_VALID2;
+- int allocated = 0;
+ unsigned int max_zeroout = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+@@ -3502,6 +3499,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ * - L2: we only attempt to merge with an extent stored in the
+ * same extent tree node.
+ */
++ *allocated = 0;
+ if ((map->m_lblk == ee_block) &&
+ /* See if we can merge left */
+ (map_len < ee_len) && /*L1*/
+@@ -3531,7 +3529,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto out;
++ goto errout;
+
+ trace_ext4_ext_convert_to_initialized_fastpath(inode,
+ map, ex, abut_ex);
+@@ -3546,7 +3544,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
+
+ /* Result: number of initialized blocks past m_lblk */
+- allocated = map_len;
++ *allocated = map_len;
+ }
+ } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
+ (map_len < ee_len) && /*L1*/
+@@ -3577,7 +3575,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto out;
++ goto errout;
+
+ trace_ext4_ext_convert_to_initialized_fastpath(inode,
+ map, ex, abut_ex);
+@@ -3592,18 +3590,20 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ abut_ex->ee_len = cpu_to_le16(next_len + map_len);
+
+ /* Result: number of initialized blocks past m_lblk */
+- allocated = map_len;
++ *allocated = map_len;
+ }
+ }
+- if (allocated) {
++ if (*allocated) {
+ /* Mark the block containing both extents as dirty */
+ err = ext4_ext_dirty(handle, inode, path + depth);
+
+ /* Update path to point to the right extent */
+ path[depth].p_ext = abut_ex;
++ if (err)
++ goto errout;
+ goto out;
+ } else
+- allocated = ee_len - (map->m_lblk - ee_block);
++ *allocated = ee_len - (map->m_lblk - ee_block);
+
+ WARN_ON(map->m_lblk < ee_block);
+ /*
+@@ -3630,21 +3630,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ split_map.m_lblk = map->m_lblk;
+ split_map.m_len = map->m_len;
+
+- if (max_zeroout && (allocated > split_map.m_len)) {
+- if (allocated <= max_zeroout) {
++ if (max_zeroout && (*allocated > split_map.m_len)) {
++ if (*allocated <= max_zeroout) {
+ /* case 3 or 5 */
+ zero_ex1.ee_block =
+ cpu_to_le32(split_map.m_lblk +
+ split_map.m_len);
+ zero_ex1.ee_len =
+- cpu_to_le16(allocated - split_map.m_len);
++ cpu_to_le16(*allocated - split_map.m_len);
+ ext4_ext_store_pblock(&zero_ex1,
+ ext4_ext_pblock(ex) + split_map.m_lblk +
+ split_map.m_len - ee_block);
+ err = ext4_ext_zeroout(inode, &zero_ex1);
+ if (err)
+ goto fallback;
+- split_map.m_len = allocated;
++ split_map.m_len = *allocated;
+ }
+ if (split_map.m_lblk - ee_block + split_map.m_len <
+ max_zeroout) {
+@@ -3662,27 +3662,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+
+ split_map.m_len += split_map.m_lblk - ee_block;
+ split_map.m_lblk = ee_block;
+- allocated = map->m_len;
++ *allocated = map->m_len;
+ }
+ }
+
+ fallback:
+ path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
+ flags, NULL);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- *ppath = NULL;
+- goto out;
+- }
+- err = 0;
+- *ppath = path;
++ if (IS_ERR(path))
++ return path;
+ out:
+ /* If we have gotten a failure, don't zero out status tree */
+- if (!err) {
+- ext4_zeroout_es(inode, &zero_ex1);
+- ext4_zeroout_es(inode, &zero_ex2);
+- }
+- return err ? err : allocated;
++ ext4_zeroout_es(inode, &zero_ex1);
++ ext4_zeroout_es(inode, &zero_ex2);
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ /*
+@@ -3904,7 +3901,6 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path **ppath, int flags,
+ unsigned int allocated, ext4_fsblk_t newblock)
+ {
+- int ret = 0;
+ int err = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
+@@ -3984,23 +3980,24 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ * For buffered writes, at writepage time, etc. Convert a
+ * discovered unwritten extent to written.
+ */
+- ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
+- if (ret < 0) {
+- err = ret;
++ *ppath = ext4_ext_convert_to_initialized(handle, inode, map, *ppath,
++ flags, &allocated);
++ if (IS_ERR(*ppath)) {
++ err = PTR_ERR(*ppath);
++ *ppath = NULL;
+ goto out2;
+ }
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ /*
+- * shouldn't get a 0 return when converting an unwritten extent
++ * shouldn't get a 0 allocated when converting an unwritten extent
+ * unless m_len is 0 (bug) or extent has been corrupted
+ */
+- if (unlikely(ret == 0)) {
+- EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
++ if (unlikely(allocated == 0)) {
++ EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
+- allocated = ret;
+
+ out:
+ map->m_flags |= EXT4_MAP_NEW;
+--
+2.51.0
+
--- /dev/null
+From 6ed8c9199f4dda6cb0c5732d97fa19bcdb140592 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:33 +0800
+Subject: ext4: get rid of ppath in ext4_ext_create_new_leaf()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit a000bc8678cc2bb10a5b80b4e991e77c7b4612fd ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_create_new_leaf(), the following is
+done here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-14-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 43 ++++++++++++++++++++++---------------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index cd5f679648cea..7c2bc5c2c7664 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1392,13 +1392,12 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ * finds empty index and adds new leaf.
+ * if no free index is found, then it requests in-depth growing.
+ */
+-static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+- unsigned int mb_flags,
+- unsigned int gb_flags,
+- struct ext4_ext_path **ppath,
+- struct ext4_extent *newext)
++static struct ext4_ext_path *
++ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
++ unsigned int mb_flags, unsigned int gb_flags,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_ext_path *curp;
+ int depth, i, err = 0;
+
+@@ -1419,28 +1418,25 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ * entry: create all needed subtree and add new leaf */
+ err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
+ if (err)
+- goto out;
++ goto errout;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- if (IS_ERR(path))
+- err = PTR_ERR(path);
++ return path;
+ } else {
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+- goto out;
++ goto errout;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- goto out;
+- }
++ if (IS_ERR(path))
++ return path;
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+@@ -1452,9 +1448,11 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
+-out:
+- *ppath = IS_ERR(path) ? NULL : path;
+- return err;
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ /*
+@@ -2097,11 +2095,14 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ */
+ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+ mb_flags |= EXT4_MB_USE_RESERVED;
+- err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+- ppath, newext);
+- if (err)
++ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
++ path, newext);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ err = PTR_ERR(path);
+ goto cleanup;
+- path = *ppath;
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+--
+2.51.0
+
--- /dev/null
+From 4214036d1b91177519f74dff7faf80cf14c1c51f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:41 +0800
+Subject: ext4: get rid of ppath in ext4_ext_handle_unwritten_extents()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 2ec2e1043473b3d4a3afbe6ad7c5a5b7a6fdf480 ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_handle_unwritten_extents(), the
+following is done here:
+
+ * Free the extents path when an error is encountered.
+ * The 'allocated' is changed from passing a value to passing an address.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-22-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 82 +++++++++++++++++++++--------------------------
+ 1 file changed, 37 insertions(+), 45 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index fe39d86d3a7e6..86c814bede1c5 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3895,18 +3895,18 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
+ return 0;
+ }
+
+-static int
++static struct ext4_ext_path *
+ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+- struct ext4_ext_path **ppath, int flags,
+- unsigned int allocated, ext4_fsblk_t newblock)
++ struct ext4_ext_path *path, int flags,
++ unsigned int *allocated, ext4_fsblk_t newblock)
+ {
+ int err = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
+ (unsigned long long)map->m_lblk, map->m_len, flags,
+- allocated);
+- ext4_ext_show_leaf(inode, *ppath);
++ *allocated);
++ ext4_ext_show_leaf(inode, path);
+
+ /*
+ * When writing into unwritten space, we should not fail to
+@@ -3915,40 +3915,34 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
+
+ trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
+- allocated, newblock);
++ *allocated, newblock);
+
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+- *ppath = ext4_split_convert_extents(handle, inode, map, *ppath,
+- flags | EXT4_GET_BLOCKS_CONVERT, &allocated);
+- if (IS_ERR(*ppath)) {
+- err = PTR_ERR(*ppath);
+- *ppath = NULL;
+- goto out2;
+- }
++ path = ext4_split_convert_extents(handle, inode, map, path,
++ flags | EXT4_GET_BLOCKS_CONVERT, allocated);
++ if (IS_ERR(path))
++ return path;
+ /*
+ * shouldn't get a 0 allocated when splitting an extent unless
+ * m_len is 0 (bug) or extent has been corrupted
+ */
+- if (unlikely(allocated == 0)) {
++ if (unlikely(*allocated == 0)) {
+ EXT4_ERROR_INODE(inode,
+ "unexpected allocated == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+- goto out2;
++ goto errout;
+ }
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ goto out;
+ }
+ /* IO end_io complete, convert the filled extent to written */
+ if (flags & EXT4_GET_BLOCKS_CONVERT) {
+- *ppath = ext4_convert_unwritten_extents_endio(handle, inode,
+- map, *ppath);
+- if (IS_ERR(*ppath)) {
+- err = PTR_ERR(*ppath);
+- *ppath = NULL;
+- goto out2;
+- }
++ path = ext4_convert_unwritten_extents_endio(handle, inode,
++ map, path);
++ if (IS_ERR(path))
++ return path;
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ goto map_out;
+ }
+@@ -3980,23 +3974,20 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ * For buffered writes, at writepage time, etc. Convert a
+ * discovered unwritten extent to written.
+ */
+- *ppath = ext4_ext_convert_to_initialized(handle, inode, map, *ppath,
+- flags, &allocated);
+- if (IS_ERR(*ppath)) {
+- err = PTR_ERR(*ppath);
+- *ppath = NULL;
+- goto out2;
+- }
++ path = ext4_ext_convert_to_initialized(handle, inode, map, path,
++ flags, allocated);
++ if (IS_ERR(path))
++ return path;
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ /*
+ * shouldn't get a 0 allocated when converting an unwritten extent
+ * unless m_len is 0 (bug) or extent has been corrupted
+ */
+- if (unlikely(allocated == 0)) {
++ if (unlikely(*allocated == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected allocated == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+- goto out2;
++ goto errout;
+ }
+
+ out:
+@@ -4005,12 +3996,15 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ map->m_flags |= EXT4_MAP_MAPPED;
+ out1:
+ map->m_pblk = newblock;
+- if (allocated > map->m_len)
+- allocated = map->m_len;
+- map->m_len = allocated;
+- ext4_ext_show_leaf(inode, *ppath);
+-out2:
+- return err ? err : allocated;
++ if (*allocated > map->m_len)
++ *allocated = map->m_len;
++ map->m_len = *allocated;
++ ext4_ext_show_leaf(inode, path);
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ /*
+@@ -4204,7 +4198,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_extent newex, *ex, ex2;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_fsblk_t newblock = 0, pblk;
+- int err = 0, depth, ret;
++ int err = 0, depth;
+ unsigned int allocated = 0, offset = 0;
+ unsigned int allocated_clusters = 0;
+ struct ext4_allocation_request ar;
+@@ -4279,13 +4273,11 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ goto out;
+ }
+
+- ret = ext4_ext_handle_unwritten_extents(
+- handle, inode, map, &path, flags,
+- allocated, newblock);
+- if (ret < 0)
+- err = ret;
+- else
+- allocated = ret;
++ path = ext4_ext_handle_unwritten_extents(
++ handle, inode, map, path, flags,
++ &allocated, newblock);
++ if (IS_ERR(path))
++ err = PTR_ERR(path);
+ goto out;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 5ca8825b4c7bbb8346b5e33a6a5d18c88bf80848 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:34 +0800
+Subject: ext4: get rid of ppath in ext4_ext_insert_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit f7d1331f16a869c76a5102caebb58e840e1d509c ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_ext_insert_extent(), the following is done
+here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+ * Free path when npath is used, free npath when it is not used.
+ * The got_allocated_blocks label in ext4_ext_map_blocks() does not
+ update err now, so err is updated to 0 if the err returned by
+ ext4_ext_search_right() is greater than 0 and is about to enter
+ got_allocated_blocks.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-15-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 7 ++--
+ fs/ext4/extents.c | 88 ++++++++++++++++++++++++-------------------
+ fs/ext4/fast_commit.c | 8 ++--
+ fs/ext4/migrate.c | 5 ++-
+ 4 files changed, 61 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index dd0317d66c1db..ce8bd312c1b84 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3708,9 +3708,10 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+ int num,
+ struct ext4_ext_path *path);
+-extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+- struct ext4_ext_path **,
+- struct ext4_extent *, int);
++extern struct ext4_ext_path *ext4_ext_insert_extent(
++ handle_t *handle, struct inode *inode,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext, int gb_flags);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+ struct ext4_ext_path *,
+ int flags);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 7c2bc5c2c7664..4f15c26bafe53 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1960,16 +1960,15 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ * inserts requested extent as new one into the tree,
+ * creating new leaf in the no-space case.
+ */
+-int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path **ppath,
+- struct ext4_extent *newext, int gb_flags)
++struct ext4_ext_path *
++ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
++ struct ext4_ext_path *path,
++ struct ext4_extent *newext, int gb_flags)
+ {
+- struct ext4_ext_path *path = *ppath;
+ struct ext4_extent_header *eh;
+ struct ext4_extent *ex, *fex;
+ struct ext4_extent *nearex; /* nearest extent */
+- struct ext4_ext_path *npath = NULL;
+- int depth, len, err;
++ int depth, len, err = 0;
+ ext4_lblk_t next;
+ int mb_flags = 0, unwritten;
+
+@@ -1977,14 +1976,16 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_DELALLOC_RESERVED;
+ if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
+ EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
+- return -EFSCORRUPTED;
++ err = -EFSCORRUPTED;
++ goto errout;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ eh = path[depth].p_hdr;
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+- return -EFSCORRUPTED;
++ err = -EFSCORRUPTED;
++ goto errout;
+ }
+
+ /* try to insert block into found extent and return */
+@@ -2022,7 +2023,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- return err;
++ goto errout;
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(newext));
+@@ -2047,7 +2048,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- return err;
++ goto errout;
+
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_block = newext->ee_block;
+@@ -2072,21 +2073,26 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+ next = ext4_ext_next_leaf_block(path);
+ if (next != EXT_MAX_BLOCKS) {
++ struct ext4_ext_path *npath;
++
+ ext_debug(inode, "next leaf block - %u\n", next);
+- BUG_ON(npath != NULL);
+ npath = ext4_find_extent(inode, next, NULL, gb_flags);
+- if (IS_ERR(npath))
+- return PTR_ERR(npath);
++ if (IS_ERR(npath)) {
++ err = PTR_ERR(npath);
++ goto errout;
++ }
+ BUG_ON(npath->p_depth != path->p_depth);
+ eh = npath[depth].p_hdr;
+ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
+ ext_debug(inode, "next leaf isn't full(%d)\n",
+ le16_to_cpu(eh->eh_entries));
++ ext4_free_ext_path(path);
+ path = npath;
+ goto has_space;
+ }
+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
++ ext4_free_ext_path(npath);
+ }
+
+ /*
+@@ -2097,12 +2103,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_USE_RESERVED;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- err = PTR_ERR(path);
+- goto cleanup;
+- }
+- *ppath = path;
++ if (IS_ERR(path))
++ return path;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+@@ -2111,7 +2113,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto cleanup;
++ goto errout;
+
+ if (!nearex) {
+ /* there is no extent in this leaf, create first one */
+@@ -2169,17 +2171,20 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
+ ext4_ext_try_to_merge(handle, inode, path, nearex);
+
+-
+ /* time to correct all indexes above */
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+- goto cleanup;
++ goto errout;
+
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
++ if (err)
++ goto errout;
+
+-cleanup:
+- ext4_free_ext_path(npath);
+- return err;
++ return path;
++
++errout:
++ ext4_free_ext_path(path);
++ return ERR_PTR(err);
+ }
+
+ static int ext4_fill_es_cache_info(struct inode *inode,
+@@ -3230,24 +3235,29 @@ static int ext4_split_extent_at(handle_t *handle,
+ if (split_flag & EXT4_EXT_MARK_UNWRIT2)
+ ext4_ext_mark_unwritten(ex2);
+
+- err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+- if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
++ if (!IS_ERR(path)) {
++ *ppath = path;
+ goto out;
++ }
++ *ppath = NULL;
++ err = PTR_ERR(path);
++ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
++ return err;
+
+ /*
+- * Update path is required because previous ext4_ext_insert_extent()
+- * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
+- * guarantees that ext4_find_extent() will not return -ENOMEM,
+- * otherwise -ENOMEM will cause a retry in do_writepages(), and a
+- * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+- * an incorrect ee_len causing the i_reserved_data_blocks exception.
++ * Get a new path to try to zeroout or fix the extent length.
++ * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
++ * will not return -ENOMEM, otherwise -ENOMEM will cause a
++ * retry in do_writepages(), and a WARN_ON may be triggered
++ * in ext4_da_update_reserve_space() due to an incorrect
++ * ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, *ppath,
++ path = ext4_find_extent(inode, ee_block, NULL,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+@@ -3306,7 +3316,7 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
+ out:
+- ext4_ext_show_leaf(inode, *ppath);
++ ext4_ext_show_leaf(inode, path);
+ return err;
+ }
+
+@@ -4296,6 +4306,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
+ ar.len = allocated = map->m_len;
+ newblock = map->m_pblk;
++ err = 0;
+ goto got_allocated_blocks;
+ }
+
+@@ -4368,8 +4379,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ }
+
+- err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
+- if (err) {
++ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
+ if (allocated_clusters) {
+ int fb_flags = 0;
+
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 62a6960242c5a..be65b5f51d9e2 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1806,12 +1806,12 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ if (ext4_ext_is_unwritten(ex))
+ ext4_ext_mark_unwritten(&newex);
+ down_write(&EXT4_I(inode)->i_data_sem);
+- ret = ext4_ext_insert_extent(
+- NULL, inode, &path, &newex, 0);
++ path = ext4_ext_insert_extent(NULL, inode,
++ path, &newex, 0);
+ up_write((&EXT4_I(inode)->i_data_sem));
+- ext4_free_ext_path(path);
+- if (ret)
++ if (IS_ERR(path))
+ goto out;
++ ext4_free_ext_path(path);
+ goto next;
+ }
+
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index a5e1492bbaaa5..1b0dfd963d3f0 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -37,7 +37,6 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ path = ext4_find_extent(inode, lb->first_block, NULL, 0);
+ if (IS_ERR(path)) {
+ retval = PTR_ERR(path);
+- path = NULL;
+ goto err_out;
+ }
+
+@@ -53,7 +52,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
+ if (retval < 0)
+ goto err_out;
+- retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
++ path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
++ if (IS_ERR(path))
++ retval = PTR_ERR(path);
+ err_out:
+ up_write((&EXT4_I(inode)->i_data_sem));
+ ext4_free_ext_path(path);
+--
+2.51.0
+
--- /dev/null
+From 517972dc9b56150c82d45d17dac0ee8292895777 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:31 +0800
+Subject: ext4: get rid of ppath in ext4_find_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 0be4c0c2f17bd10ae16c852f02d51a6a7b318aca ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+Getting rid of ppath in ext4_find_extent() requires its caller to update
+ppath. These ppaths will also be dropped later. No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-12-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 2 +-
+ fs/ext4/extents.c | 55 +++++++++++++++++++++++--------------------
+ fs/ext4/move_extent.c | 7 +++---
+ 3 files changed, 34 insertions(+), 30 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 85ba12a48f26a..dd0317d66c1db 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3712,7 +3712,7 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+ struct ext4_ext_path **,
+ struct ext4_extent *, int);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+- struct ext4_ext_path **,
++ struct ext4_ext_path *,
+ int flags);
+ extern void ext4_free_ext_path(struct ext4_ext_path *);
+ extern int ext4_ext_check_inode(struct inode *inode);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 8d9cd6574d326..cd5f679648cea 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -881,11 +881,10 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+
+ struct ext4_ext_path *
+ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+- struct ext4_ext_path **orig_path, int flags)
++ struct ext4_ext_path *path, int flags)
+ {
+ struct ext4_extent_header *eh;
+ struct buffer_head *bh;
+- struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
+ short int depth, i, ppos = 0;
+ int ret;
+ gfp_t gfp_flags = GFP_NOFS;
+@@ -906,7 +905,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+ ext4_ext_drop_refs(path);
+ if (depth > path[0].p_maxdepth) {
+ kfree(path);
+- *orig_path = path = NULL;
++ path = NULL;
+ }
+ }
+ if (!path) {
+@@ -957,14 +956,10 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+
+ ext4_ext_show_path(inode, path);
+
+- if (orig_path)
+- *orig_path = path;
+ return path;
+
+ err:
+ ext4_free_ext_path(path);
+- if (orig_path)
+- *orig_path = NULL;
+ return ERR_PTR(ret);
+ }
+
+@@ -1429,7 +1424,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- ppath, gb_flags);
++ path, gb_flags);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
+ } else {
+@@ -1441,7 +1436,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- ppath, gb_flags);
++ path, gb_flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -1457,8 +1452,8 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
+-
+ out:
++ *ppath = IS_ERR(path) ? NULL : path;
+ return err;
+ }
+
+@@ -3246,15 +3241,17 @@ static int ext4_split_extent_at(handle_t *handle,
+ * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+ * an incorrect ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, ppath,
++ path = ext4_find_extent(inode, ee_block, *ppath,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
++ *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
++ *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3367,9 +3364,12 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -3755,9 +3755,12 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
+@@ -3813,9 +3816,12 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+- if (IS_ERR(path))
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
+ return PTR_ERR(path);
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -5200,7 +5206,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * won't be shifted beyond EXT_MAX_BLOCKS.
+ */
+ if (SHIFT == SHIFT_LEFT) {
+- path = ext4_find_extent(inode, start - 1, &path,
++ path = ext4_find_extent(inode, start - 1, path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5249,7 +5255,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * becomes NULL to indicate the end of the loop.
+ */
+ while (iterator && start <= stop) {
+- path = ext4_find_extent(inode, *iterator, &path,
++ path = ext4_find_extent(inode, *iterator, path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5832,11 +5838,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- path = NULL;
+- goto out;
+- }
++ if (IS_ERR(path))
++ return PTR_ERR(path);
+
+ depth = ext_depth(inode);
+
+@@ -5920,7 +5923,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, &path, 0);
++ path = ext4_find_extent(inode, start, path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+@@ -5934,7 +5937,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, &path, 0);
++ path = ext4_find_extent(inode, start, path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index a3b0acca02ca5..d5636a2a718a8 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -26,16 +26,17 @@ static inline int
+ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
+ struct ext4_ext_path **ppath)
+ {
+- struct ext4_ext_path *path;
++ struct ext4_ext_path *path = *ppath;
+
+- path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
++ *ppath = NULL;
++ path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ if (path[ext_depth(inode)].p_ext == NULL) {
+ ext4_free_ext_path(path);
+- *ppath = NULL;
+ return -ENODATA;
+ }
++ *ppath = path;
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From d9f5e14b49b1e1333c4104786446ab203e4b42d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:38 +0800
+Subject: ext4: get rid of ppath in ext4_split_convert_extents()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 225057b1af381567ffa4eb813f4a28a5c38a25cf ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_split_convert_extents(), the following is
+done here:
+
+ * Its caller needs to update ppath if it uses ppath.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-19-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 65 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 32 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 89d3baac7a79c..27bacfa7d492c 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3707,21 +3707,21 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ * being filled will be convert to initialized by the end_io callback function
+ * via ext4_convert_unwritten_extents().
+ *
+- * Returns the size of unwritten extent to be written on success.
++ * The size of unwritten extent to be written is passed to the caller via the
++ * allocated pointer. Return an extent path pointer on success, or an error
++ * pointer on failure.
+ */
+-static int ext4_split_convert_extents(handle_t *handle,
++static struct ext4_ext_path *ext4_split_convert_extents(handle_t *handle,
+ struct inode *inode,
+ struct ext4_map_blocks *map,
+- struct ext4_ext_path **ppath,
+- int flags)
++ struct ext4_ext_path *path,
++ int flags, unsigned int *allocated)
+ {
+- struct ext4_ext_path *path = *ppath;
+ ext4_lblk_t eof_block;
+ ext4_lblk_t ee_block;
+ struct ext4_extent *ex;
+ unsigned int ee_len;
+ int split_flag = 0, depth;
+- unsigned int allocated = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map->m_len);
+@@ -3749,14 +3749,8 @@ static int ext4_split_convert_extents(handle_t *handle,
+ split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+- path = ext4_split_extent(handle, inode, path, map, split_flag, flags,
+- &allocated);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- return PTR_ERR(path);
+- }
+- *ppath = path;
+- return allocated;
++ return ext4_split_extent(handle, inode, path, map, split_flag, flags,
++ allocated);
+ }
+
+ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+@@ -3792,11 +3786,14 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ inode->i_ino, (unsigned long long)ee_block, ee_len,
+ (unsigned long long)map->m_lblk, map->m_len);
+ #endif
+- err = ext4_split_convert_extents(handle, inode, map, ppath,
+- EXT4_GET_BLOCKS_CONVERT);
+- if (err < 0)
+- return err;
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ path = ext4_split_convert_extents(handle, inode, map, path,
++ EXT4_GET_BLOCKS_CONVERT, NULL);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ return PTR_ERR(path);
++ }
++
++ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path)) {
+ *ppath = NULL;
+ return PTR_ERR(path);
+@@ -3853,11 +3850,14 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
+ (unsigned long long)ee_block, ee_len);
+
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+- err = ext4_split_convert_extents(handle, inode, map, ppath,
+- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+- if (err < 0)
+- return err;
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
++ path = ext4_split_convert_extents(handle, inode, map, path,
++ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, NULL);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ return PTR_ERR(path);
++ }
++
++ path = ext4_find_extent(inode, map->m_lblk, path, 0);
+ if (IS_ERR(path)) {
+ *ppath = NULL;
+ return PTR_ERR(path);
+@@ -3923,19 +3923,20 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+
+ /* get_block() before submitting IO, split the extent */
+ if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+- ret = ext4_split_convert_extents(handle, inode, map, ppath,
+- flags | EXT4_GET_BLOCKS_CONVERT);
+- if (ret < 0) {
+- err = ret;
++ *ppath = ext4_split_convert_extents(handle, inode, map, *ppath,
++ flags | EXT4_GET_BLOCKS_CONVERT, &allocated);
++ if (IS_ERR(*ppath)) {
++ err = PTR_ERR(*ppath);
++ *ppath = NULL;
+ goto out2;
+ }
+ /*
+- * shouldn't get a 0 return when splitting an extent unless
++ * shouldn't get a 0 allocated when splitting an extent unless
+ * m_len is 0 (bug) or extent has been corrupted
+ */
+- if (unlikely(ret == 0)) {
++ if (unlikely(allocated == 0)) {
+ EXT4_ERROR_INODE(inode,
+- "unexpected ret == 0, m_len = %u",
++ "unexpected allocated == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+@@ -3996,9 +3997,9 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
++ allocated = ret;
+
+ out:
+- allocated = ret;
+ map->m_flags |= EXT4_MAP_NEW;
+ map_out:
+ map->m_flags |= EXT4_MAP_MAPPED;
+--
+2.51.0
+
--- /dev/null
+From 8670989e2b4ac9a83be7ccdb0bd31c633a675107 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:37 +0800
+Subject: ext4: get rid of ppath in ext4_split_extent()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit f74cde045617cc275c848c9692feac249ff7a3e7 ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_split_extent(), the following is done here:
+
+ * The 'allocated' is changed from passing a value to passing an address.
+ * Its caller needs to update ppath if it uses ppath.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-18-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: feaf2a80e78f ("ext4: don't set EXT4_GET_BLOCKS_CONVERT when splitting before submitting I/O")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 97 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 50 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index fd9517dbf633e..89d3baac7a79c 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3348,21 +3348,18 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * c> Splits in three extents: Somone is splitting in middle of the extent
+ *
+ */
+-static int ext4_split_extent(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path **ppath,
+- struct ext4_map_blocks *map,
+- int split_flag,
+- int flags)
++static struct ext4_ext_path *ext4_split_extent(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ struct ext4_map_blocks *map,
++ int split_flag, int flags,
++ unsigned int *allocated)
+ {
+- struct ext4_ext_path *path = *ppath;
+ ext4_lblk_t ee_block;
+ struct ext4_extent *ex;
+ unsigned int ee_len, depth;
+- int err = 0;
+ int unwritten;
+ int split_flag1, flags1;
+- int allocated = map->m_len;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -3385,33 +3382,25 @@ static int ext4_split_extent(handle_t *handle,
+ EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- *ppath = NULL;
+- goto out;
++ if (IS_ERR(path))
++ return path;
++ /*
++ * Update path is required because previous ext4_split_extent_at
++ * may result in split of original leaf or extent zeroout.
++ */
++ path = ext4_find_extent(inode, map->m_lblk, path, flags);
++ if (IS_ERR(path))
++ return path;
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++ if (!ex) {
++ EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
++ (unsigned long) map->m_lblk);
++ ext4_free_ext_path(path);
++ return ERR_PTR(-EFSCORRUPTED);
+ }
+- *ppath = path;
+- } else {
+- allocated = ee_len - (map->m_lblk - ee_block);
+- }
+- /*
+- * Update path is required because previous ext4_split_extent_at() may
+- * result in split of original leaf or extent zeroout.
+- */
+- path = ext4_find_extent(inode, map->m_lblk, path, flags);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- return PTR_ERR(path);
+- }
+- *ppath = path;
+- depth = ext_depth(inode);
+- ex = path[depth].p_ext;
+- if (!ex) {
+- EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+- (unsigned long) map->m_lblk);
+- return -EFSCORRUPTED;
++ unwritten = ext4_ext_is_unwritten(ex);
+ }
+- unwritten = ext4_ext_is_unwritten(ex);
+
+ if (map->m_lblk >= ee_block) {
+ split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
+@@ -3422,17 +3411,18 @@ static int ext4_split_extent(handle_t *handle,
+ }
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk, split_flag1, flags);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- *ppath = NULL;
+- goto out;
+- }
+- *ppath = path;
++ if (IS_ERR(path))
++ return path;
+ }
+
++ if (allocated) {
++ if (map->m_lblk + map->m_len > ee_block + ee_len)
++ *allocated = ee_len - (map->m_lblk - ee_block);
++ else
++ *allocated = map->m_len;
++ }
+ ext4_ext_show_leaf(inode, path);
+-out:
+- return err ? err : allocated;
++ return path;
+ }
+
+ /*
+@@ -3677,10 +3667,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ }
+
+ fallback:
+- err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
+- flags);
+- if (err > 0)
+- err = 0;
++ path = ext4_split_extent(handle, inode, path, &split_map, split_flag,
++ flags, NULL);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ *ppath = NULL;
++ goto out;
++ }
++ err = 0;
++ *ppath = path;
+ out:
+ /* If we have gotten a failure, don't zero out status tree */
+ if (!err) {
+@@ -3726,6 +3721,7 @@ static int ext4_split_convert_extents(handle_t *handle,
+ struct ext4_extent *ex;
+ unsigned int ee_len;
+ int split_flag = 0, depth;
++ unsigned int allocated = 0;
+
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map->m_len);
+@@ -3753,7 +3749,14 @@ static int ext4_split_convert_extents(handle_t *handle,
+ split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+ }
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+- return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
++ path = ext4_split_extent(handle, inode, path, map, split_flag, flags,
++ &allocated);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ return PTR_ERR(path);
++ }
++ *ppath = path;
++ return allocated;
+ }
+
+ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+--
+2.51.0
+
--- /dev/null
+From c111fa45f878077a072b483a62f48bfca67fb1f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Aug 2024 10:35:35 +0800
+Subject: ext4: get rid of ppath in ext4_split_extent_at()
+
+From: Baokun Li <libaokun1@huawei.com>
+
+[ Upstream commit 1de82b1b60d4613753254bf3cbf622a4c02c945c ]
+
+The use of path and ppath is now very confusing, so to make the code more
+readable, pass path between functions uniformly, and get rid of ppath.
+
+To get rid of the ppath in ext4_split_extent_at(), the following is done
+here:
+
+ * Free the extents path when an error is encountered.
+ * Its caller needs to update ppath if it uses ppath.
+ * Teach ext4_ext_show_leaf() to skip error pointer.
+
+No functional changes.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Tested-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/20240822023545.1994557-16-libaokun@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 22784ca541c0 ("ext4: subdivide EXT4_EXT_DATA_VALID1")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 85 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 47 insertions(+), 38 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 4f15c26bafe53..33ed753ea82e9 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -84,12 +84,11 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ et->et_checksum = ext4_extent_block_csum(inode, eh);
+ }
+
+-static int ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path **ppath,
+- ext4_lblk_t split,
+- int split_flag,
+- int flags);
++static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag, int flags);
+
+ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
+ {
+@@ -335,9 +334,15 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
+ if (nofail)
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
+
+- return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
++ path = ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
+ EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
+ flags);
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ return PTR_ERR(path);
++ }
++ *ppath = path;
++ return 0;
+ }
+
+ static int
+@@ -689,7 +694,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
+ struct ext4_extent *ex;
+ int i;
+
+- if (!path)
++ if (IS_ERR_OR_NULL(path))
+ return;
+
+ eh = path[depth].p_hdr;
+@@ -3153,16 +3158,14 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ * a> the extent are splitted into two extent.
+ * b> split is not needed, and just mark the extent.
+ *
+- * return 0 on success.
++ * Return an extent path pointer on success, or an error pointer on failure.
+ */
+-static int ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path **ppath,
+- ext4_lblk_t split,
+- int split_flag,
+- int flags)
++static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag, int flags)
+ {
+- struct ext4_ext_path *path = *ppath;
+ ext4_fsblk_t newblock;
+ ext4_lblk_t ee_block;
+ struct ext4_extent *ex, newex, orig_ex, zero_ex;
+@@ -3236,14 +3239,12 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_mark_unwritten(ex2);
+
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+- if (!IS_ERR(path)) {
+- *ppath = path;
++ if (!IS_ERR(path))
+ goto out;
+- }
+- *ppath = NULL;
++
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return err;
++ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3253,16 +3254,14 @@ static int ext4_split_extent_at(handle_t *handle,
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, NULL,
+- flags | EXT4_EX_NOFAIL);
++ path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return PTR_ERR(path);
++ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+- *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3314,10 +3313,13 @@ static int ext4_split_extent_at(handle_t *handle,
+ * and err is a non-zero error code.
+ */
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+- return err;
+ out:
++ if (err) {
++ ext4_free_ext_path(path);
++ path = ERR_PTR(err);
++ }
+ ext4_ext_show_leaf(inode, path);
+- return err;
++ return path;
+ }
+
+ /*
+@@ -3364,10 +3366,14 @@ static int ext4_split_extent(handle_t *handle,
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
+- err = ext4_split_extent_at(handle, inode, ppath,
++ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+- if (err)
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ *ppath = NULL;
+ goto out;
++ }
++ *ppath = path;
+ } else {
+ allocated = ee_len - (map->m_lblk - ee_block);
+ }
+@@ -3375,7 +3381,7 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
++ path = ext4_find_extent(inode, map->m_lblk, path, flags);
+ if (IS_ERR(path)) {
+ *ppath = NULL;
+ return PTR_ERR(path);
+@@ -3397,13 +3403,17 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_MARK_UNWRIT2);
+ }
+- err = ext4_split_extent_at(handle, inode, ppath,
++ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk, split_flag1, flags);
+- if (err)
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ *ppath = NULL;
+ goto out;
++ }
++ *ppath = path;
+ }
+
+- ext4_ext_show_leaf(inode, *ppath);
++ ext4_ext_show_leaf(inode, path);
+ out:
+ return err ? err : allocated;
+ }
+@@ -5590,22 +5600,21 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ if (ext4_ext_is_unwritten(extent))
+ split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+- ret = ext4_split_extent_at(handle, inode, &path,
++ path = ext4_split_extent_at(handle, inode, path,
+ offset_lblk, split_flag,
+ EXT4_EX_NOCACHE |
+ EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL);
+ }
+
+- ext4_free_ext_path(path);
+- if (ret < 0) {
++ if (IS_ERR(path)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
++ ret = PTR_ERR(path);
+ goto out_stop;
+ }
+- } else {
+- ext4_free_ext_path(path);
+ }
+
++ ext4_free_ext_path(path);
+ ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
+
+ /*
+--
+2.51.0
+
--- /dev/null
+From 5e240aa2ff54dc67dd2b874fae3c8932d8d5ebd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Nov 2025 18:32:33 +0800
+Subject: ext4: subdivide EXT4_EXT_DATA_VALID1
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 22784ca541c0f01c5ebad14e8228298dc0a390ed ]
+
+When splitting an extent, if the EXT4_GET_BLOCKS_CONVERT flag is set and
+it is necessary to split the target extent in the middle,
+ext4_split_extent() first handles splitting the latter half of the
+extent and passes the EXT4_EXT_DATA_VALID1 flag. This flag implies that
+all blocks before the split point contain valid data; however, this
+assumption is incorrect.
+
+Therefore, subdivid EXT4_EXT_DATA_VALID1 into
+EXT4_EXT_DATA_ENTIRE_VALID1 and EXT4_EXT_DATA_PARTIAL_VALID1, which
+indicate that the first half of the extent is either entirely valid or
+only partially valid, respectively. These two flags cannot be set
+simultaneously.
+
+This patch does not use EXT4_EXT_DATA_PARTIAL_VALID1, it only replaces
+EXT4_EXT_DATA_VALID1 with EXT4_EXT_DATA_ENTIRE_VALID1 at the location
+where it is set, no logical changes.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Cc: stable@kernel.org
+Message-ID: <20251129103247.686136-2-yi.zhang@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 33ed753ea82e9..18520281e1b5f 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -43,8 +43,13 @@
+ #define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
+ #define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
+
+-#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+-#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++/* first half contains valid data */
++#define EXT4_EXT_DATA_ENTIRE_VALID1 0x8 /* has entirely valid data */
++#define EXT4_EXT_DATA_PARTIAL_VALID1 0x10 /* has partially valid data */
++#define EXT4_EXT_DATA_VALID1 (EXT4_EXT_DATA_ENTIRE_VALID1 | \
++ EXT4_EXT_DATA_PARTIAL_VALID1)
++
++#define EXT4_EXT_DATA_VALID2 0x20 /* second half contains valid data */
+
+ static __le32 ext4_extent_block_csum(struct inode *inode,
+ struct ext4_extent_header *eh)
+@@ -3173,8 +3178,9 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ unsigned int ee_len, depth;
+ int err = 0;
+
+- BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+- (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
++ BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) == EXT4_EXT_DATA_VALID1);
++ BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) &&
++ (split_flag & EXT4_EXT_DATA_VALID2));
+
+ /* Do not cache extents that are in the process of being modified. */
+ flags |= EXT4_EX_NOCACHE;
+@@ -3365,7 +3371,7 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= EXT4_EXT_DATA_VALID1;
++ split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+@@ -3728,7 +3734,7 @@ static int ext4_split_convert_extents(handle_t *handle,
+
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+- split_flag |= EXT4_EXT_DATA_VALID1;
++ split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+ split_flag |= ee_block + ee_len <= eof_block ?
+--
+2.51.0
+
--- /dev/null
+From 1d8124cc6b302f4baf96fa8c9bad03ba429b44c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 20:14:43 +0800
+Subject: hwmon: (max16065) Use READ/WRITE_ONCE to avoid compiler optimization
+ induced race
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 007be4327e443d79c9dd9e56dc16c36f6395d208 ]
+
+Simply copying shared data to a local variable cannot prevent data
+races. The compiler is allowed to optimize away the local copy and
+re-read the shared memory, causing a Time-of-Check Time-of-Use (TOCTOU)
+issue if the data changes between the check and the usage.
+
+To enforce the use of the local variable, use READ_ONCE() when reading
+the shared data and WRITE_ONCE() when updating it. Apply these macros to
+the three identified locations (curr_sense, adc, and fault) where local
+variables are used for error validation, ensuring the value remains
+consistent.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Closes: https://lore.kernel.org/all/6fe17868327207e8b850cf9f88b7dc58b2021f73.camel@decadent.org.uk/
+Fixes: f5bae2642e3d ("hwmon: Driver for MAX16065 System Manager and compatibles")
+Fixes: b8d5acdcf525 ("hwmon: (max16065) Use local variable to avoid TOCTOU")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Link: https://lore.kernel.org/r/20260203121443.5482-1-hanguidong02@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max16065.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
+index 4c9e7892a73c1..43fbb9b26b102 100644
+--- a/drivers/hwmon/max16065.c
++++ b/drivers/hwmon/max16065.c
+@@ -151,27 +151,27 @@ static struct max16065_data *max16065_update_device(struct device *dev)
+ int i;
+
+ for (i = 0; i < data->num_adc; i++)
+- data->adc[i]
+- = max16065_read_adc(client, MAX16065_ADC(i));
++ WRITE_ONCE(data->adc[i],
++ max16065_read_adc(client, MAX16065_ADC(i)));
+
+ if (data->have_current) {
+- data->adc[MAX16065_NUM_ADC]
+- = max16065_read_adc(client, MAX16065_CSP_ADC);
+- data->curr_sense
+- = i2c_smbus_read_byte_data(client,
+- MAX16065_CURR_SENSE);
++ WRITE_ONCE(data->adc[MAX16065_NUM_ADC],
++ max16065_read_adc(client, MAX16065_CSP_ADC));
++ WRITE_ONCE(data->curr_sense,
++ i2c_smbus_read_byte_data(client, MAX16065_CURR_SENSE));
+ }
+
+ for (i = 0; i < 2; i++)
+- data->fault[i]
+- = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
++ WRITE_ONCE(data->fault[i],
++ i2c_smbus_read_byte_data(client, MAX16065_FAULT(i)));
+
+ /*
+ * MAX16067 and MAX16068 have separate undervoltage and
+ * overvoltage alarm bits. Squash them together.
+ */
+ if (data->chip == max16067 || data->chip == max16068)
+- data->fault[0] |= data->fault[1];
++ WRITE_ONCE(data->fault[0],
++ data->fault[0] | data->fault[1]);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+@@ -185,7 +185,7 @@ static ssize_t max16065_alarm_show(struct device *dev,
+ {
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int val = data->fault[attr2->nr];
++ int val = READ_ONCE(data->fault[attr2->nr]);
+
+ if (val < 0)
+ return val;
+@@ -203,7 +203,7 @@ static ssize_t max16065_input_show(struct device *dev,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct max16065_data *data = max16065_update_device(dev);
+- int adc = data->adc[attr->index];
++ int adc = READ_ONCE(data->adc[attr->index]);
+
+ if (unlikely(adc < 0))
+ return adc;
+@@ -216,7 +216,7 @@ static ssize_t max16065_current_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+ struct max16065_data *data = max16065_update_device(dev);
+- int curr_sense = data->curr_sense;
++ int curr_sense = READ_ONCE(data->curr_sense);
+
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
+--
+2.51.0
+
--- /dev/null
+From 2038d7e58be010d2fd6fabe6176b9d552f40a5f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 Feb 2024 13:45:20 +0100
+Subject: KVM: x86: Fix KVM_GET_MSRS stack info leak
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+[ Upstream commit 3376ca3f1a2075eaa23c5576c47d04d7e8a4adda ]
+
+Commit 6abe9c1386e5 ("KVM: X86: Move ignore_msrs handling upper the
+stack") changed the 'ignore_msrs' handling, including sanitizing return
+values to the caller. This was fine until commit 12bc2132b15e ("KVM:
+X86: Do the same ignore_msrs check for feature msrs") which allowed
+non-existing feature MSRs to be ignored, i.e. to not generate an error
+on the ioctl() level. It even tried to preserve the sanitization of the
+return value. However, the logic is flawed, as '*data' will be
+overwritten again with the uninitialized stack value of msr.data.
+
+Fix this by simplifying the logic and always initializing msr.data,
+vanishing the need for an additional error exit path.
+
+Fixes: 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
+Link: https://lore.kernel.org/r/20240203124522.592778-2-minipli@grsecurity.net
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 5bb9ac186512 ("KVM: x86: Return "unsupported" instead of "invalid" on access to unsupported PV MSR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 00bbee40dbec2..275dd7dc1d68b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1719,22 +1719,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+ struct kvm_msr_entry msr;
+ int r;
+
++ /* Unconditionally clear the output for simplicity */
++ msr.data = 0;
+ msr.index = index;
+ r = kvm_get_msr_feature(&msr);
+
+- if (r == KVM_MSR_RET_INVALID) {
+- /* Unconditionally clear the output for simplicity */
+- *data = 0;
+- if (kvm_msr_ignored_check(index, 0, false))
+- r = 0;
+- }
+-
+- if (r)
+- return r;
++ if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
++ r = 0;
+
+ *data = msr.data;
+
+- return 0;
++ return r;
+ }
+
+ static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+--
+2.51.0
+
--- /dev/null
+From 64acebfb18f66e72a3dc96e33d9d59dcaa98b35d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Jan 2026 19:06:57 -0800
+Subject: KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit ead63640d4e72e6f6d464f4e31f7fecb79af8869 ]
+
+Ignore -EBUSY when checking nested events after exiting a blocking state
+while L2 is active, as exiting to userspace will generate a spurious
+userspace exit, usually with KVM_EXIT_UNKNOWN, and likely lead to the VM's
+demise. Continuing with the wakeup isn't perfect either, as *something*
+has gone sideways if a vCPU is awakened in L2 with an injected event (or
+worse, a nested run pending), but continuing on gives the VM a decent
+chance of surviving without any major side effects.
+
+As explained in the Fixes commits, it _should_ be impossible for a vCPU to
+be put into a blocking state with an already-injected event (exception,
+IRQ, or NMI). Unfortunately, userspace can stuff MP_STATE and/or injected
+events, and thus put the vCPU into what should be an impossible state.
+
+Don't bother trying to preserve the WARN, e.g. with an anti-syzkaller
+Kconfig, as WARNs can (hopefully) be added in paths where _KVM_ would be
+violating x86 architecture, e.g. by WARNing if KVM attempts to inject an
+exception or interrupt while the vCPU isn't running.
+
+Cc: Alessandro Ratti <alessandro@0x65c.net>
+Cc: stable@vger.kernel.org
+Fixes: 26844fee6ade ("KVM: x86: never write to memory from kvm_vcpu_check_block()")
+Fixes: 45405155d876 ("KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet inject")
+Link: https://syzkaller.appspot.com/text?tag=ReproC&x=10d4261a580000
+Reported-by: syzbot+1522459a74d26b0ac33a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/671bc7a7.050a0220.455e8.022a.GAE@google.com
+Link: https://patch.msgid.link/20260109030657.994759-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3edfcb4090b18..ac0b458582c38 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11005,8 +11005,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ if (is_guest_mode(vcpu)) {
+ int r = kvm_check_nested_events(vcpu);
+
+- WARN_ON_ONCE(r == -EBUSY);
+- if (r < 0)
++ if (r < 0 && r != -EBUSY)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7c34c9e6945cefce7491df81fab7f22374de8b91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Aug 2024 11:19:28 -0700
+Subject: KVM: x86: Rename KVM_MSR_RET_INVALID to KVM_MSR_RET_UNSUPPORTED
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit aaecae7b6a2b19a874a7df0d474f44f3a5b5a74e ]
+
+Rename the "INVALID" internal MSR error return code to "UNSUPPORTED" to
+try and make it more clear that access was denied because the MSR itself
+is unsupported/unknown. "INVALID" is too ambiguous, as it could just as
+easily mean the value for WRMSR as invalid.
+
+Avoid UNKNOWN and UNIMPLEMENTED, as the error code is used for MSRs that
+_are_ actually implemented by KVM, e.g. if the MSR is unsupported because
+an associated feature flag is not present in guest CPUID.
+
+Opportunistically beef up the comments for the internal MSR error codes.
+
+Link: https://lore.kernel.org/r/20240802181935.292540-4-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 5bb9ac186512 ("KVM: x86: Return "unsupported" instead of "invalid" on access to unsupported PV MSR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/svm.c | 2 +-
+ arch/x86/kvm/vmx/vmx.c | 2 +-
+ arch/x86/kvm/x86.c | 12 ++++++------
+ arch/x86/kvm/x86.h | 15 +++++++++++----
+ 4 files changed, 19 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9ddd1ee5f3123..a48616242affe 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2848,7 +2848,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+ msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+ break;
+ default:
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+
+ return 0;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4dd3f64a1a8c7..b68fb5329a13e 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1981,7 +1981,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+ return 1;
+ return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ default:
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 275dd7dc1d68b..3e16513a4d9fd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1724,7 +1724,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+ msr.index = index;
+ r = kvm_get_msr_feature(&msr);
+
+- if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
++ if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
+ r = 0;
+
+ *data = msr.data;
+@@ -1917,7 +1917,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
+ {
+ int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
+
+- if (ret == KVM_MSR_RET_INVALID)
++ if (ret == KVM_MSR_RET_UNSUPPORTED)
+ if (kvm_msr_ignored_check(index, data, true))
+ ret = 0;
+
+@@ -1962,7 +1962,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
+ {
+ int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
+
+- if (ret == KVM_MSR_RET_INVALID) {
++ if (ret == KVM_MSR_RET_UNSUPPORTED) {
+ /* Unconditionally clear *data for simplicity */
+ *data = 0;
+ if (kvm_msr_ignored_check(index, 0, false))
+@@ -2031,7 +2031,7 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
+ static u64 kvm_msr_reason(int r)
+ {
+ switch (r) {
+- case KVM_MSR_RET_INVALID:
++ case KVM_MSR_RET_UNSUPPORTED:
+ return KVM_MSR_EXIT_REASON_UNKNOWN;
+ case KVM_MSR_RET_FILTERED:
+ return KVM_MSR_EXIT_REASON_FILTER;
+@@ -3997,7 +3997,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ kvm_is_msr_to_save(msr))
+ break;
+
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ return 0;
+ }
+@@ -4356,7 +4356,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ }
+
+- return KVM_MSR_RET_INVALID;
++ return KVM_MSR_RET_UNSUPPORTED;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 1e7be1f6ab299..1222e5b3d5580 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -501,11 +501,18 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+
+ /*
+ * Internal error codes that are used to indicate that MSR emulation encountered
+- * an error that should result in #GP in the guest, unless userspace
+- * handles it.
++ * an error that should result in #GP in the guest, unless userspace handles it.
++ * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
++ * as part of KVM's lightly documented internal KVM_RUN return codes.
++ *
++ * UNSUPPORTED - The MSR isn't supported, either because it is completely
++ * unknown to KVM, or because the MSR should not exist according
++ * to the vCPU model.
++ *
++ * FILTERED - Access to the MSR is denied by a userspace MSR filter.
+ */
+-#define KVM_MSR_RET_INVALID 2 /* in-kernel MSR emulation #GP condition */
+-#define KVM_MSR_RET_FILTERED 3 /* #GP due to userspace MSR filter */
++#define KVM_MSR_RET_UNSUPPORTED 2
++#define KVM_MSR_RET_FILTERED 3
+
+ #define __cr4_reserved_bits(__cpu_has, __c) \
+ ({ \
+--
+2.51.0
+
--- /dev/null
+From e03e838908f4cfaa29ca65eff93b4bf28c7719d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 12:59:48 -0800
+Subject: KVM: x86: Return "unsupported" instead of "invalid" on access to
+ unsupported PV MSR
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 5bb9ac1865123356337a389af935d3913ee917ed ]
+
+Return KVM_MSR_RET_UNSUPPORTED instead of '1' (which for all intents and
+purposes means "invalid") when rejecting accesses to KVM PV MSRs to adhere
+to KVM's ABI of allowing host reads and writes of '0' to MSRs that are
+advertised to userspace via KVM_GET_MSR_INDEX_LIST, even if the vCPU model
+doesn't support the MSR.
+
+E.g. running a QEMU VM with
+
+ -cpu host,-kvmclock,kvm-pv-enforce-cpuid
+
+yields:
+
+ qemu: error: failed to set MSR 0x12 to 0x0
+ qemu: target/i386/kvm/kvm.c:3301: kvm_buf_set_msrs:
+ Assertion `ret == cpu->kvm_msr_buf->nmsrs' failed.
+
+Fixes: 66570e966dd9 ("kvm: x86: only provide PV features if enabled in guest's CPUID")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Link: https://patch.msgid.link/20251230205948.4094097-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3e16513a4d9fd..19cae03e423b1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3812,47 +3812,47 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_WALL_CLOCK_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data, 0);
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_pv_enable_async_pf(vcpu, data))
+ return 1;
+ break;
+ case MSR_KVM_ASYNC_PF_INT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_pv_enable_async_pf_int(vcpu, data))
+ return 1;
+ break;
+ case MSR_KVM_ASYNC_PF_ACK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+ if (data & 0x1) {
+ vcpu->arch.apf.pageready_pending = false;
+ kvm_check_async_pf_completion(vcpu);
+@@ -3860,7 +3860,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_STEAL_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (unlikely(!sched_info_on()))
+ return 1;
+@@ -3878,7 +3878,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
+ return 1;
+@@ -3886,7 +3886,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+
+ case MSR_KVM_POLL_CONTROL:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ /* only enable bit supported */
+ if (data & (-1ULL << 1))
+@@ -4193,61 +4193,61 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_WALL_CLOCK_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.time;
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.time;
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.apf.msr_en_val;
+ break;
+ case MSR_KVM_ASYNC_PF_INT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.apf.msr_int_val;
+ break;
+ case MSR_KVM_ASYNC_PF_ACK:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = 0;
+ break;
+ case MSR_KVM_STEAL_TIME:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.st.msr_val;
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.pv_eoi.msr_val;
+ break;
+ case MSR_KVM_POLL_CONTROL:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
+- return 1;
++ return KVM_MSR_RET_UNSUPPORTED;
+
+ msr_info->data = vcpu->arch.msr_kvm_poll_control;
+ break;
+--
+2.51.0
+
--- /dev/null
+From 1b6f6230cd757de14b485b88651085898340ce2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jun 2024 10:26:09 -0700
+Subject: KVM: x86: WARN if a vCPU gets a valid wakeup that KVM can't yet
+ inject
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 45405155d876c326da89162b8173b8cc9ab7ed75 ]
+
+WARN if a blocking vCPU is awakened by a valid wake event that KVM can't
+inject, e.g. because KVM needs to complete a nested VM-enter, or needs to
+re-inject an exception. For the nested VM-Enter case, KVM is supposed to
+clear "nested_run_pending" if L1 puts L2 into HLT, i.e. entering HLT
+"completes" the nested VM-Enter. And for already-injected exceptions, it
+should be impossible for the vCPU to be in a blocking state if a VM-Exit
+occurred while an exception was being vectored.
+
+Link: https://lore.kernel.org/r/20240607172609.3205077-7-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: ead63640d4e7 ("KVM: x86: Ignore -EBUSY when checking nested events from vcpu_block()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 19cae03e423b1..3edfcb4090b18 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11003,7 +11003,10 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
+ * causes a spurious wakeup from HLT).
+ */
+ if (is_guest_mode(vcpu)) {
+- if (kvm_check_nested_events(vcpu) < 0)
++ int r = kvm_check_nested_events(vcpu);
++
++ WARN_ON_ONCE(r == -EBUSY);
++ if (r < 0)
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8c53295a52f7bde9a40f7330d46d13dfaf555ff1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 09:39:01 +0530
+Subject: mailbox: Allow controller specific mapping using fwnode
+
+From: Anup Patel <apatel@ventanamicro.com>
+
+[ Upstream commit ba879dfc0574878f3e08f217b2b4fdf845c426c0 ]
+
+Introduce optional fw_xlate() callback which allows a mailbox controller
+driver to provide controller specific mapping using fwnode.
+
+The Linux OF framework already implements fwnode operations for the
+Linux DD framework so the fw_xlate() callback works fine with device
+tree as well.
+
+Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Anup Patel <apatel@ventanamicro.com>
+Link: https://lore.kernel.org/r/20250818040920.272664-6-apatel@ventanamicro.com
+Signed-off-by: Paul Walmsley <pjw@kernel.org>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 65 ++++++++++++++++++------------
+ include/linux/mailbox_controller.h | 3 ++
+ 2 files changed, 43 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 7dcbca48d1a0f..892aa0a048e0f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -15,6 +15,7 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/property.h>
+ #include <linux/spinlock.h>
+
+ #include "mailbox.h"
+@@ -396,34 +397,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
+ */
+ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ {
+- struct device *dev = cl->dev;
++ struct fwnode_reference_args fwspec;
++ struct fwnode_handle *fwnode;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
++ struct device *dev;
++ unsigned int i;
+ int ret;
+
+- if (!dev || !dev->of_node) {
+- pr_debug("%s: No owner device node\n", __func__);
++ dev = cl->dev;
++ if (!dev) {
++ pr_debug("No owner device\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+- ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+- index, &spec);
++ fwnode = dev_fwnode(dev);
++ if (!fwnode) {
++ dev_dbg(dev, "No owner fwnode\n");
++ return ERR_PTR(-ENODEV);
++ }
++
++ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
++ 0, index, &fwspec);
+ if (ret) {
+- dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
++ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
+ return ERR_PTR(ret);
+ }
+
++ spec.np = to_of_node(fwspec.fwnode);
++ spec.args_count = fwspec.nargs;
++ for (i = 0; i < spec.args_count; i++)
++ spec.args[i] = fwspec.args[i];
++
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+- list_for_each_entry(mbox, &mbox_cons, node)
+- if (mbox->dev->of_node == spec.np) {
+- chan = mbox->of_xlate(mbox, &spec);
+- if (!IS_ERR(chan))
+- break;
++ list_for_each_entry(mbox, &mbox_cons, node) {
++ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
++ if (mbox->fw_xlate) {
++ chan = mbox->fw_xlate(mbox, &fwspec);
++ if (!IS_ERR(chan))
++ break;
++ } else if (mbox->of_xlate) {
++ chan = mbox->of_xlate(mbox, &spec);
++ if (!IS_ERR(chan))
++ break;
++ }
+ }
++ }
+
+- of_node_put(spec.np);
++ fwnode_handle_put(fwspec.fwnode);
+
+ if (IS_ERR(chan))
+ return chan;
+@@ -440,15 +463,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
+ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name)
+ {
+- struct device_node *np = cl->dev->of_node;
+- int index;
+-
+- if (!np) {
+- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
+- return ERR_PTR(-EINVAL);
+- }
++ int index = device_property_match_string(cl->dev, "mbox-names", name);
+
+- index = of_property_match_string(np, "mbox-names", name);
+ if (index < 0) {
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+@@ -485,9 +501,8 @@ void mbox_free_channel(struct mbox_chan *chan)
+ }
+ EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+-static struct mbox_chan *
+-of_mbox_index_xlate(struct mbox_controller *mbox,
+- const struct of_phandle_args *sp)
++static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
++ const struct fwnode_reference_args *sp)
+ {
+ int ind = sp->args[0];
+
+@@ -540,8 +555,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
+ spin_lock_init(&chan->lock);
+ }
+
+- if (!mbox->of_xlate)
+- mbox->of_xlate = of_mbox_index_xlate;
++ if (!mbox->fw_xlate && !mbox->of_xlate)
++ mbox->fw_xlate = fw_mbox_index_xlate;
+
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
+diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
+index 5fb0b65f45a2c..b91379922cb33 100644
+--- a/include/linux/mailbox_controller.h
++++ b/include/linux/mailbox_controller.h
+@@ -66,6 +66,7 @@ struct mbox_chan_ops {
+ * no interrupt rises. Ignored if 'txdone_irq' is set.
+ * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
+ * last TX's status after these many millisecs
++ * @fw_xlate: Controller driver specific mapping of channel via fwnode
+ * @of_xlate: Controller driver specific mapping of channel via DT
+ * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
+ * channels.
+@@ -79,6 +80,8 @@ struct mbox_controller {
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
++ struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox,
++ const struct fwnode_reference_args *sp);
+ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp);
+ /* Internal to API */
+--
+2.51.0
+
--- /dev/null
+From 95c663250ad1d5189c8d302f31609b082ea2bbd4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:14 +0000
+Subject: mailbox: don't protect of_parse_phandle_with_args with con_mutex
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit 8c71c61fc613657d785a3377b4b34484bd978374 ]
+
+There are no concurrency problems if multiple consumers parse the
+phandle, don't gratuiously protect the parsing with the mutex used
+for the controllers list.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 92c2fb618c8e1..87de408fb068c 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -413,16 +413,15 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ return ERR_PTR(-ENODEV);
+ }
+
+- mutex_lock(&con_mutex);
+-
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+- mutex_unlock(&con_mutex);
+ return ERR_PTR(ret);
+ }
+
++ mutex_lock(&con_mutex);
++
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+--
+2.51.0
+
--- /dev/null
+From 82e5a307354882bcb2a171b4a7bbcc0763e62318 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Nov 2025 06:22:50 +0000
+Subject: mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()
+
+From: Joonwon Kang <joonwonkang@google.com>
+
+[ Upstream commit fcd7f96c783626c07ee3ed75fa3739a8a2052310 ]
+
+Although it is guided that `#mbox-cells` must be at least 1, there are
+many instances of `#mbox-cells = <0>;` in the device tree. If that is
+the case and the corresponding mailbox controller does not provide
+`fw_xlate` and `of_xlate` function pointers, `fw_mbox_index_xlate()` will
+be used by default and out-of-bounds accesses could occur due to lack of
+bounds check in that function.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Joonwon Kang <joonwonkang@google.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 892aa0a048e0f..b4d52b814055b 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -504,12 +504,10 @@ EXPORT_SYMBOL_GPL(mbox_free_channel);
+ static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
+ {
+- int ind = sp->args[0];
+-
+- if (ind >= mbox->num_chans)
++ if (sp->nargs < 1 || sp->args[0] >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+- return &mbox->chans[ind];
++ return &mbox->chans[sp->args[0]];
+ }
+
+ /**
+--
+2.51.0
+
--- /dev/null
+From 1c7c71c8f648bddb1d730732296681c29c5cc132 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:17 +0000
+Subject: mailbox: remove unused header files
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit 4de14ec76b5e67d824896f774b3a23d86a2ebc87 ]
+
+There's nothing used from these header files, remove their inclusion.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index c7134ece6d5dd..693975a87e19e 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,17 +6,14 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+-#include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+-#include <linux/interrupt.h>
+ #include <linux/mailbox_client.h>
+ #include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+-#include <linux/slab.h>
+ #include <linux/spinlock.h>
+
+ #include "mailbox.h"
+--
+2.51.0
+
--- /dev/null
+From d44d9434352c4c7f9f2cf72a7a99da016bc8f978 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2025 08:27:15 +0000
+Subject: mailbox: sort headers alphabetically
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+[ Upstream commit db824c1119fc16556a84cb7a771ca6553b3c3a45 ]
+
+Sorting headers alphabetically helps locating duplicates,
+and makes it easier to figure out where to insert new headers.
+
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 14 +++++++-------
+ include/linux/mailbox_client.h | 2 +-
+ include/linux/mailbox_controller.h | 6 +++---
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 87de408fb068c..c7134ece6d5dd 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,18 +6,18 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+-#include <linux/interrupt.h>
+-#include <linux/spinlock.h>
+-#include <linux/mutex.h>
++#include <linux/bitops.h>
+ #include <linux/delay.h>
+-#include <linux/slab.h>
+-#include <linux/err.h>
+-#include <linux/module.h>
+ #include <linux/device.h>
+-#include <linux/bitops.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
+ #include <linux/mailbox_client.h>
+ #include <linux/mailbox_controller.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
+
+ #include "mailbox.h"
+
+diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
+index 734694912ef74..c6eea9afb943d 100644
+--- a/include/linux/mailbox_client.h
++++ b/include/linux/mailbox_client.h
+@@ -7,8 +7,8 @@
+ #ifndef __MAILBOX_CLIENT_H
+ #define __MAILBOX_CLIENT_H
+
+-#include <linux/of.h>
+ #include <linux/device.h>
++#include <linux/of.h>
+
+ struct mbox_chan;
+
+diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
+index 6fee33cb52f58..5fb0b65f45a2c 100644
+--- a/include/linux/mailbox_controller.h
++++ b/include/linux/mailbox_controller.h
+@@ -3,11 +3,11 @@
+ #ifndef __MAILBOX_CONTROLLER_H
+ #define __MAILBOX_CONTROLLER_H
+
++#include <linux/completion.h>
++#include <linux/device.h>
++#include <linux/hrtimer.h>
+ #include <linux/of.h>
+ #include <linux/types.h>
+-#include <linux/hrtimer.h>
+-#include <linux/device.h>
+-#include <linux/completion.h>
+
+ struct mbox_chan;
+
+--
+2.51.0
+
--- /dev/null
+From 9572539353d6a0d1e6d1fb9963ff79db3d987e01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 21:14:09 +0800
+Subject: mailbox: Use dev_err when there is error
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit 8da4988b6e645f3eaa590ea16f433583364fd09c ]
+
+Use dev_err to show the error log instead of using dev_dbg.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 693975a87e19e..4c27de9514e55 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -322,7 +322,7 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ int ret;
+
+ if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
+- dev_dbg(dev, "%s: mailbox not free\n", __func__);
++ dev_err(dev, "%s: mailbox not free\n", __func__);
+ return -EBUSY;
+ }
+
+@@ -413,7 +413,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
+- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
++ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1818bcc186257d978f3402a443f606f2e07e0e6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Apr 2025 21:14:13 +0800
+Subject: mailbox: Use guard/scoped_guard for con_mutex
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit 16da9a653c5bf5d97fb296420899fe9735aa9c3c ]
+
+Use guard and scoped_guard for con_mutex to simplify code.
+
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 61 +++++++++++++++++----------------------
+ 1 file changed, 26 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 4c27de9514e55..7dcbca48d1a0f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -6,6 +6,7 @@
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+@@ -370,13 +371,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ */
+ int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+ {
+- int ret;
+-
+- mutex_lock(&con_mutex);
+- ret = __mbox_bind_client(chan, cl);
+- mutex_unlock(&con_mutex);
++ guard(mutex)(&con_mutex);
+
+- return ret;
++ return __mbox_bind_client(chan, cl);
+ }
+ EXPORT_SYMBOL_GPL(mbox_bind_client);
+
+@@ -417,28 +414,25 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+ return ERR_PTR(ret);
+ }
+
+- mutex_lock(&con_mutex);
++ scoped_guard(mutex, &con_mutex) {
++ chan = ERR_PTR(-EPROBE_DEFER);
++ list_for_each_entry(mbox, &mbox_cons, node)
++ if (mbox->dev->of_node == spec.np) {
++ chan = mbox->of_xlate(mbox, &spec);
++ if (!IS_ERR(chan))
++ break;
++ }
+
+- chan = ERR_PTR(-EPROBE_DEFER);
+- list_for_each_entry(mbox, &mbox_cons, node)
+- if (mbox->dev->of_node == spec.np) {
+- chan = mbox->of_xlate(mbox, &spec);
+- if (!IS_ERR(chan))
+- break;
+- }
++ of_node_put(spec.np);
+
+- of_node_put(spec.np);
++ if (IS_ERR(chan))
++ return chan;
+
+- if (IS_ERR(chan)) {
+- mutex_unlock(&con_mutex);
+- return chan;
++ ret = __mbox_bind_client(chan, cl);
++ if (ret)
++ chan = ERR_PTR(ret);
+ }
+
+- ret = __mbox_bind_client(chan, cl);
+- if (ret)
+- chan = ERR_PTR(ret);
+-
+- mutex_unlock(&con_mutex);
+ return chan;
+ }
+ EXPORT_SYMBOL_GPL(mbox_request_channel);
+@@ -549,9 +543,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+- mutex_lock(&con_mutex);
+- list_add_tail(&mbox->node, &mbox_cons);
+- mutex_unlock(&con_mutex);
++ scoped_guard(mutex, &con_mutex)
++ list_add_tail(&mbox->node, &mbox_cons);
+
+ return 0;
+ }
+@@ -568,17 +561,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
+ if (!mbox)
+ return;
+
+- mutex_lock(&con_mutex);
+-
+- list_del(&mbox->node);
++ scoped_guard(mutex, &con_mutex) {
++ list_del(&mbox->node);
+
+- for (i = 0; i < mbox->num_chans; i++)
+- mbox_free_channel(&mbox->chans[i]);
++ for (i = 0; i < mbox->num_chans; i++)
++ mbox_free_channel(&mbox->chans[i]);
+
+- if (mbox->txdone_poll)
+- hrtimer_cancel(&mbox->poll_hrt);
+-
+- mutex_unlock(&con_mutex);
++ if (mbox->txdone_poll)
++ hrtimer_cancel(&mbox->poll_hrt);
++ }
+ }
+ EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+
+--
+2.51.0
+
--- /dev/null
+From 5b01d2b63f2d49807fae816cca357a0aec4d17be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 14:16:08 -0600
+Subject: mailbox: Use of_property_match_string() instead of open-coding
+
+From: Rob Herring (Arm) <robh@kernel.org>
+
+[ Upstream commit 263dbd3cc88da7ea7413494eea66418b4f1b2e6d ]
+
+Use of_property_match_string() instead of open-coding the search. With
+this, of_get_property() can be removed as there is no need to check for
+"mbox-names" presence first.
+
+This is part of a larger effort to remove callers of of_get_property()
+and similar functions. of_get_property() leaks the DT property data
+pointer which is a problem for dynamically allocated nodes which may
+be freed.
+
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Jassi Brar <jassisinghbrar@gmail.com>
+Stable-dep-of: fcd7f96c7836 ("mailbox: Prevent out-of-bounds access in fw_mbox_index_xlate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mailbox/mailbox.c | 22 ++++++----------------
+ 1 file changed, 6 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index cb59b4dbad626..92c2fb618c8e1 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -451,30 +451,20 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name)
+ {
+ struct device_node *np = cl->dev->of_node;
+- struct property *prop;
+- const char *mbox_name;
+- int index = 0;
++ int index;
+
+ if (!np) {
+ dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (!of_get_property(np, "mbox-names", NULL)) {
+- dev_err(cl->dev,
+- "%s() requires an \"mbox-names\" property\n", __func__);
++ index = of_property_match_string(np, "mbox-names", name);
++ if (index < 0) {
++ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
++ __func__, name);
+ return ERR_PTR(-EINVAL);
+ }
+-
+- of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
+- if (!strncmp(name, mbox_name, strlen(name)))
+- return mbox_request_channel(cl, index);
+- index++;
+- }
+-
+- dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+- __func__, name);
+- return ERR_PTR(-EINVAL);
++ return mbox_request_channel(cl, index);
+ }
+ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
+
+--
+2.51.0
+
--- /dev/null
+From 640169aba8c4313195e25a1ba6a339fce5544ecc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 20:18:34 +0200
+Subject: media: hantro: Disable multicore support
+
+From: Sebastian Reichel <sebastian.reichel@collabora.com>
+
+[ Upstream commit ccdeb8d57f7fb3e5c05d72cb7dfb9bc78f09f542 ]
+
+Avoid exposing equal Hantro video codecs to userspace. Equal video
+codecs allow scheduling work between the cores. For that kernel support
+is required, which does not yet exist. Until that is implemented avoid
+exposing each core separately to userspace so that multicore can be
+added in the future without breaking userspace ABI.
+
+This was written with Rockchip RK3588 in mind (which has 4 Hantro H1
+cores), but applies to all SoCs.
+
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sebastian Fricke <sebastian.fricke@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../media/platform/verisilicon/hantro_drv.c | 47 +++++++++++++++++++
+ 1 file changed, 47 insertions(+)
+
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 35833ee8beb51..7892b9f34599a 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -987,6 +987,49 @@ static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_queue = v4l2_m2m_request_queue,
+ };
+
++/*
++ * Some SoCs, like RK3588 have multiple identical Hantro cores, but the
++ * kernel is currently missing support for multi-core handling. Exposing
++ * separate devices for each core to userspace is bad, since that does
++ * not allow scheduling tasks properly (and creates ABI). With this workaround
++ * the driver will only probe for the first core and early exit for the other
++ * cores. Once the driver gains multi-core support, the same technique
++ * for detecting the main core can be used to cluster all cores together.
++ */
++static int hantro_disable_multicore(struct hantro_dev *vpu)
++{
++ struct device_node *node = NULL;
++ const char *compatible;
++ bool is_main_core;
++ int ret;
++
++ /* Intentionally ignores the fallback strings */
++ ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible);
++ if (ret)
++ return ret;
++
++ /* The first compatible and available node found is considered the main core */
++ do {
++ node = of_find_compatible_node(node, NULL, compatible);
++ if (of_device_is_available(node))
++ break;
++ } while (node);
++
++ if (!node)
++ return -EINVAL;
++
++ is_main_core = (vpu->dev->of_node == node);
++
++ of_node_put(node);
++
++ if (!is_main_core) {
++ dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -1006,6 +1049,10 @@ static int hantro_probe(struct platform_device *pdev)
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
++ ret = hantro_disable_multicore(vpu);
++ if (ret)
++ return ret;
++
+ /*
+ * Support for nxp,imx8mq-vpu is kept for backwards compatibility
+ * but it's deprecated. Please update your DTS file to use
+--
+2.51.0
+
--- /dev/null
+From 9558f61d46d37058f8ccb5626e97df5a2b0f32c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Nov 2025 09:12:57 +0000
+Subject: media: tegra-video: Fix memory leak in __tegra_channel_try_format()
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 43e5302d22334f1183dec3e0d5d8007eefe2817c ]
+
+The state object allocated by __v4l2_subdev_state_alloc() must be freed
+with __v4l2_subdev_state_free() when it is no longer needed.
+
+In __tegra_channel_try_format(), two error paths return directly after
+v4l2_subdev_call() fails, without freeing the allocated 'sd_state'
+object. This violates the requirement and causes a memory leak.
+
+Fix this by introducing a cleanup label and using goto statements in the
+error paths to ensure that __v4l2_subdev_state_free() is always called
+before the function returns.
+
+Fixes: 56f64b82356b7 ("media: tegra-video: Use zero crop settings if subdev has no get_selection")
+Fixes: 1ebaeb09830f3 ("media: tegra-video: Add support for external sensor capture")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index a2f21c70a5bc8..e8ba23e5bcde0 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -440,7 +440,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *try_crop;
+- int ret;
++ int ret = 0;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+@@ -484,8 +484,10 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+- if (ret)
+- return -EINVAL;
++ if (ret) {
++ ret = -EINVAL;
++ goto out_free;
++ }
+
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
+@@ -497,14 +499,15 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+ if (ret < 0)
+- return ret;
++ goto out_free;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+ chan->vi->ops->vi_fmt_align(pix, fmtinfo->bpp);
+
++out_free:
+ __v4l2_subdev_state_free(sd_state);
+
+- return 0;
++ return ret;
+ }
+
+ static int tegra_channel_try_format(struct file *file, void *fh,
+--
+2.51.0
+
--- /dev/null
+From 7066f96841ebe117d3665afdc79dd313c83b2b1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Oct 2023 23:40:09 +0200
+Subject: media: tegra-video: Use accessors for pad config 'try_*' fields
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 0623979d8352efe18f83c4fad95a2e61df17b3e7 ]
+
+The 'try_*' fields of the v4l2_subdev_pad_config structure are meant to
+be accessed through helper functions. Replace direct access with usage
+of the v4l2_subdev_get_pad_format(), v4l2_subdev_get_pad_crop() and
+v4l2_subdev_get_pad_compose() helpers.
+
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Stable-dep-of: 43e5302d2233 ("media: tegra-video: Fix memory leak in __tegra_channel_try_format()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/tegra-video/vi.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
+index 94171e62dee9e..a2f21c70a5bc8 100644
+--- a/drivers/staging/media/tegra-video/vi.c
++++ b/drivers/staging/media/tegra-video/vi.c
+@@ -439,6 +439,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
++ struct v4l2_rect *try_crop;
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+@@ -473,24 +474,25 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ * Attempt to obtain the format size from subdev.
+ * If not available, try to get crop boundary from subdev.
+ */
++ try_crop = v4l2_subdev_get_pad_crop(subdev, sd_state, 0);
+ fse.code = fmtinfo->code;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
+ if (ret) {
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
+- sd_state->pads->try_crop.width = 0;
+- sd_state->pads->try_crop.height = 0;
++ try_crop->width = 0;
++ try_crop->height = 0;
+ } else {
+ ret = v4l2_subdev_call(subdev, pad, get_selection,
+ NULL, &sdsel);
+ if (ret)
+ return -EINVAL;
+
+- sd_state->pads->try_crop.width = sdsel.r.width;
+- sd_state->pads->try_crop.height = sdsel.r.height;
++ try_crop->width = sdsel.r.width;
++ try_crop->height = sdsel.r.height;
+ }
+ } else {
+- sd_state->pads->try_crop.width = fse.max_width;
+- sd_state->pads->try_crop.height = fse.max_height;
++ try_crop->width = fse.max_width;
++ try_crop->height = fse.max_height;
+ }
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
+--
+2.51.0
+
--- /dev/null
+From 43ecdabd71969b9b9c6eb7f96a0a89c8c4108c5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:24 +0800
+Subject: media: v4l2-mem2mem: Add a kref to the v4l2_m2m_dev structure
+
+From: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+
+[ Upstream commit db6b97a4f8041e479be9ef4b8b07022636c96f50 ]
+
+Adding a reference count to the v4l2_m2m_dev structure allow safely
+sharing it across multiple hardware nodes. This can be used to prevent
+running jobs concurrently on m2m cores that have some internal resource
+sharing.
+
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[hverkuil: fix typos in v4l2_m2m_put documentation]
+Stable-dep-of: e0203ddf9af7 ("media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/v4l2-core/v4l2-mem2mem.c | 23 +++++++++++++++++++++++
+ include/media/v4l2-mem2mem.h | 21 +++++++++++++++++++++
+ 2 files changed, 44 insertions(+)
+
+diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
+index 8db9ac9c1433f..494ddd7e142cc 100644
+--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
++++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
+@@ -90,6 +90,7 @@ static const char * const m2m_entity_name[] = {
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
+ * @m2m_ops: driver callbacks
++ * @kref: device reference count
+ */
+ struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+@@ -109,6 +110,8 @@ struct v4l2_m2m_dev {
+ unsigned long job_queue_flags;
+
+ const struct v4l2_m2m_ops *m2m_ops;
++
++ struct kref kref;
+ };
+
+ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+@@ -1207,6 +1210,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
++ kref_init(&m2m_dev->kref);
+
+ return m2m_dev;
+ }
+@@ -1218,6 +1222,25 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+ }
+ EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_get(&m2m_dev->kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_get);
++
++static void v4l2_m2m_release_from_kref(struct kref *kref)
++{
++ struct v4l2_m2m_dev *m2m_dev = container_of(kref, struct v4l2_m2m_dev, kref);
++
++ v4l2_m2m_release(m2m_dev);
++}
++
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev)
++{
++ kref_put(&m2m_dev->kref, v4l2_m2m_release_from_kref);
++}
++EXPORT_SYMBOL_GPL(v4l2_m2m_put);
++
+ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index 370c230ad3bea..4a2649a4562ae 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -537,6 +537,27 @@ v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ */
+ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+
++/**
++ * v4l2_m2m_get() - take a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * This is used to share the M2M device across multiple devices. This
++ * can be used to avoid scheduling two hardware nodes concurrently.
++ */
++void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);
++
++/**
++ * v4l2_m2m_put() - remove a reference to the m2m_dev structure
++ *
++ * @m2m_dev: opaque pointer to the internal data to handle M2M context
++ *
++ * Once the M2M device has no more references, v4l2_m2m_release() will be
++ * called automatically. Users of this method should never call
++ * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
++ */
++void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);
++
+ /**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+--
+2.51.0
+
--- /dev/null
+From 2739f07120ee5f578827b8561177517b582a2b7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Dec 2025 09:54:26 +0800
+Subject: media: verisilicon: Avoid G2 bus error while decoding H.264 and HEVC
+
+From: Ming Qian <ming.qian@oss.nxp.com>
+
+[ Upstream commit e0203ddf9af7c8e170e1e99ce83b4dc07f0cd765 ]
+
+For the i.MX8MQ platform, there is a hardware limitation: the g1 VPU and
+g2 VPU cannot decode simultaneously; otherwise, it will cause below bus
+error and produce corrupted pictures, even potentially lead to system hang.
+
+[ 110.527986] hantro-vpu 38310000.video-codec: frame decode timed out.
+[ 110.583517] hantro-vpu 38310000.video-codec: bus error detected.
+
+Therefore, it is necessary to ensure that g1 and g2 operate alternately.
+This allows for successful multi-instance decoding of H.264 and HEVC.
+
+To achieve this, g1 and g2 share the same v4l2_m2m_dev, and then the
+v4l2_m2m_dev can handle the scheduling.
+
+Fixes: cb5dd5a0fa518 ("media: hantro: Introduce G2/HEVC decoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ming Qian <ming.qian@oss.nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Co-developed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/verisilicon/hantro.h | 2 +
+ .../media/platform/verisilicon/hantro_drv.c | 42 +++++++++++++++++--
+ .../media/platform/verisilicon/imx8m_vpu_hw.c | 8 ++++
+ 3 files changed, 49 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
+index e9e15746f0aa1..ce13812a94001 100644
+--- a/drivers/media/platform/verisilicon/hantro.h
++++ b/drivers/media/platform/verisilicon/hantro.h
+@@ -77,6 +77,7 @@ struct hantro_irq {
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
++ * @shared_devices: an array of device ids that cannot run concurrently
+ */
+ struct hantro_variant {
+ unsigned int enc_offset;
+@@ -101,6 +102,7 @@ struct hantro_variant {
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
++ const struct of_device_id *shared_devices;
+ };
+
+ /**
+diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
+index 7892b9f34599a..6ebab13712af3 100644
+--- a/drivers/media/platform/verisilicon/hantro_drv.c
++++ b/drivers/media/platform/verisilicon/hantro_drv.c
+@@ -13,6 +13,7 @@
+ #include <linux/clk.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -1030,6 +1031,41 @@ static int hantro_disable_multicore(struct hantro_dev *vpu)
+ return 0;
+ }
+
++static struct v4l2_m2m_dev *hantro_get_v4l2_m2m_dev(struct hantro_dev *vpu)
++{
++ struct device_node *node;
++ struct hantro_dev *shared_vpu;
++
++ if (!vpu->variant || !vpu->variant->shared_devices)
++ goto init_new_m2m_dev;
++
++ for_each_matching_node(node, vpu->variant->shared_devices) {
++ struct platform_device *pdev;
++ struct v4l2_m2m_dev *m2m_dev;
++
++ pdev = of_find_device_by_node(node);
++ if (!pdev)
++ continue;
++
++ shared_vpu = platform_get_drvdata(pdev);
++ if (IS_ERR_OR_NULL(shared_vpu) || shared_vpu == vpu) {
++ platform_device_put(pdev);
++ continue;
++ }
++
++ v4l2_m2m_get(shared_vpu->m2m_dev);
++ m2m_dev = shared_vpu->m2m_dev;
++ platform_device_put(pdev);
++
++ of_node_put(node);
++
++ return m2m_dev;
++ }
++
++init_new_m2m_dev:
++ return v4l2_m2m_init(&vpu_m2m_ops);
++}
++
+ static int hantro_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+@@ -1181,7 +1217,7 @@ static int hantro_probe(struct platform_device *pdev)
+ }
+ platform_set_drvdata(pdev, vpu);
+
+- vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
++ vpu->m2m_dev = hantro_get_v4l2_m2m_dev(vpu);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+@@ -1220,7 +1256,7 @@ static int hantro_probe(struct platform_device *pdev)
+ hantro_remove_enc_func(vpu);
+ err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ err_clk_unprepare:
+@@ -1243,7 +1279,7 @@ static void hantro_remove(struct platform_device *pdev)
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+- v4l2_m2m_release(vpu->m2m_dev);
++ v4l2_m2m_put(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+index 74fd985a8aad1..cdaac2f18fb54 100644
+--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
++++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+@@ -361,6 +361,12 @@ const struct hantro_variant imx8mq_vpu_variant = {
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+ };
+
++static const struct of_device_id imx8mq_vpu_shared_resources[] __initconst = {
++ { .compatible = "nxp,imx8mq-vpu-g1", },
++ { .compatible = "nxp,imx8mq-vpu-g2", },
++ { /* sentinel */ }
++};
++
+ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+@@ -374,6 +380,7 @@ const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mq_vpu_g2_variant = {
+@@ -389,6 +396,7 @@ const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
++ .shared_devices = imx8mq_vpu_shared_resources,
+ };
+
+ const struct hantro_variant imx8mm_vpu_g1_variant = {
+--
+2.51.0
+
--- /dev/null
+From 60ac8746c5597f5d611e1acd1b2164a5432d8488 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Dec 2023 15:29:33 +0100
+Subject: memory: mtk-smi: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 08c1aeaa45ce0fd18912e92c6705586c8aa5240f ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/5c35a33cfdc359842e034ddd2e9358f10e91fa1f.1702822744.git.u.kleine-koenig@pengutronix.de
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Stable-dep-of: 6cfa038bddd7 ("memory: mtk-smi: fix device leaks on common probe")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 6523cb5105182..572c7fbdcfd3a 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -566,14 +566,13 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int mtk_smi_larb_remove(struct platform_device *pdev)
++static void mtk_smi_larb_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+@@ -616,7 +615,7 @@ static const struct dev_pm_ops smi_larb_pm_ops = {
+
+ static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+- .remove = mtk_smi_larb_remove,
++ .remove_new = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+@@ -795,14 +794,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int mtk_smi_common_remove(struct platform_device *pdev)
++static void mtk_smi_common_remove(struct platform_device *pdev)
+ {
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+- return 0;
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+@@ -842,7 +840,7 @@ static const struct dev_pm_ops smi_common_pm_ops = {
+
+ static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+- .remove = mtk_smi_common_remove,
++ .remove_new = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+--
+2.51.0
+
--- /dev/null
+From e5edfaaca5bf031084553bc625cf6b6951b234f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:23 +0100
+Subject: memory: mtk-smi: fix device leak on larb probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 9dae65913b32d05dbc8ff4b8a6bf04a0e49a8eb6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during larb probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: cc8bbe1a8312 ("memory: mediatek: Add SMI driver")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 4.6: 038ae37c510f
+Cc: stable@vger.kernel.org # 4.6
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-3-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 668afd12e4c51..5ca197e15eb28 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -574,6 +574,7 @@ static void mtk_smi_larb_remove(struct platform_device *pdev)
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
++ put_device(larb->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From 75b09863e614e321c74ad0abf19b7f8f5e56d933 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Nov 2025 17:46:22 +0100
+Subject: memory: mtk-smi: fix device leaks on common probe
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 6cfa038bddd710f544076ea2ef7792fc82fbedd6 ]
+
+Make sure to drop the reference taken when looking up the SMI device
+during common probe on late probe failure (e.g. probe deferral) and on
+driver unbind.
+
+Fixes: 47404757702e ("memory: mtk-smi: Add device link for smi-sub-common")
+Fixes: 038ae37c510f ("memory: mtk-smi: add missing put_device() call in mtk_smi_device_link_common")
+Cc: stable@vger.kernel.org # 5.16: 038ae37c510f
+Cc: stable@vger.kernel.org # 5.16
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251121164624.13685-2-johan@kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/memory/mtk-smi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 572c7fbdcfd3a..668afd12e4c51 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -563,6 +563,7 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
+ err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
++ put_device(larb->smi_common_dev);
+ return ret;
+ }
+
+@@ -801,6 +802,7 @@ static void mtk_smi_common_remove(struct platform_device *pdev)
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
++ put_device(common->smi_common_dev);
+ }
+
+ static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+--
+2.51.0
+
--- /dev/null
+From fb483cb834ca0ef6577dc682f37592ed32e68477 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:38 +0100
+Subject: mfd: omap-usb-host: Convert to platform remove callback returning
+ void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 418d1e74f8597e0b2d5d0d6e1be8f1f47e68f0a4 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-11-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 24804ba508a3 ("mfd: omap-usb-host: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 78f1bb55dbc0f..ebc62033db169 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -816,13 +816,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+-static int usbhs_omap_remove(struct platform_device *pdev)
++static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+ /* remove children */
+ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+- return 0;
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+@@ -845,7 +844,7 @@ static struct platform_driver usbhs_omap_driver = {
+ .of_match_table = usbhs_omap_dt_ids,
+ },
+ .probe = usbhs_omap_probe,
+- .remove = usbhs_omap_remove,
++ .remove_new = usbhs_omap_remove,
+ };
+
+ MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
+--
+2.51.0
+
--- /dev/null
+From 62f7a6da0dd1b279935f1c300b42d4888182a0fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:07:14 +0100
+Subject: mfd: omap-usb-host: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 24804ba508a3e240501c521685a1c4eb9f574f8e ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Andreas Kemnade <andreas@kemnade.info>
+Link: https://patch.msgid.link/20251219110714.23919-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/omap-usb-host.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index ebc62033db169..e3aae10295a15 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -820,8 +820,10 @@ static void usbhs_omap_remove(struct platform_device *pdev)
+ {
+ pm_runtime_disable(&pdev->dev);
+
+- /* remove children */
+- device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
++ if (pdev->dev.of_node)
++ of_platform_depopulate(&pdev->dev);
++ else
++ device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
+ }
+
+ static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+--
+2.51.0
+
--- /dev/null
+From 27337f55dcf5f331f816e6494167628fed392c8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Nov 2023 17:56:41 +0100
+Subject: mfd: qcom-pm8xxx: Convert to platform remove callback returning void
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 19ea1d3953017518d85db35b69b5aea9bc64d630 ]
+
+The .remove() callback for a platform driver returns an int which makes
+many driver authors wrongly assume it's possible to do error handling by
+returning an error code. However the value returned is ignored (apart
+from emitting a warning) and this typically results in resource leaks.
+
+To improve here there is a quest to make the remove callback return
+void. In the first step of this quest all drivers are converted to
+.remove_new(), which already returns void. Eventually after all drivers
+are converted, .remove_new() will be renamed to .remove().
+
+Trivially convert this driver from always returning zero in the remove
+callback to the void returning variant.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://lore.kernel.org/r/20231123165627.492259-14-u.kleine-koenig@pengutronix.de
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 27a8acea47a9 ("mfd: qcom-pm8xxx: Fix OF populate on driver rebind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 07c531bd1236e..8b6285f687da5 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -585,19 +585,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
+ return 0;
+ }
+
+-static int pm8xxx_remove(struct platform_device *pdev)
++static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+ device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
+ irq_domain_remove(chip->irqdomain);
+-
+- return 0;
+ }
+
+ static struct platform_driver pm8xxx_driver = {
+ .probe = pm8xxx_probe,
+- .remove = pm8xxx_remove,
++ .remove_new = pm8xxx_remove,
+ .driver = {
+ .name = "pm8xxx-core",
+ .of_match_table = pm8xxx_id_table,
+--
+2.51.0
+
--- /dev/null
+From fc5f76dd3d0e8dcdc4648c9b431fb7914378edf8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Dec 2025 12:09:47 +0100
+Subject: mfd: qcom-pm8xxx: Fix OF populate on driver rebind
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 27a8acea47a93fea6ad0e2df4c20a9b51490e4d9 ]
+
+Since commit c6e126de43e7 ("of: Keep track of populated platform
+devices") child devices will not be created by of_platform_populate()
+if the devices had previously been deregistered individually so that the
+OF_POPULATED flag is still set in the corresponding OF nodes.
+
+Switch to using of_platform_depopulate() instead of open coding so that
+the child devices are created if the driver is rebound.
+
+Fixes: c6e126de43e7 ("of: Keep track of populated platform devices")
+Cc: stable@vger.kernel.org # 3.16
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251219110947.24101-1-johan@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/qcom-pm8xxx.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
+index 8b6285f687da5..0e490591177a2 100644
+--- a/drivers/mfd/qcom-pm8xxx.c
++++ b/drivers/mfd/qcom-pm8xxx.c
+@@ -579,17 +579,11 @@ static int pm8xxx_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int pm8xxx_remove_child(struct device *dev, void *unused)
+-{
+- platform_device_unregister(to_platform_device(dev));
+- return 0;
+-}
+-
+ static void pm8xxx_remove(struct platform_device *pdev)
+ {
+ struct pm_irq_chip *chip = platform_get_drvdata(pdev);
+
+- device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
++ of_platform_depopulate(&pdev->dev);
+ irq_domain_remove(chip->irqdomain);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 775a7151c405ce56f9587406fb971eaa8767f406 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Feb 2026 20:55:09 -0800
+Subject: net: arcnet: com20020-pci: fix support for 2.5Mbit cards
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+[ Upstream commit c7d9be66b71af490446127c6ffcb66d6bb71b8b9 ]
+
+Commit 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+converted the com20020-pci driver to use a card info structure instead
+of a single flag mask in driver_data. However, it failed to take into
+account that in the original code, driver_data of 0 indicates a card
+with no special flags, not a card that should not have any card info
+structure. This introduced a null pointer dereference when cards with
+no flags were probed.
+
+Commit bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in
+com20020pci_probe()") then papered over this issue by rejecting cards
+with no driver_data instead of resolving the problem at its source.
+
+Fix the original issue by introducing a new card info structure for
+2.5Mbit cards that does not set any flags and using it if no
+driver_data is present.
+
+Fixes: 8c14f9c70327 ("ARCNET: add com20020 PCI IDs with metadata")
+Fixes: bd6f1fd5d33d ("net: arcnet: com20020: Fix null-ptr-deref in com20020pci_probe()")
+Cc: stable@vger.kernel.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20260213045510.32368-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/arcnet/com20020-pci.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index e7db6a4e4dc9d..e9ee32b091a41 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -114,6 +114,8 @@ static const struct attribute_group com20020_state_group = {
+ .attrs = com20020_state_attrs,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit;
++
+ static void com20020pci_remove(struct pci_dev *pdev);
+
+ static int com20020pci_probe(struct pci_dev *pdev,
+@@ -139,7 +141,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+
+ ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+- return -EINVAL;
++ ci = &card_info_2p5mbit;
+
+ priv->ci = ci;
+ mm = &ci->misc_map;
+@@ -346,6 +348,18 @@ static struct com20020_pci_card_info card_info_5mbit = {
+ .flags = ARC_IS_5MBIT,
+ };
+
++static struct com20020_pci_card_info card_info_2p5mbit = {
++ .name = "ARC-PCI",
++ .devcount = 1,
++ .chan_map_tbl = {
++ {
++ .bar = 2,
++ .offset = 0x00,
++ .size = 0x08,
++ },
++ },
++};
++
+ static struct com20020_pci_card_info card_info_sohard = {
+ .name = "SOHARD SH ARC-PCI",
+ .devcount = 1,
+--
+2.51.0
+
--- /dev/null
+From fbe630de450c5ab2d72ebfbc74e3a7343404e941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Nov 2021 16:56:05 +0530
+Subject: PCI: Update BAR # and window messages
+
+From: Puranjay Mohan <puranjay12@gmail.com>
+
+[ Upstream commit 65f8e0beac5a495b8f3b387add1f9f4470678cb5 ]
+
+The PCI log messages print the register offsets at some places and BAR
+numbers at other places. There is no uniformity in this logging mechanism.
+It would be better to print names than register offsets.
+
+Add a helper function that aids in printing more meaningful information
+about the BAR numbers like "VF BAR", "ROM", "bridge window", etc. This
+function can be called while printing PCI log messages.
+
+[bhelgaas: fold in Lukas' static array suggestion from
+https: //lore.kernel.org/all/20211106115831.GA7452@wunner.de/]
+Link: https://lore.kernel.org/r/20211106112606.192563-2-puranjay12@gmail.com
+Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/pci.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/pci/pci.h | 2 ++
+ 2 files changed, 62 insertions(+)
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index d7d7913eb0ee9..e3612e0e35639 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -850,6 +850,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
+ }
+ EXPORT_SYMBOL(pci_find_resource);
+
++/**
++ * pci_resource_name - Return the name of the PCI resource
++ * @dev: PCI device to query
++ * @i: index of the resource
++ *
++ * Return the standard PCI resource (BAR) name according to their index.
++ */
++const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
++{
++ static const char * const bar_name[] = {
++ "BAR 0",
++ "BAR 1",
++ "BAR 2",
++ "BAR 3",
++ "BAR 4",
++ "BAR 5",
++ "ROM",
++#ifdef CONFIG_PCI_IOV
++ "VF BAR 0",
++ "VF BAR 1",
++ "VF BAR 2",
++ "VF BAR 3",
++ "VF BAR 4",
++ "VF BAR 5",
++#endif
++ "bridge window", /* "io" included in %pR */
++ "bridge window", /* "mem" included in %pR */
++ "bridge window", /* "mem pref" included in %pR */
++ };
++ static const char * const cardbus_name[] = {
++ "BAR 1",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++#ifdef CONFIG_PCI_IOV
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++ "unknown",
++#endif
++ "CardBus bridge window 0", /* I/O */
++ "CardBus bridge window 1", /* I/O */
++ "CardBus bridge window 0", /* mem */
++ "CardBus bridge window 1", /* mem */
++ };
++
++ if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
++ i < ARRAY_SIZE(cardbus_name))
++ return cardbus_name[i];
++
++ if (i < ARRAY_SIZE(bar_name))
++ return bar_name[i];
++
++ return "unknown";
++}
++
+ /**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 485f917641e11..dae7b98536f7a 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -281,6 +281,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *fail_head);
+ bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+
++const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
++
+ void pci_reassigndev_resource_alignment(struct pci_dev *dev);
+ void pci_disable_bridge_window(struct pci_dev *dev);
+ struct pci_bus *pci_bus_get(struct pci_bus *bus);
+--
+2.51.0
+
--- /dev/null
+From 6a00db7448647f0fdb1ff8e7eec01d33fbce50a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 6 Nov 2021 16:56:06 +0530
+Subject: PCI: Use resource names in PCI log messages
+
+From: Puranjay Mohan <puranjay12@gmail.com>
+
+[ Upstream commit dc4e6f21c3f844ebc1c52b6920b8ec5dfc73f4e8 ]
+
+Use the pci_resource_name() to get the name of the resource and use it
+while printing log messages.
+
+[bhelgaas: rename to match struct resource * names, also use names in other
+BAR messages]
+Link: https://lore.kernel.org/r/20211106112606.192563-3-puranjay12@gmail.com
+Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/iov.c | 7 ++--
+ drivers/pci/pci.c | 25 +++++++-------
+ drivers/pci/probe.c | 26 +++++++--------
+ drivers/pci/quirks.c | 15 ++++++---
+ drivers/pci/setup-bus.c | 30 +++++++++++------
+ drivers/pci/setup-res.c | 72 +++++++++++++++++++++++------------------
+ 6 files changed, 103 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index b8bce45a59986..d595a345a7d47 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -749,6 +749,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ u16 ctrl, total;
+ struct pci_sriov *iov;
+ struct resource *res;
++ const char *res_name;
+ struct pci_dev *pdev;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
+@@ -789,6 +790,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
++ res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
++
+ /*
+ * If it is already FIXED, don't change it, something
+ * (perhaps EA or header fixups) wants it this way.
+@@ -806,8 +809,8 @@ static int sriov_init(struct pci_dev *dev, int pos)
+ }
+ iov->barsz[i] = resource_size(res);
+ res->end = res->start + resource_size(res) * total - 1;
+- pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
+- i, res, i, total);
++ pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
++ res_name, res, i, total);
+ i += bar64;
+ nres++;
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e3612e0e35639..d015df77ddff5 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3419,6 +3419,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
+ static int pci_ea_read(struct pci_dev *dev, int offset)
+ {
+ struct resource *res;
++ const char *res_name;
+ int ent_size, ent_offset = offset;
+ resource_size_t start, end;
+ unsigned long flags;
+@@ -3448,6 +3449,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
+ goto out;
+
+ res = pci_ea_get_resource(dev, bei, prop);
++ res_name = pci_resource_name(dev, bei);
+ if (!res) {
+ pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
+ goto out;
+@@ -3521,16 +3523,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
+ res->flags = flags;
+
+ if (bei <= PCI_EA_BEI_BAR5)
+- pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+- bei, res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else if (bei == PCI_EA_BEI_ROM)
+- pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
+- res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
+- pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
+- bei - PCI_EA_BEI_VF_BAR0, res, prop);
++ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
++ res_name, res, prop);
+ else
+- pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
++ pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
+ bei, res, prop);
+
+ out:
+@@ -6840,14 +6842,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ resource_size_t align, bool resize)
+ {
+ struct resource *r = &dev->resource[bar];
++ const char *r_name = pci_resource_name(dev, bar);
+ resource_size_t size;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ return;
+
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+- pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+- bar, r, (unsigned long long)align);
++ pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
++ r_name, r, (unsigned long long)align);
+ return;
+ }
+
+@@ -6883,8 +6886,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ * devices and we use the second.
+ */
+
+- pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
+- bar, r, (unsigned long long)align);
++ pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
++ r_name, r, (unsigned long long)align);
+
+ if (resize) {
+ r->start = 0;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index cc56bf47c4a3f..92f1902afa3b7 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -180,6 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ u64 l64, sz64, mask64;
+ u16 orig_cmd;
+ struct pci_bus_region region, inverted_region;
++ const char *res_name = pci_resource_name(dev, res - dev->resource);
+
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+
+@@ -254,8 +255,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+
+ sz64 = pci_size(l64, sz64, mask64);
+ if (!sz64) {
+- pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
+- pos);
++ pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
+ goto fail;
+ }
+
+@@ -265,8 +265,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ res->start = 0;
+ res->end = 0;
+- pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+- pos, (unsigned long long)sz64);
++ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
++ res_name, (unsigned long long)sz64);
+ goto out;
+ }
+
+@@ -275,8 +275,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = sz64 - 1;
+- pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
+- pos, (unsigned long long)l64);
++ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
++ res_name, (unsigned long long)l64);
+ goto out;
+ }
+ }
+@@ -302,8 +302,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = region.end - region.start;
+- pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+- pos, (unsigned long long)region.start);
++ pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
++ res_name, (unsigned long long)region.start);
+ }
+
+ goto out;
+@@ -313,7 +313,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ res->flags = 0;
+ out:
+ if (res->flags)
+- pci_info(dev, "reg 0x%x: %pR\n", pos, res);
++ pci_info(dev, "%s %pR\n", res_name, res);
+
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+ }
+@@ -1968,14 +1968,14 @@ int pci_setup_device(struct pci_dev *dev)
+ res = &dev->resource[0];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
++ pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
+ res);
+ region.start = 0x3F6;
+ region.end = 0x3F6;
+ res = &dev->resource[1];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
++ pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
+ res);
+ }
+ if ((progif & 4) == 0) {
+@@ -1984,14 +1984,14 @@ int pci_setup_device(struct pci_dev *dev)
+ res = &dev->resource[2];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
++ pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
+ res);
+ region.start = 0x376;
+ region.end = 0x376;
+ res = &dev->resource[3];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, ®ion);
+- pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
++ pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
+ res);
+ }
+ }
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cab4cdbb31387..5df3a6ea66018 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -583,13 +583,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ struct resource *r = &dev->resource[i];
++ const char *r_name = pci_resource_name(dev, i);
+
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+ r->end = PAGE_SIZE - 1;
+ r->start = 0;
+ r->flags |= IORESOURCE_UNSET;
+- pci_info(dev, "expanded BAR %d to page size: %pR\n",
+- i, r);
++ pci_info(dev, "%s %pR: expanded to page size\n",
++ r_name, r);
+ }
+ }
+ }
+@@ -618,6 +619,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
+ u32 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + pos;
++ const char *res_name = pci_resource_name(dev, pos);
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion);
+
+@@ -635,8 +637,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+- pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+- name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
++ pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name);
+ }
+
+ /*
+@@ -683,6 +684,12 @@ static void quirk_io_region(struct pci_dev *dev, int port,
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
++ /*
++ * "res" is typically a bridge window resource that's not being
++ * used for a bridge window, so it's just a place to stash this
++ * non-standard resource. Printing "nr" or pci_resource_name() of
++ * it doesn't really make sense.
++ */
+ if (!pci_claim_resource(dev, nr))
+ pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
+ }
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 3f40be417856e..d07c1d9ed0620 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ struct list_head *head)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_dev_resource *add_res, *tmp;
+ struct pci_dev_resource *dev_res;
+ resource_size_t add_size, align;
+@@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ bool found_match = false;
+
+ res = add_res->res;
++
+ /* Skip resource that has been reset */
+ if (!res->flags)
+ goto out;
+@@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ continue;
+
+ idx = res - &add_res->dev->resource[0];
++ res_name = pci_resource_name(add_res->dev, idx);
+ add_size = add_res->add_size;
+ align = add_res->min_align;
+ if (!resource_size(res)) {
+@@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
+ (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(add_res->dev, idx,
+ add_size, align))
+- pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
+- (unsigned long long) add_size, idx,
+- res);
++ pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
++ res_name, res,
++ (unsigned long long) add_size);
+ }
+ out:
+ list_del(&add_res->list);
+@@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus);
+ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ unsigned long io_mask;
+ u8 io_base_lo, io_limit_lo;
+@@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+
+ /* Set up the top and bottom of the PCI I/O segment for this bus */
+ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, ®ion, res);
+ if (res->flags & IORESOURCE_IO) {
+ pci_read_config_word(bridge, PCI_IO_BASE, &l);
+@@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ l = ((u16) io_limit_lo << 8) | io_base_lo;
+ /* Set up upper 16 bits of I/O base/limit */
+ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ /* Clear upper 16 bits of I/O base/limit */
+ io_upper16 = 0;
+@@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
+ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ u32 l;
+
+ /* Set up the top and bottom of the PCI Memory segment for this bus */
+ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, ®ion, res);
+ if (res->flags & IORESOURCE_MEM) {
+ l = (region.start >> 16) & 0xfff0;
+ l |= region.end & 0xfff00000;
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ l = 0x0000fff0;
+ }
+@@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
+ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ {
+ struct resource *res;
++ const char *res_name;
+ struct pci_bus_region region;
+ u32 l, bu, lu;
+
+@@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ /* Set up PREF base/limit */
+ bu = lu = 0;
+ res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
++ res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
+ pcibios_resource_to_bus(bridge->bus, ®ion, res);
+ if (res->flags & IORESOURCE_PREFETCH) {
+ l = (region.start >> 16) & 0xfff0;
+@@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ }
+- pci_info(bridge, " bridge window %pR\n", res);
++ pci_info(bridge, " %s %pR\n", res_name, res);
+ } else {
+ l = 0x0000fff0;
+ }
+@@ -1009,6 +1018,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ int i;
+
+ pci_dev_for_each_resource(dev, r, i) {
++ const char *r_name = pci_resource_name(dev, i);
+ resource_size_t r_size;
+
+ if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
+@@ -1039,8 +1049,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ if (order < 0)
+ order = 0;
+ if (order >= ARRAY_SIZE(aligns)) {
+- pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
+- i, r, (unsigned long long) align);
++ pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
++ r_name, r, (unsigned long long) align);
+ r->flags = 0;
+ continue;
+ }
+@@ -2230,6 +2240,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
+ i++) {
+ struct resource *res = &bridge->resource[i];
++ const char *res_name = pci_resource_name(bridge, i);
+
+ if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
+ continue;
+@@ -2242,8 +2253,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+ if (ret)
+ goto cleanup;
+
+- pci_info(bridge, "BAR %d: releasing %pR\n",
+- i, res);
++ pci_info(bridge, "%s %pR: releasing\n", res_name, res);
+
+ if (res->parent)
+ release_resource(res);
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index ceaa69491f5ef..c6d933ddfd464 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ u32 new, check, mask;
+ int reg;
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
+@@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_read_config_dword(dev, reg, &check);
+
+ if ((new ^ check) & mask) {
+- pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n",
+- resno, new, check);
++ pci_err(dev, "%s: error updating (%#010x != %#010x)\n",
++ res_name, new, check);
+ }
+
+ if (res->flags & IORESOURCE_MEM_64) {
+@@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ pci_write_config_dword(dev, reg + 4, new);
+ pci_read_config_dword(dev, reg + 4, &check);
+ if (check != new) {
+- pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n",
+- resno, new, check);
++ pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
++ res_name, new, check);
+ }
+ }
+
+@@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ int pci_claim_resource(struct pci_dev *dev, int resource)
+ {
+ struct resource *res = &dev->resource[resource];
++ const char *res_name = pci_resource_name(dev, resource);
+ struct resource *root, *conflict;
+
+ if (res->flags & IORESOURCE_UNSET) {
+- pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
+- resource, res);
++ pci_info(dev, "%s %pR: can't claim; no address assigned\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
+
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+- pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+- resource, res);
++ pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n",
++ res_name, res);
+ res->flags |= IORESOURCE_UNSET;
+ return -EINVAL;
+ }
+
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+- pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+- resource, res, conflict->name, conflict);
++ pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n",
++ res_name, res, conflict->name, conflict);
+ res->flags |= IORESOURCE_UNSET;
+ return -EBUSY;
+ }
+@@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ {
+ struct resource *root, *conflict;
+ resource_size_t fw_addr, start, end;
++ const char *res_name = pci_resource_name(dev, resno);
+
+ fw_addr = pcibios_retrieve_fw_addr(dev, resno);
+ if (!fw_addr)
+@@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ root = &iomem_resource;
+ }
+
+- pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
+- resno, res);
++ pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+- pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
+- resno, res, conflict->name, conflict);
++ pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res,
++ conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ res->flags |= IORESOURCE_UNSET;
+@@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ int pci_assign_resource(struct pci_dev *dev, int resno)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+ resource_size_t align, size;
+ int ret;
+
+@@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ res->flags |= IORESOURCE_UNSET;
+ align = pci_resource_alignment(dev, res);
+ if (!align) {
+- pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
+- resno, res);
++ pci_info(dev, "%s %pR: can't assign; bogus alignment\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0) {
+- pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
++ pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res);
+ ret = pci_revert_fw_address(res, dev, resno, size);
+ }
+
+ if (ret < 0) {
+- pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
++ pci_info(dev, "%s %pR: failed to assign\n", res_name, res);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
++ pci_info(dev, "%s %pR: assigned\n", res_name, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+@@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
+ }
+ EXPORT_SYMBOL(pci_assign_resource);
+
+-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+- resource_size_t min_align)
++int pci_reassign_resource(struct pci_dev *dev, int resno,
++ resource_size_t addsize, resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+ unsigned long flags;
+ resource_size_t new_size;
+ int ret;
+@@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ flags = res->flags;
+ res->flags |= IORESOURCE_UNSET;
+ if (!res->parent) {
+- pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+- resno, res);
++ pci_info(dev, "%s %pR: can't reassign; unassigned resource\n",
++ res_name, res);
+ return -EINVAL;
+ }
+
+@@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (ret) {
+ res->flags = flags;
+- pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
+- resno, res, (unsigned long long) addsize);
++ pci_info(dev, "%s %pR: failed to expand by %#llx\n",
++ res_name, res, (unsigned long long) addsize);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
+- resno, res, (unsigned long long) addsize);
++ pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n",
++ res_name, res, (unsigned long long) addsize);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+@@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ void pci_release_resource(struct pci_dev *dev, int resno)
+ {
+ struct resource *res = dev->resource + resno;
++ const char *res_name = pci_resource_name(dev, resno);
+
+- pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
++ pci_info(dev, "%s %pR: releasing\n", res_name, res);
+
+ if (!res->parent)
+ return;
+@@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ u16 cmd, old_cmd;
+ int i;
+ struct resource *r;
++ const char *r_name;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+@@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ if (!(mask & (1 << i)))
+ continue;
+
++ r_name = pci_resource_name(dev, i);
++
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((i == PCI_ROM_RESOURCE) &&
+@@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
+ continue;
+
+ if (r->flags & IORESOURCE_UNSET) {
+- pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
+- i, r);
++ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
++ r_name, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+- pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
+- i, r);
++ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
++ r_name, r);
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 807df93f63133edf21b5d18fede28d8b159a771a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Dec 2025 16:56:54 +0200
+Subject: PCI: Use resource_set_range() that correctly sets ->end
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 11721c45a8266a9d0c9684153d20e37159465f96 ]
+
+__pci_read_base() sets resource start and end addresses when resource
+is larger than 4G but pci_bus_addr_t or resource_size_t are not capable
+of representing 64-bit PCI addresses. This creates a problematic
+resource that has non-zero flags but whose start and end addresses
+yield a resource size of 1 instead of 0.
+
+Replace custom resource addresses setup with resource_set_range()
+that correctly sets end address as -1 which results in resource_size()
+returning 0.
+
+For consistency, also use resource_set_range() in the other branch that
+does size based resource setup.
+
+Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
+Link: https://lore.kernel.org/all/20251207215359.28895-1-ansuelsmth@gmail.com/T/#m990492684913c5a158ff0e5fc90697d8ad95351b
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Cc: stable@vger.kernel.org
+Cc: Christian Marangi <ansuelsmth@gmail.com>
+Link: https://patch.msgid.link/20251208145654.5294-1-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/probe.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 92f1902afa3b7..d90ffbb47f0e2 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -263,8 +263,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
+ && sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+- res->start = 0;
+- res->end = 0;
++ resource_set_range(res, 0, 0);
+ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+ res_name, (unsigned long long)sz64);
+ goto out;
+@@ -273,8 +272,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ if ((sizeof(pci_bus_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
+ res->flags |= IORESOURCE_UNSET;
+- res->start = 0;
+- res->end = sz64 - 1;
++ resource_set_range(res, 0, sz64);
+ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+ res_name, (unsigned long long)l64);
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From 8bfbfe0532e30d287b0d8a61fa70450f5eba0b2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 13:06:03 +0300
+Subject: resource: Add resource set range and size helpers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+
+[ Upstream commit 9fb6fef0fb49124291837af1da5028f79d53f98e ]
+
+Setting the end address for a resource with a given size lacks a helper and
+is therefore coded manually unlike the getter side which has a helper for
+resource size calculation. Also, almost all callsites that calculate the
+end address for a resource also set the start address right before it like
+this:
+
+ res->start = start_addr;
+ res->end = res->start + size - 1;
+
+Add resource_set_range(res, start_addr, size) that sets the start address
+and calculates the end address to simplify this often repeated fragment.
+
+Also add resource_set_size() for the cases where setting the start address
+of the resource is not necessary but mention in its kerneldoc that
+resource_set_range() is preferred when setting both addresses.
+
+Link: https://lore.kernel.org/r/20240614100606.15830-2-ilpo.jarvinen@linux.intel.com
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 11721c45a826 ("PCI: Use resource_set_range() that correctly sets ->end")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ioport.h | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 25d768d489701..d10749797f18d 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -216,6 +216,38 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
++
++/**
++ * resource_set_size - Calculate resource end address from size and start
++ * @res: Resource descriptor
++ * @size: Size of the resource
++ *
++ * Calculate the end address for @res based on @size.
++ *
++ * Note: The start address of @res must be set when calling this function.
++ * Prefer resource_set_range() if setting both the start address and @size.
++ */
++static inline void resource_set_size(struct resource *res, resource_size_t size)
++{
++ res->end = res->start + size - 1;
++}
++
++/**
++ * resource_set_range - Set resource start and end addresses
++ * @res: Resource descriptor
++ * @start: Start address for the resource
++ * @size: Size of the resource
++ *
++ * Set @res start address and calculate the end address based on @size.
++ */
++static inline void resource_set_range(struct resource *res,
++ resource_size_t start,
++ resource_size_t size)
++{
++ res->start = start;
++ resource_set_size(res, size);
++}
++
+ static inline resource_size_t resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+--
+2.51.0
+
btrfs-fix-compat-mask-in-error-messages-in-btrfs_che.patch
bpf-fix-stack-out-of-bounds-write-in-devmap.patch
pci-correct-pci_cap_exp_endpoint_sizeof_v2-value.patch
+memory-mtk-smi-convert-to-platform-remove-callback-r.patch
+memory-mtk-smi-fix-device-leaks-on-common-probe.patch
+memory-mtk-smi-fix-device-leak-on-larb-probe.patch
+pci-update-bar-and-window-messages.patch
+pci-use-resource-names-in-pci-log-messages.patch
+resource-add-resource-set-range-and-size-helpers.patch
+pci-use-resource_set_range-that-correctly-sets-end.patch
+media-hantro-disable-multicore-support.patch
+media-v4l2-mem2mem-add-a-kref-to-the-v4l2_m2m_dev-st.patch
+media-verisilicon-avoid-g2-bus-error-while-decoding-.patch
+kvm-x86-fix-kvm_get_msrs-stack-info-leak.patch
+kvm-x86-rename-kvm_msr_ret_invalid-to-kvm_msr_ret_un.patch
+kvm-x86-return-unsupported-instead-of-invalid-on-acc.patch
+media-tegra-video-use-accessors-for-pad-config-try_-.patch
+media-tegra-video-fix-memory-leak-in-__tegra_channel.patch
+kvm-x86-warn-if-a-vcpu-gets-a-valid-wakeup-that-kvm-.patch
+kvm-x86-ignore-ebusy-when-checking-nested-events-fro.patch
+drm-tegra-dsi-fix-device-leak-on-probe.patch
+bus-omap-ocp2scp-convert-to-platform-remove-callback.patch
+bus-omap-ocp2scp-fix-of-populate-on-driver-rebind.patch
+ext4-get-rid-of-ppath-in-ext4_find_extent.patch
+ext4-get-rid-of-ppath-in-ext4_ext_create_new_leaf.patch
+ext4-get-rid-of-ppath-in-ext4_ext_insert_extent.patch
+ext4-get-rid-of-ppath-in-ext4_split_extent_at.patch
+ext4-subdivide-ext4_ext_data_valid1.patch
+ext4-don-t-zero-the-entire-extent-if-ext4_ext_data_p.patch
+ext4-get-rid-of-ppath-in-ext4_split_extent.patch
+ext4-get-rid-of-ppath-in-ext4_split_convert_extents.patch
+ext4-get-rid-of-ppath-in-ext4_convert_unwritten_exte.patch
+ext4-get-rid-of-ppath-in-ext4_ext_convert_to_initial.patch
+ext4-get-rid-of-ppath-in-ext4_ext_handle_unwritten_e.patch
+ext4-correct-the-comments-place-for-ext4_ext_may_zer.patch
+ext4-don-t-set-ext4_get_blocks_convert-when-splittin.patch
+ext4-drop-extent-cache-after-doing-partial_valid1-ze.patch
+ext4-drop-extent-cache-when-splitting-extent-fails.patch
+mailbox-use-of_property_match_string-instead-of-open.patch
+mailbox-don-t-protect-of_parse_phandle_with_args-wit.patch
+mailbox-sort-headers-alphabetically.patch
+mailbox-remove-unused-header-files.patch
+mailbox-use-dev_err-when-there-is-error.patch
+mailbox-use-guard-scoped_guard-for-con_mutex.patch
+mailbox-allow-controller-specific-mapping-using-fwno.patch
+mailbox-prevent-out-of-bounds-access-in-fw_mbox_inde.patch
+ext4-delete-redundant-calculations-in-ext4_mb_get_bu.patch
+ext4-convert-bd_bitmap_page-to-bd_bitmap_folio.patch
+ext4-convert-bd_buddy_page-to-bd_buddy_folio.patch
+ext4-fix-e4b-bitmap-inconsistency-reports.patch
+mfd-qcom-pm8xxx-convert-to-platform-remove-callback-.patch
+mfd-qcom-pm8xxx-fix-of-populate-on-driver-rebind.patch
+mfd-omap-usb-host-convert-to-platform-remove-callbac.patch
+mfd-omap-usb-host-fix-of-populate-on-driver-rebind.patch
+arm64-dts-rockchip-fix-rk356x-pcie-range-mappings.patch
+clk-tegra-tegra124-emc-fix-device-leak-on-set_rate.patch
+usb-cdns3-remove-redundant-if-branch.patch
+usb-cdns3-call-cdns_power_is_lost-only-once-in-cdns_.patch
+usb-cdns3-fix-role-switching-during-resume.patch
+drm-amd-fix-hang-on-amdgpu-unload-by-using-pci_dev_i.patch
+alsa-hda-conexant-add-quirk-for-hp-zbook-studio-g4.patch
+hwmon-max16065-use-read-write_once-to-avoid-compiler.patch
+alsa-hda-conexant-fix-headphone-jack-handling-on-ace.patch
+net-arcnet-com20020-pci-fix-support-for-2.5mbit-card.patch
+drm-amd-drop-special-case-for-yellow-carp-without-di.patch
+drm-amdgpu-keep-vga-memory-on-macbooks-with-switchab.patch
--- /dev/null
+From 1cbebb0e97b3243b8f0bb7a2e2b0504c82170055 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2025 18:36:49 +0100
+Subject: usb: cdns3: call cdns_power_is_lost() only once in cdns_resume()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Théo Lebrun <theo.lebrun@bootlin.com>
+
+[ Upstream commit 17c6526b333cfd89a4c888a6f7c876c8c326e5ae ]
+
+cdns_power_is_lost() does a register read.
+Call it only once rather than twice.
+
+Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Link: https://lore.kernel.org/r/20250205-s2r-cdns-v7-4-13658a271c3c@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 98980a23e1c22..1243a5cea91b5 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -524,11 +524,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
+
+ int cdns_resume(struct cdns *cdns)
+ {
++ bool power_lost = cdns_power_is_lost(cdns);
+ enum usb_role real_role;
+ bool role_changed = false;
+ int ret = 0;
+
+- if (cdns_power_is_lost(cdns)) {
++ if (power_lost) {
+ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+@@ -551,7 +552,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+
+ if (cdns->roles[cdns->role]->resume)
+- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
++ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+ }
+--
+2.51.0
+
--- /dev/null
+From bc814e0b8a8bef4987b099f424425abf3e616be8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 30 Jan 2026 11:05:45 +0100
+Subject: usb: cdns3: fix role switching during resume
+
+From: Thomas Richard (TI) <thomas.richard@bootlin.com>
+
+[ Upstream commit 87e4b043b98a1d269be0b812f383881abee0ca45 ]
+
+If the role change while we are suspended, the cdns3 driver switches to the
+new mode during resume. However, switching to host mode in this context
+causes a NULL pointer dereference.
+
+The host role's start() operation registers a xhci-hcd device, but its
+probe is deferred while we are in the resume path. The host role's resume()
+operation assumes the xhci-hcd device is already probed, which is not the
+case, leading to the dereference. Since the start() operation of the new
+role is already called, the resume operation can be skipped.
+
+So skip the resume operation for the new role if a role switch occurs
+during resume. Once the resume sequence is complete, the xhci-hcd device
+can be probed in case of host mode.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000208
+Mem abort info:
+...
+Data abort info:
+...
+[0000000000000208] pgd=0000000000000000, p4d=0000000000000000
+Internal error: Oops: 0000000096000004 [#1] SMP
+Modules linked in:
+CPU: 0 UID: 0 PID: 146 Comm: sh Not tainted
+6.19.0-rc7-00013-g6e64f4aabfae-dirty #135 PREEMPT
+Hardware name: Texas Instruments J7200 EVM (DT)
+pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+pc : usb_hcd_is_primary_hcd+0x0/0x1c
+lr : cdns_host_resume+0x24/0x5c
+...
+Call trace:
+ usb_hcd_is_primary_hcd+0x0/0x1c (P)
+ cdns_resume+0x6c/0xbc
+ cdns3_controller_resume.isra.0+0xe8/0x17c
+ cdns3_plat_resume+0x18/0x24
+ platform_pm_resume+0x2c/0x68
+ dpm_run_callback+0x90/0x248
+ device_resume+0x100/0x24c
+ dpm_resume+0x190/0x2ec
+ dpm_resume_end+0x18/0x34
+ suspend_devices_and_enter+0x2b0/0xa44
+ pm_suspend+0x16c/0x5fc
+ state_store+0x80/0xec
+ kobj_attr_store+0x18/0x2c
+ sysfs_kf_write+0x7c/0x94
+ kernfs_fop_write_iter+0x130/0x1dc
+ vfs_write+0x240/0x370
+ ksys_write+0x70/0x108
+ __arm64_sys_write+0x1c/0x28
+ invoke_syscall+0x48/0x10c
+ el0_svc_common.constprop.0+0x40/0xe0
+ do_el0_svc+0x1c/0x28
+ el0_svc+0x34/0x108
+ el0t_64_sync_handler+0xa0/0xe4
+ el0t_64_sync+0x198/0x19c
+Code: 52800003 f9407ca5 d63f00a0 17ffffe4 (f9410401)
+---[ end trace 0000000000000000 ]---
+
+Cc: stable <stable@kernel.org>
+Fixes: 2cf2581cd229 ("usb: cdns3: add power lost support for system resume")
+Signed-off-by: Thomas Richard (TI) <thomas.richard@bootlin.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://patch.msgid.link/20260130-usb-cdns3-fix-role-switching-during-resume-v1-1-44c456852b52@bootlin.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 1243a5cea91b5..f0e32227c0b79 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -551,7 +551,7 @@ int cdns_resume(struct cdns *cdns)
+ }
+ }
+
+- if (cdns->roles[cdns->role]->resume)
++ if (!role_changed && cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 121c4cd5357907b3a114dab0e81910759556d4a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Dec 2024 09:36:41 +0800
+Subject: usb: cdns3: remove redundant if branch
+
+From: Hongyu Xie <xiehongyu1@kylinos.cn>
+
+[ Upstream commit dedab674428f8a99468a4864c067128ba9ea83a6 ]
+
+cdns->role_sw->dev->driver_data gets set in routines showing below,
+cdns_init
+ sw_desc.driver_data = cdns;
+ cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
+ dev_set_drvdata(&sw->dev, desc->driver_data);
+
+In cdns_resume,
+cdns->role = cdns_role_get(cdns->role_sw); //line redundant
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
+ dev_get_drvdata(&sw->dev)
+ return dev->driver_data
+return cdns->role;
+
+"line redundant" equals to,
+ cdns->role = cdns->role;
+
+So fix this if branch.
+
+Signed-off-by: Hongyu Xie <xiehongyu1@kylinos.cn>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/20241231013641.23908-1-xiehongyu1@kylinos.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 87e4b043b98a ("usb: cdns3: fix role switching during resume")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 465e9267b49c1..98980a23e1c22 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -529,9 +529,7 @@ int cdns_resume(struct cdns *cdns)
+ int ret = 0;
+
+ if (cdns_power_is_lost(cdns)) {
+- if (cdns->role_sw) {
+- cdns->role = cdns_role_get(cdns->role_sw);
+- } else {
++ if (!cdns->role_sw) {
+ real_role = cdns_hw_role_state_machine(cdns);
+ if (real_role != cdns->role) {
+ ret = cdns_hw_role_switch(cdns);
+--
+2.51.0
+