--- /dev/null
+From c7cd02328cdba7c344f6d6eeeadc8297b35079c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:37:42 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Assert PLL reset on PHY power off
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 9ce71e85b29eb63e48e294479742e670513f03a0 upstream.
+
+Assert PLL reset on PHY power off. This saves power.
+
+[claudiu.beznea: fixed conflict in rcar_gen3_phy_usb2_power_off() by
+ using spin_lock_irqsave()/spin_unlock_irqrestore() instead of
+ scoped_guard()]
+
+Fixes: f3b5a8d9b50d ("phy: rcar-gen3-usb2: Add R-Car Gen3 USB2 PHY driver")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-5-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 558c07512c05b..9fcbde094699d 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -516,8 +516,15 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+
+ spin_lock_irqsave(&channel->lock, flags);
+ rphy->powered = false;
+- spin_unlock_irqrestore(&channel->lock, flags);
+
++ if (rcar_gen3_are_all_rphys_power_off(channel)) {
++ u32 val = readl(channel->base + USB2_USBCTR);
++
++ val |= USB2_USBCTR_PLL_RST;
++ writel(val, channel->base + USB2_USBCTR);
++ }
++ spin_unlock_irqrestore(&channel->lock, flags);
++
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
+--
+2.53.0
+
--- /dev/null
+From 581174631d7ef2da1e842a5807003e1982e42c3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:37:39 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Fix role detection on unbind/bind
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 54c4c58713aaff76c2422ff5750e557ab3b100d7 upstream.
+
+It has been observed on the Renesas RZ/G3S SoC that unbinding and binding
+the PHY driver leads to role autodetection failures. This issue occurs when
+PHY 3 is the first initialized PHY. PHY 3 does not have an interrupt
+associated with the USB2_INT_ENABLE register (as
+rcar_gen3_int_enable[3] = 0). As a result, rcar_gen3_init_otg() is called
+to initialize OTG without enabling PHY interrupts.
+
+To resolve this, add rcar_gen3_is_any_otg_rphy_initialized() and call it in
+role_store(), role_show(), and rcar_gen3_init_otg(). At the same time,
+rcar_gen3_init_otg() is only called when initialization for a PHY with
+interrupt bits is in progress. As a result, the
+struct rcar_gen3_phy::otg_initialized is no longer needed.
+
+[claudiu.beznea: declare the i iterator from
+ rcar_gen3_is_any_otg_rphy_initialized() outside of for loop]
+
+Fixes: 549b6b55b005 ("phy: renesas: rcar-gen3-usb2: enable/disable independent irqs")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-2-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 32 +++++++++++-------------
+ 1 file changed, 14 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index ea01a121b8fc5..646a5140b30e1 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -98,7 +98,6 @@ struct rcar_gen3_phy {
+ struct rcar_gen3_chan *ch;
+ u32 int_enable_bits;
+ bool initialized;
+- bool otg_initialized;
+ bool powered;
+ };
+
+@@ -288,16 +287,16 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
+ return false;
+ }
+
+-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
++static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
+ {
+- int i;
++ enum rcar_gen3_phy_index i;
+
+- for (i = 0; i < NUM_OF_PHYS; i++) {
+- if (ch->rphys[i].otg_initialized)
+- return false;
++ for (i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; i++) {
++ if (ch->rphys[i].initialized)
++ return true;
+ }
+
+- return true;
++ return false;
+ }
+
+ static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
+@@ -319,7 +318,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
+
+- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+ if (sysfs_streq(buf, "host"))
+@@ -357,7 +356,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+ return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+@@ -370,6 +369,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
++ if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
++ return;
++
+ /* Should not use functions of read-modify-write a register */
+ val = readl(usb2_base + USB2_LINECTRL1);
+ val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
+@@ -435,12 +437,9 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
+ }
+
+- /* Initialize otg part */
+- if (channel->is_otg_channel) {
+- if (rcar_gen3_needs_init_otg(channel))
+- rcar_gen3_init_otg(channel);
+- rphy->otg_initialized = true;
+- }
++ /* Initialize otg part (only if we initialize a PHY with IRQs). */
++ if (rphy->int_enable_bits)
++ rcar_gen3_init_otg(channel);
+
+ rphy->initialized = true;
+
+@@ -456,9 +455,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+
+ rphy->initialized = false;
+
+- if (channel->is_otg_channel)
+- rphy->otg_initialized = false;
+-
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val &= ~rphy->int_enable_bits;
+ if (!rcar_gen3_is_any_rphy_initialized(channel))
+--
+2.53.0
+
--- /dev/null
+From 5071fe1dee00a6bb7bf77928f2ad8bd606c8687d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:37:41 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Lock around hardware registers and
+ driver data
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 55a387ebb9219cbe4edfa8ba9996ccb0e7ad4932 upstream.
+
+The phy-rcar-gen3-usb2 driver exposes four individual PHYs that are
+requested and configured by PHY users. The struct phy_ops APIs access the
+same set of registers to configure all PHYs. Additionally, PHY settings can
+be modified through sysfs or an IRQ handler. While some struct phy_ops APIs
+are protected by a driver-wide mutex, others rely on individual
+PHY-specific mutexes.
+
+This approach can lead to various issues, including:
+1/ the IRQ handler may interrupt PHY settings in progress, racing with
+ hardware configuration protected by a mutex lock
+2/ due to msleep(20) in rcar_gen3_init_otg(), while a configuration thread
+ suspends to wait for the delay, another thread may try to configure
+ another PHY (with phy_init() + phy_power_on()); re-running the
+ phy_init() goes to the exact same configuration code, re-running the
+ same hardware configuration on the same set of registers (and bits)
+ which might impact the result of the msleep for the 1st configuring
+ thread
+3/ sysfs can configure the hardware (through role_store()) and it can
+ still race with the phy_init()/phy_power_on() APIs calling into the
+ drivers struct phy_ops
+
+To address these issues, add a spinlock to protect hardware register access
+and driver private data structures (e.g., calls to
+rcar_gen3_is_any_rphy_initialized()). Checking driver-specific data remains
+necessary as all PHY instances share common settings. With this change,
+the existing mutex protection is removed and the cleanup.h helpers are
+used.
+
+While at it, to keep the code simpler, do not skip
+regulator_enable()/regulator_disable() APIs in
+rcar_gen3_phy_usb2_power_on()/rcar_gen3_phy_usb2_power_off() as the
+regulators enable/disable operations are reference counted anyway.
+
+[claudiu.beznea:
+ - in rcar_gen3_init_otg(): fixed conflict by dropping ch->soc_no_adp_ctrl check
+ - in rcar_gen3_phy_usb2_irq() use spin_lock()/spin_unlock() as scoped_guard()
+   is not available in v5.10
+ - in probe(): replace mutex_init() with spin_lock_init()
+ - rcar_gen3_phy_usb2_power_off() replaced scoped_guard() as it is not
+ available in v5.10
+ - in rcar_gen3_phy_usb2_power_on() dropped guard to avoid compilation
+ warning "ISO C90 forbids mixed declarations and code"]
+
+Fixes: f3b5a8d9b50d ("phy: rcar-gen3-usb2: Add R-Car Gen3 USB2 PHY driver")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-4-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 41 +++++++++++++++---------
+ 1 file changed, 26 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index f66e0daa23648..558c07512c05b 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -9,6 +9,7 @@
+ * Copyright (C) 2014 Cogent Embedded, Inc.
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/extcon-provider.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -108,7 +109,7 @@ struct rcar_gen3_chan {
+ struct rcar_gen3_phy rphys[NUM_OF_PHYS];
+ struct regulator *vbus;
+ struct work_struct work;
+- struct mutex lock; /* protects rphys[...].powered */
++ spinlock_t lock; /* protects access to hardware and driver data structure. */
+ enum usb_dr_mode dr_mode;
+ bool extcon_host;
+ bool is_otg_channel;
+@@ -317,6 +318,8 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
+
++ guard(spinlock_irqsave)(&ch->lock);
++
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+@@ -404,6 +407,8 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ if (pm_runtime_suspended(dev))
+ goto rpm_put;
+
++ spin_lock(&ch->lock);
++
+ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & USB2_OBINT_BITS) {
+ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+@@ -412,6 +417,8 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ ret = IRQ_HANDLED;
+ }
+
++ spin_unlock(&ch->lock);
++
+ rpm_put:
+ pm_runtime_put_noidle(dev);
+ return ret;
+@@ -424,6 +431,8 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+
++ guard(spinlock_irqsave)(&channel->lock);
++
+ /* Initialize USB2 part */
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
+@@ -450,6 +459,8 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+
++ guard(spinlock_irqsave)(&channel->lock);
++
+ rphy->initialized = false;
+
+ val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -466,19 +477,21 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
++ unsigned long flags;
+ u32 val;
+ int ret = 0;
+
+- mutex_lock(&channel->lock);
+- if (!rcar_gen3_are_all_rphys_power_off(channel))
+- goto out;
+-
+ if (channel->vbus) {
+ ret = regulator_enable(channel->vbus);
+ if (ret)
+- goto out;
++ return ret;
+ }
+
++ spin_lock_irqsave(&channel->lock, flags);
++
++ if (!rcar_gen3_are_all_rphys_power_off(channel))
++ goto out;
++
+ val = readl(usb2_base + USB2_USBCTR);
+ val |= USB2_USBCTR_PLL_RST;
+ writel(val, usb2_base + USB2_USBCTR);
+@@ -488,7 +501,8 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ out:
+ /* The powered flag should be set for any other phys anyway */
+ rphy->powered = true;
+- mutex_unlock(&channel->lock);
++
++ spin_unlock_irqrestore(&channel->lock, flags);
+
+ return 0;
+ }
+@@ -497,20 +511,16 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+ {
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
++ unsigned long flags;
+ int ret = 0;
+
+- mutex_lock(&channel->lock);
++ spin_lock_irqsave(&channel->lock, flags);
+ rphy->powered = false;
+-
+- if (!rcar_gen3_are_all_rphys_power_off(channel))
+- goto out;
++ spin_unlock_irqrestore(&channel->lock, flags);
+
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
+-out:
+- mutex_unlock(&channel->lock);
+-
+ return ret;
+ }
+
+@@ -650,7 +660,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ goto error;
+ }
+
+- mutex_init(&channel->lock);
++ spin_lock_init(&channel->lock);
++
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ phy_usb2_ops);
+--
+2.53.0
+
--- /dev/null
+From 8f5b94bf37140234f64aa8c0db67cccbf648b0ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:37:40 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Move IRQ request in probe
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit de76809f60cc938d3580bbbd5b04b7d12af6ce3a upstream.
+
+Commit 08b0ad375ca6 ("phy: renesas: rcar-gen3-usb2: move IRQ registration
+to init") moved the IRQ request operation from probe to
+struct phy_ops::phy_init API to avoid triggering interrupts (which lead to
+register accesses) while the PHY clocks (enabled through runtime PM APIs)
+are not active. If this happens, it results in a synchronous abort.
+
+One way to reproduce this issue is by enabling CONFIG_DEBUG_SHIRQ, which
+calls free_irq() on driver removal.
+
+Move the IRQ request and free operations back to probe, and take the
+runtime PM state into account in IRQ handler. This commit is preparatory
+for the subsequent fixes in this series.
+
+[claudiu.beznea: fixed conflicts by:
+ - dropping irq and obint_enable_bits members of rcar_gen3_chan
+ - using USB2_OBINT_BITS macro in rcar_gen3_phy_usb2_irq()
+ - keeping irq local variable in rcar_gen3_phy_usb2_probe()
+ - dropping channel->irq and channel->obint_enable_bits assignment from
+ probe
+ - keeping platform_set_drvdata() and channel->dev assignment in probe]
+
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-3-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 46 +++++++++++++-----------
+ 1 file changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 646a5140b30e1..f66e0daa23648 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -110,7 +110,6 @@ struct rcar_gen3_chan {
+ struct work_struct work;
+ struct mutex lock; /* protects rphys[...].powered */
+ enum usb_dr_mode dr_mode;
+- int irq;
+ bool extcon_host;
+ bool is_otg_channel;
+ bool uses_otg_pins;
+@@ -396,16 +395,25 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ {
+ struct rcar_gen3_chan *ch = _ch;
+ void __iomem *usb2_base = ch->base;
+- u32 status = readl(usb2_base + USB2_OBINTSTA);
++ struct device *dev = ch->dev;
+ irqreturn_t ret = IRQ_NONE;
++ u32 status;
+
++ pm_runtime_get_noresume(dev);
++
++ if (pm_runtime_suspended(dev))
++ goto rpm_put;
++
++ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & USB2_OBINT_BITS) {
+- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
++ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+ writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
+
++rpm_put:
++ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+@@ -415,17 +423,6 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+- int ret;
+-
+- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
+- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+- ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
+- IRQF_SHARED, dev_name(channel->dev), channel);
+- if (ret < 0) {
+- dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
+- return ret;
+- }
+- }
+
+ /* Initialize USB2 part */
+ val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -461,9 +458,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ val &= ~USB2_INT_ENABLE_UCOM_INTEN;
+ writel(val, usb2_base + USB2_INT_ENABLE);
+
+- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
+- free_irq(channel->irq, channel);
+-
+ return 0;
+ }
+
+@@ -612,7 +606,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ struct phy_provider *provider;
+ struct resource *res;
+ const struct phy_ops *phy_usb2_ops;
+- int ret = 0, i;
++ int ret = 0, i, irq;
+
+ if (!dev->of_node) {
+ dev_err(dev, "This driver needs device tree\n");
+@@ -628,8 +622,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ if (IS_ERR(channel->base))
+ return PTR_ERR(channel->base);
+
+- /* get irq number here and request_irq for OTG in phy_init */
+- channel->irq = platform_get_irq_optional(pdev, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+ channel->is_otg_channel = true;
+@@ -683,6 +675,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, channel);
+ channel->dev = dev;
++
++ irq = platform_get_irq_optional(pdev, 0);
++ if (irq < 0 && irq != -ENXIO) {
++ ret = irq;
++ goto error;
++ } else if (irq > 0) {
++ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
++ ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
++ IRQF_SHARED, dev_name(dev), channel);
++ if (ret < 0) {
++ dev_err(dev, "Failed to request irq (%d)\n", irq);
++ goto error;
++ }
++ }
+
+ provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
+ if (IS_ERR(provider)) {
+--
+2.53.0
+
usb-ulpi-fix-double-free-in-ulpi_register_interface-error-path.patch
usb-usbtmc-flush-anchored-urbs-in-usbtmc_release.patch
usb-ehci-brcm-fix-sleep-during-atomic.patch
+phy-renesas-rcar-gen3-usb2-fix-role-detection-on-unb.patch
+phy-renesas-rcar-gen3-usb2-move-irq-request-in-probe.patch
+phy-renesas-rcar-gen3-usb2-lock-around-hardware-regi.patch
+phy-renesas-rcar-gen3-usb2-assert-pll-reset-on-phy-p.patch
--- /dev/null
+From 602f4edf9f23a759f04ea726dcf6ca475c8de016 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 Apr 2026 00:23:36 +0300
+Subject: nvmet-tcp: fix use-before-check of sg in bounds validation
+
+From: Cengiz Can <cengiz.can@canonical.com>
+
+The stable backport of commit 52a0a9854934 ("nvmet-tcp: add bounds
+checks in nvmet_tcp_build_pdu_iovec") placed the bounds checks after
+the iov_len calculation:
+
+ while (length) {
+ u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+
+ if (!sg_remaining) { /* too late: sg already dereferenced */
+
+In mainline, the checks come first because C99 allows mid-block variable
+declarations. The stable backport moved the declaration to the top of the
+loop to satisfy C89 declaration rules, but this ended up placing the
+sg->length dereference before the sg_remaining and sg->length guards.
+
+If sg_next() returns NULL at the end of the scatterlist, the next
+iteration dereferences a NULL pointer in the iov_len calculation before
+the sg_remaining check can prevent it.
+
+Fix this by moving the iov_len declaration to function scope and
+keeping the assignment after the bounds checks, matching the ordering
+in mainline.
+
+Fixes: 42afe8ed8ad2 ("nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec")
+Cc: stable@vger.kernel.org
+Cc: YunJe Shin <ioerts@kookmin.ac.kr>
+Cc: Sagi Grimberg <sagi@grimberg.me>
+Cc: Keith Busch <kbusch@kernel.org>
+Cc: linux-nvme@lists.infradead.org
+Signed-off-by: Cengiz Can <cengiz.can@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/tcp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 8f7984c53f3f2..c6cc1dfef92cf 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -312,7 +312,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ {
+ struct bio_vec *iov = cmd->iov;
+ struct scatterlist *sg;
+- u32 length, offset, sg_offset;
++ u32 length, offset, sg_offset, iov_len;
+ unsigned int sg_remaining;
+ int nr_pages;
+
+@@ -329,8 +329,6 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
+
+ while (length) {
+- u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+-
+ if (!sg_remaining) {
+ nvmet_tcp_fatal_error(cmd->queue);
+ return;
+@@ -340,6 +338,8 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+ return;
+ }
+
++ iov_len = min_t(u32, length, sg->length - sg_offset);
++
+ iov->bv_page = sg_page(sg);
+ iov->bv_len = iov_len;
+ iov->bv_offset = sg->offset + sg_offset;
+--
+2.53.0
+
--- /dev/null
+From 51959ac123e71f09b1bc1d33432d38378ac18e59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:38:07 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Assert PLL reset on PHY power off
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 9ce71e85b29eb63e48e294479742e670513f03a0 upstream.
+
+Assert PLL reset on PHY power off. This saves power.
+
+[claudiu.beznea: fixed conflict in rcar_gen3_phy_usb2_power_off() by
+ using spin_lock_irqsave()/spin_unlock_irqrestore() instead of
+ scoped_guard()]
+
+Fixes: f3b5a8d9b50d ("phy: rcar-gen3-usb2: Add R-Car Gen3 USB2 PHY driver")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-5-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 0626e00ccea7e..7e25c0e053a44 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -540,6 +540,13 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+
+ spin_lock_irqsave(&channel->lock, flags);
+ rphy->powered = false;
++
++ if (rcar_gen3_are_all_rphys_power_off(channel)) {
++ u32 val = readl(channel->base + USB2_USBCTR);
++
++ val |= USB2_USBCTR_PLL_RST;
++ writel(val, channel->base + USB2_USBCTR);
++ }
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ if (channel->vbus)
+--
+2.53.0
+
--- /dev/null
+From 94872668db44d36c000aaf32390d5011cb6d6a0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:38:04 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Fix role detection on unbind/bind
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 54c4c58713aaff76c2422ff5750e557ab3b100d7 upstream.
+
+It has been observed on the Renesas RZ/G3S SoC that unbinding and binding
+the PHY driver leads to role autodetection failures. This issue occurs when
+PHY 3 is the first initialized PHY. PHY 3 does not have an interrupt
+associated with the USB2_INT_ENABLE register (as
+rcar_gen3_int_enable[3] = 0). As a result, rcar_gen3_init_otg() is called
+to initialize OTG without enabling PHY interrupts.
+
+To resolve this, add rcar_gen3_is_any_otg_rphy_initialized() and call it in
+role_store(), role_show(), and rcar_gen3_init_otg(). At the same time,
+rcar_gen3_init_otg() is only called when initialization for a PHY with
+interrupt bits is in progress. As a result, the
+struct rcar_gen3_phy::otg_initialized is no longer needed.
+
+[claudiu.beznea: declare the i iterator from
+ rcar_gen3_is_any_otg_rphy_initialized() outside of for loop]
+
+Fixes: 549b6b55b005 ("phy: renesas: rcar-gen3-usb2: enable/disable independent irqs")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-2-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 32 +++++++++++-------------
+ 1 file changed, 14 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 670514d44fe3f..8c03b683ba1c9 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -103,7 +103,6 @@ struct rcar_gen3_phy {
+ struct rcar_gen3_chan *ch;
+ u32 int_enable_bits;
+ bool initialized;
+- bool otg_initialized;
+ bool powered;
+ };
+
+@@ -311,16 +310,16 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
+ return false;
+ }
+
+-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
++static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
+ {
+- int i;
++ enum rcar_gen3_phy_index i;
+
+- for (i = 0; i < NUM_OF_PHYS; i++) {
+- if (ch->rphys[i].otg_initialized)
+- return false;
++ for (i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; i++) {
++ if (ch->rphys[i].initialized)
++ return true;
+ }
+
+- return true;
++ return false;
+ }
+
+ static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
+@@ -342,7 +341,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
+
+- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+ if (sysfs_streq(buf, "host"))
+@@ -380,7 +379,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
++ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+ return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+@@ -393,6 +392,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
++ if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
++ return;
++
+ /* Should not use functions of read-modify-write a register */
+ val = readl(usb2_base + USB2_LINECTRL1);
+ val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
+@@ -459,12 +461,9 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
+ }
+
+- /* Initialize otg part */
+- if (channel->is_otg_channel) {
+- if (rcar_gen3_needs_init_otg(channel))
+- rcar_gen3_init_otg(channel);
+- rphy->otg_initialized = true;
+- }
++ /* Initialize otg part (only if we initialize a PHY with IRQs). */
++ if (rphy->int_enable_bits)
++ rcar_gen3_init_otg(channel);
+
+ rphy->initialized = true;
+
+@@ -480,9 +479,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+
+ rphy->initialized = false;
+
+- if (channel->is_otg_channel)
+- rphy->otg_initialized = false;
+-
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val &= ~rphy->int_enable_bits;
+ if (!rcar_gen3_is_any_rphy_initialized(channel))
+--
+2.53.0
+
--- /dev/null
+From 78c845ba71a280594b574718aaf0072573e262ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:38:06 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Lock around hardware registers and
+ driver data
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit 55a387ebb9219cbe4edfa8ba9996ccb0e7ad4932 upstream.
+
+The phy-rcar-gen3-usb2 driver exposes four individual PHYs that are
+requested and configured by PHY users. The struct phy_ops APIs access the
+same set of registers to configure all PHYs. Additionally, PHY settings can
+be modified through sysfs or an IRQ handler. While some struct phy_ops APIs
+are protected by a driver-wide mutex, others rely on individual
+PHY-specific mutexes.
+
+This approach can lead to various issues, including:
+1/ the IRQ handler may interrupt PHY settings in progress, racing with
+ hardware configuration protected by a mutex lock
+2/ due to msleep(20) in rcar_gen3_init_otg(), while a configuration thread
+ suspends to wait for the delay, another thread may try to configure
+ another PHY (with phy_init() + phy_power_on()); re-running the
+ phy_init() goes to the exact same configuration code, re-running the
+ same hardware configuration on the same set of registers (and bits)
+ which might impact the result of the msleep for the 1st configuring
+ thread
+3/ sysfs can configure the hardware (through role_store()) and it can
+ still race with the phy_init()/phy_power_on() APIs calling into the
+ drivers struct phy_ops
+
+To address these issues, add a spinlock to protect hardware register access
+and driver private data structures (e.g., calls to
+rcar_gen3_is_any_rphy_initialized()). Checking driver-specific data remains
+necessary as all PHY instances share common settings. With this change,
+the existing mutex protection is removed and the cleanup.h helpers are
+used.
+
+While at it, to keep the code simpler, do not skip
+regulator_enable()/regulator_disable() APIs in
+rcar_gen3_phy_usb2_power_on()/rcar_gen3_phy_usb2_power_off() as the
+regulators enable/disable operations are reference counted anyway.
+
+[claudiu.beznea:
+ - in rcar_gen3_phy_usb2_irq() and rcar_gen3_phy_usb2_power_off() replaced
+ scoped_guard() with spin_lock()/spin_unlock(), since scoped_guard() is
+ not available in v5.15
+ - in rcar_gen3_phy_usb2_power_on() used spin_lock_irqsave()/
+ spin_unlock_irqrestore() instead of guard() to avoid compilation warning
+ "ISO C90 forbids mixed declarations and code"]
+
+Fixes: f3b5a8d9b50d ("phy: rcar-gen3-usb2: Add R-Car Gen3 USB2 PHY driver")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-4-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 42 +++++++++++++++---------
+ 1 file changed, 26 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index d873c49500cdd..0626e00ccea7e 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -9,6 +9,7 @@
+ * Copyright (C) 2014 Cogent Embedded, Inc.
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/extcon-provider.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -113,7 +114,7 @@ struct rcar_gen3_chan {
+ struct rcar_gen3_phy rphys[NUM_OF_PHYS];
+ struct regulator *vbus;
+ struct work_struct work;
+- struct mutex lock; /* protects rphys[...].powered */
++ spinlock_t lock; /* protects access to hardware and driver data structure. */
+ enum usb_dr_mode dr_mode;
+ u32 obint_enable_bits;
+ bool extcon_host;
+@@ -340,6 +341,8 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
+
++ guard(spinlock_irqsave)(&ch->lock);
++
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return -EIO;
+
+@@ -407,7 +410,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ }
+- msleep(20);
++ mdelay(20);
+
+ writel(0xffffffff, usb2_base + USB2_OBINTSTA);
+ writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
+@@ -428,6 +431,8 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ if (pm_runtime_suspended(dev))
+ goto rpm_put;
+
++ spin_lock(&ch->lock);
++
+ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & ch->obint_enable_bits) {
+ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+@@ -436,6 +441,8 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ ret = IRQ_HANDLED;
+ }
+
++ spin_unlock(&ch->lock);
++
+ rpm_put:
+ pm_runtime_put_noidle(dev);
+ return ret;
+@@ -448,6 +455,8 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+
++ guard(spinlock_irqsave)(&channel->lock);
++
+ /* Initialize USB2 part */
+ val = readl(usb2_base + USB2_INT_ENABLE);
+ val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
+@@ -474,6 +483,8 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+
++ guard(spinlock_irqsave)(&channel->lock);
++
+ rphy->initialized = false;
+
+ val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -490,19 +501,21 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
++ unsigned long flags;
+ u32 val;
+ int ret = 0;
+
+- mutex_lock(&channel->lock);
+- if (!rcar_gen3_are_all_rphys_power_off(channel))
+- goto out;
+-
+ if (channel->vbus) {
+ ret = regulator_enable(channel->vbus);
+ if (ret)
+- goto out;
++ return ret;
+ }
+
++ spin_lock_irqsave(&channel->lock, flags);
++
++ if (!rcar_gen3_are_all_rphys_power_off(channel))
++ goto out;
++
+ val = readl(usb2_base + USB2_USBCTR);
+ val |= USB2_USBCTR_PLL_RST;
+ writel(val, usb2_base + USB2_USBCTR);
+@@ -512,7 +525,8 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ out:
+ /* The powered flag should be set for any other phys anyway */
+ rphy->powered = true;
+- mutex_unlock(&channel->lock);
++
++ spin_unlock_irqrestore(&channel->lock, flags);
+
+ return 0;
+ }
+@@ -521,20 +535,16 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+ {
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
++ unsigned long flags;
+ int ret = 0;
+
+- mutex_lock(&channel->lock);
++ spin_lock_irqsave(&channel->lock, flags);
+ rphy->powered = false;
+-
+- if (!rcar_gen3_are_all_rphys_power_off(channel))
+- goto out;
++ spin_unlock_irqrestore(&channel->lock, flags);
+
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
+-out:
+- mutex_unlock(&channel->lock);
+-
+ return ret;
+ }
+
+@@ -697,7 +707,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ if (phy_data->no_adp_ctrl)
+ channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
+
+- mutex_init(&channel->lock);
++ spin_lock_init(&channel->lock);
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ phy_data->phy_usb2_ops);
+--
+2.53.0
+
--- /dev/null
+From af5290bcaaf58db53770d1bf9f775925c02b5a2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 14:38:05 +0300
+Subject: phy: renesas: rcar-gen3-usb2: Move IRQ request in probe
+
+From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+
+commit de76809f60cc938d3580bbbd5b04b7d12af6ce3a upstream.
+
+Commit 08b0ad375ca6 ("phy: renesas: rcar-gen3-usb2: move IRQ registration
+to init") moved the IRQ request operation from probe to
+struct phy_ops::phy_init API to avoid triggering interrupts (which lead to
+register accesses) while the PHY clocks (enabled through runtime PM APIs)
+are not active. If this happens, it results in a synchronous abort.
+
+One way to reproduce this issue is by enabling CONFIG_DEBUG_SHIRQ, which
+calls free_irq() on driver removal.
+
+Move the IRQ request and free operations back to probe, and take the
+runtime PM state into account in IRQ handler. This commit is preparatory
+for the subsequent fixes in this series.
+
+[claudiu.beznea: fixed conflict in probe b/w IRQ request probe and
+ platform_set_drvdata() by keeping platform_set_drvdata() code before
+ IRQ request code]
+
+Reviewed-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Tested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250507125032.565017-3-claudiu.beznea.uj@bp.renesas.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/renesas/phy-rcar-gen3-usb2.c | 46 +++++++++++++-----------
+ 1 file changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 8c03b683ba1c9..d873c49500cdd 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -115,7 +115,6 @@ struct rcar_gen3_chan {
+ struct work_struct work;
+ struct mutex lock; /* protects rphys[...].powered */
+ enum usb_dr_mode dr_mode;
+- int irq;
+ u32 obint_enable_bits;
+ bool extcon_host;
+ bool is_otg_channel;
+@@ -420,16 +419,25 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+ {
+ struct rcar_gen3_chan *ch = _ch;
+ void __iomem *usb2_base = ch->base;
+- u32 status = readl(usb2_base + USB2_OBINTSTA);
++ struct device *dev = ch->dev;
+ irqreturn_t ret = IRQ_NONE;
++ u32 status;
+
++ pm_runtime_get_noresume(dev);
++
++ if (pm_runtime_suspended(dev))
++ goto rpm_put;
++
++ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & ch->obint_enable_bits) {
+- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
++ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+ writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
+
++rpm_put:
++ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+@@ -439,17 +447,6 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+- int ret;
+-
+- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
+- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+- ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
+- IRQF_SHARED, dev_name(channel->dev), channel);
+- if (ret < 0) {
+- dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
+- return ret;
+- }
+- }
+
+ /* Initialize USB2 part */
+ val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -485,9 +482,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ val &= ~USB2_INT_ENABLE_UCOM_INTEN;
+ writel(val, usb2_base + USB2_INT_ENABLE);
+
+- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
+- free_irq(channel->irq, channel);
+-
+ return 0;
+ }
+
+@@ -654,7 +648,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct rcar_gen3_chan *channel;
+ struct phy_provider *provider;
+- int ret = 0, i;
++ int ret = 0, i, irq;
+
+ if (!dev->of_node) {
+ dev_err(dev, "This driver needs device tree\n");
+@@ -670,8 +664,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ return PTR_ERR(channel->base);
+
+ channel->obint_enable_bits = USB2_OBINT_BITS;
+- /* get irq number here and request_irq for OTG in phy_init */
+- channel->irq = platform_get_irq_optional(pdev, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+ channel->is_otg_channel = true;
+@@ -731,6 +723,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, channel);
+ channel->dev = dev;
+
++ irq = platform_get_irq_optional(pdev, 0);
++ if (irq < 0 && irq != -ENXIO) {
++ ret = irq;
++ goto error;
++ } else if (irq > 0) {
++ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
++ ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
++ IRQF_SHARED, dev_name(dev), channel);
++ if (ret < 0) {
++ dev_err(dev, "Failed to request irq (%d)\n", irq);
++ goto error;
++ }
++ }
++
+ provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "Failed to register PHY provider\n");
+--
+2.53.0
+
usb-dwc2-gadget-fix-spin_lock-unlock-mismatch-in-dwc2_hsotg_udc_stop.patch
usb-cdns3-gadget-fix-null-pointer-dereference-in-ep_queue.patch
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
+nvmet-tcp-fix-use-before-check-of-sg-in-bounds-valid.patch
+phy-renesas-rcar-gen3-usb2-fix-role-detection-on-unb.patch
+phy-renesas-rcar-gen3-usb2-move-irq-request-in-probe.patch
+phy-renesas-rcar-gen3-usb2-lock-around-hardware-regi.patch
+phy-renesas-rcar-gen3-usb2-assert-pll-reset-on-phy-p.patch
--- /dev/null
+From 08bd3186eece179f241e52b53d397b6f39188e41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: avoid infinite loops caused by residual data"
+
+This reverts commit 4fee3f2f4839571a6294946a2efcdb69caa61393.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 42a06360086a8..bb27c04798d2b 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4424,13 +4424,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+- /*
+- * Gracefully handle out of space conditions. If the filesystem
+- * is inconsistent, we'll just leak allocated blocks to avoid
+- * causing even more damage.
+- */
+- if (allocated_clusters && (err == -EDQUOT || err == -ENOSPC)) {
++ if (allocated_clusters) {
+ int fb_flags = 0;
++
+ /*
+ * free data blocks we just allocated.
+ * not a good idea to call discard here directly,
+--
+2.53.0
+
--- /dev/null
+From 1dc636819f519e87a08532faf102b69fdeaa14cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: don't zero the entire extent if
+ EXT4_EXT_DATA_PARTIAL_VALID1"
+
+This reverts commit ddf854e59166533b0f46ba32cd6cd9aca3197d1b.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 13 +------------
+ 1 file changed, 1 insertion(+), 12 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index da7414e84ead8..e2f9c27c7e161 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3298,15 +3298,6 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ }
+
+ if (!err) {
+- /*
+- * The first half contains partially valid data, the
+- * splitting of this extent has not been completed, fix
+- * extent length and ext4_split_extent() split will the
+- * first half again.
+- */
+- if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
+- goto fix_extent_len;
+-
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+@@ -3382,9 +3373,7 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= map->m_lblk > ee_block ?
+- EXT4_EXT_DATA_PARTIAL_VALID1 :
+- EXT4_EXT_DATA_ENTIRE_VALID1;
++ split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+--
+2.53.0
+
--- /dev/null
+From 06e8b70b032c388628f52482000b20a20b1a4961 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: drop extent cache after doing PARTIAL_VALID1 zeroout"
+
+This reverts commit 9e79460b3aae6bbf33f5ccea6c44bf2eefa45daf.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 30b0b25aac9ff..da7414e84ead8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3304,16 +3304,8 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * extent length and ext4_split_extent() split will the
+ * first half again.
+ */
+- if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1) {
+- /*
+- * Drop extent cache to prevent stale unwritten
+- * extents remaining after zeroing out.
+- */
+- ext4_es_remove_extent(inode,
+- le32_to_cpu(zero_ex.ee_block),
+- ext4_ext_get_actual_len(&zero_ex));
++ if (split_flag & EXT4_EXT_DATA_PARTIAL_VALID1)
+ goto fix_extent_len;
+- }
+
+ /* update the extent length and mark as initialized */
+ ex->ee_len = cpu_to_le16(ee_len);
+--
+2.53.0
+
--- /dev/null
+From 7d9a6979c17e800a566d8a9b2ddcda2d94171481 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: drop extent cache when splitting extent fails"
+
+This reverts commit de8e1b17e3876a44c4537bff0bc2dfd244efe8d9.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index bb27c04798d2b..30b0b25aac9ff 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3252,7 +3252,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- goto out_path;
++ return path;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3266,7 +3266,7 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- goto out_path;
++ return path;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -3343,10 +3343,6 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ ext4_free_ext_path(path);
+ path = ERR_PTR(err);
+ }
+-out_path:
+- if (IS_ERR(path))
+- /* Remove all remaining potentially stale extents. */
+- ext4_es_remove_extent(inode, ee_block, ee_len);
+ ext4_ext_show_leaf(inode, path);
+ return path;
+ }
+--
+2.53.0
+
--- /dev/null
+From 472215bf99a003dd5efc9fe51f8112a1b80a6856 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: get rid of ppath in ext4_ext_create_new_leaf()"
+
+This reverts commit 15908fc35056e9a6fd71552eda884a353496e6c7.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 43 +++++++++++++++++++++----------------------
+ 1 file changed, 21 insertions(+), 22 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index eda6f92a42330..a58f415f882b2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1392,12 +1392,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ * finds empty index and adds new leaf.
+ * if no free index is found, then it requests in-depth growing.
+ */
+-static struct ext4_ext_path *
+-ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+- unsigned int mb_flags, unsigned int gb_flags,
+- struct ext4_ext_path *path,
+- struct ext4_extent *newext)
++static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
++ unsigned int mb_flags,
++ unsigned int gb_flags,
++ struct ext4_ext_path **ppath,
++ struct ext4_extent *newext)
+ {
++ struct ext4_ext_path *path = *ppath;
+ struct ext4_ext_path *curp;
+ int depth, i, err = 0;
+
+@@ -1418,25 +1419,28 @@ ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ * entry: create all needed subtree and add new leaf */
+ err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
+ if (err)
+- goto errout;
++ goto out;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- return path;
++ if (IS_ERR(path))
++ err = PTR_ERR(path);
+ } else {
+ /* tree is full, time to grow in depth */
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags);
+ if (err)
+- goto errout;
++ goto out;
+
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+ path, gb_flags);
+- if (IS_ERR(path))
+- return path;
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ goto out;
++ }
+
+ /*
+ * only first (depth 0 -> 1) produces free space;
+@@ -1448,11 +1452,9 @@ ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
+- return path;
+-
+-errout:
+- ext4_free_ext_path(path);
+- return ERR_PTR(err);
++out:
++ *ppath = IS_ERR(path) ? NULL : path;
++ return err;
+ }
+
+ /*
+@@ -2095,14 +2097,11 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ */
+ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+ mb_flags |= EXT4_MB_USE_RESERVED;
+- path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+- path, newext);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- err = PTR_ERR(path);
++ err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
++ ppath, newext);
++ if (err)
+ goto cleanup;
+- }
+- *ppath = path;
++ path = *ppath;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+--
+2.53.0
+
--- /dev/null
+From e8146900c9603e1d99a575cb3f30057c474ea028 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: get rid of ppath in ext4_ext_insert_extent()"
+
+This reverts commit b6a01b66cdaa2da526b512fc0f9938ea5d6c7a1c.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 7 ++--
+ fs/ext4/extents.c | 88 +++++++++++++++++++------------------------
+ fs/ext4/fast_commit.c | 8 ++--
+ fs/ext4/migrate.c | 5 +--
+ 4 files changed, 47 insertions(+), 61 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7449777fabc36..490496adf17cc 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3719,10 +3719,9 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+ int num,
+ struct ext4_ext_path *path);
+-extern struct ext4_ext_path *ext4_ext_insert_extent(
+- handle_t *handle, struct inode *inode,
+- struct ext4_ext_path *path,
+- struct ext4_extent *newext, int gb_flags);
++extern int ext4_ext_insert_extent(handle_t *, struct inode *,
++ struct ext4_ext_path **,
++ struct ext4_extent *, int);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+ struct ext4_ext_path *,
+ int flags);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 59c0bffc691d1..eda6f92a42330 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1960,15 +1960,16 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ * inserts requested extent as new one into the tree,
+ * creating new leaf in the no-space case.
+ */
+-struct ext4_ext_path *
+-ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path *path,
+- struct ext4_extent *newext, int gb_flags)
++int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
++ struct ext4_ext_path **ppath,
++ struct ext4_extent *newext, int gb_flags)
+ {
++ struct ext4_ext_path *path = *ppath;
+ struct ext4_extent_header *eh;
+ struct ext4_extent *ex, *fex;
+ struct ext4_extent *nearex; /* nearest extent */
+- int depth, len, err = 0;
++ struct ext4_ext_path *npath = NULL;
++ int depth, len, err;
+ ext4_lblk_t next;
+ int mb_flags = 0, unwritten;
+
+@@ -1976,16 +1977,14 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_DELALLOC_RESERVED;
+ if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
+ EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
+- err = -EFSCORRUPTED;
+- goto errout;
++ return -EFSCORRUPTED;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ eh = path[depth].p_hdr;
+ if (unlikely(path[depth].p_hdr == NULL)) {
+ EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
+- err = -EFSCORRUPTED;
+- goto errout;
++ return -EFSCORRUPTED;
+ }
+
+ /* try to insert block into found extent and return */
+@@ -2023,7 +2022,7 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- goto errout;
++ return err;
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(newext));
+@@ -2048,7 +2047,7 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ err = ext4_ext_get_access(handle, inode,
+ path + depth);
+ if (err)
+- goto errout;
++ return err;
+
+ unwritten = ext4_ext_is_unwritten(ex);
+ ex->ee_block = newext->ee_block;
+@@ -2073,26 +2072,21 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+ next = ext4_ext_next_leaf_block(path);
+ if (next != EXT_MAX_BLOCKS) {
+- struct ext4_ext_path *npath;
+-
+ ext_debug(inode, "next leaf block - %u\n", next);
++ BUG_ON(npath != NULL);
+ npath = ext4_find_extent(inode, next, NULL, gb_flags);
+- if (IS_ERR(npath)) {
+- err = PTR_ERR(npath);
+- goto errout;
+- }
++ if (IS_ERR(npath))
++ return PTR_ERR(npath);
+ BUG_ON(npath->p_depth != path->p_depth);
+ eh = npath[depth].p_hdr;
+ if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
+ ext_debug(inode, "next leaf isn't full(%d)\n",
+ le16_to_cpu(eh->eh_entries));
+- ext4_free_ext_path(path);
+ path = npath;
+ goto has_space;
+ }
+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+- ext4_free_ext_path(npath);
+ }
+
+ /*
+@@ -2103,8 +2097,12 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ mb_flags |= EXT4_MB_USE_RESERVED;
+ path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
+- if (IS_ERR(path))
+- return path;
++ if (IS_ERR(path)) {
++ *ppath = NULL;
++ err = PTR_ERR(path);
++ goto cleanup;
++ }
++ *ppath = path;
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+
+@@ -2113,7 +2111,7 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+- goto errout;
++ goto cleanup;
+
+ if (!nearex) {
+ /* there is no extent in this leaf, create first one */
+@@ -2171,20 +2169,17 @@ ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
+ ext4_ext_try_to_merge(handle, inode, path, nearex);
+
++
+ /* time to correct all indexes above */
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+- goto errout;
++ goto cleanup;
+
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+- if (err)
+- goto errout;
+-
+- return path;
+
+-errout:
+- ext4_free_ext_path(path);
+- return ERR_PTR(err);
++cleanup:
++ ext4_free_ext_path(npath);
++ return err;
+ }
+
+ static int ext4_fill_es_cache_info(struct inode *inode,
+@@ -3237,29 +3232,24 @@ static int ext4_split_extent_at(handle_t *handle,
+ if (split_flag & EXT4_EXT_MARK_UNWRIT2)
+ ext4_ext_mark_unwritten(ex2);
+
+- path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+- if (!IS_ERR(path)) {
+- *ppath = path;
+- goto out;
+- }
+- *ppath = NULL;
+- err = PTR_ERR(path);
++ err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return err;
++ goto out;
+
+ /*
+- * Get a new path to try to zeroout or fix the extent length.
+- * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
+- * will not return -ENOMEM, otherwise -ENOMEM will cause a
+- * retry in do_writepages(), and a WARN_ON may be triggered
+- * in ext4_da_update_reserve_space() due to an incorrect
+- * ee_len causing the i_reserved_data_blocks exception.
++ * Update path is required because previous ext4_ext_insert_extent()
++ * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
++ * guarantees that ext4_find_extent() will not return -ENOMEM,
++ * otherwise -ENOMEM will cause a retry in do_writepages(), and a
++ * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
++ * an incorrect ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, NULL,
++ path = ext4_find_extent(inode, ee_block, *ppath,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
++ *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+@@ -3318,7 +3308,7 @@ static int ext4_split_extent_at(handle_t *handle,
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
+ return err;
+ out:
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+ return err;
+ }
+
+@@ -4309,7 +4299,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
+ ar.len = allocated = map->m_len;
+ newblock = map->m_pblk;
+- err = 0;
+ goto got_allocated_blocks;
+ }
+
+@@ -4382,9 +4371,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ }
+
+- path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
++ err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
++ if (err) {
+ if (allocated_clusters) {
+ int fb_flags = 0;
+
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index a6fa8013c02f5..eee771bef0272 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1831,12 +1831,12 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ if (ext4_ext_is_unwritten(ex))
+ ext4_ext_mark_unwritten(&newex);
+ down_write(&EXT4_I(inode)->i_data_sem);
+- path = ext4_ext_insert_extent(NULL, inode,
+- path, &newex, 0);
++ ret = ext4_ext_insert_extent(
++ NULL, inode, &path, &newex, 0);
+ up_write((&EXT4_I(inode)->i_data_sem));
+- if (IS_ERR(path))
+- goto out;
+ ext4_free_ext_path(path);
++ if (ret)
++ goto out;
+ goto next;
+ }
+
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index 7a0e429507cf3..0be0467ae6dd2 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -37,6 +37,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ path = ext4_find_extent(inode, lb->first_block, NULL, 0);
+ if (IS_ERR(path)) {
+ retval = PTR_ERR(path);
++ path = NULL;
+ goto err_out;
+ }
+
+@@ -52,9 +53,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
+ retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
+ if (retval < 0)
+ goto err_out;
+- path = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
+- if (IS_ERR(path))
+- retval = PTR_ERR(path);
++ retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
+ err_out:
+ up_write((&EXT4_I(inode)->i_data_sem));
+ ext4_free_ext_path(path);
+--
+2.53.0
+
--- /dev/null
+From 3e305a40c8726f66e73ea4afa43c80d08e4e3fa4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: get rid of ppath in ext4_find_extent()"
+
+This reverts commit b5a010bc7dba7e3d0966c0231335ca76b3f8780e.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/ext4.h | 2 +-
+ fs/ext4/extents.c | 55 ++++++++++++++++++++-----------------------
+ fs/ext4/move_extent.c | 7 +++---
+ 3 files changed, 30 insertions(+), 34 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 490496adf17cc..27753291fb7ec 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3723,7 +3723,7 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+ struct ext4_ext_path **,
+ struct ext4_extent *, int);
+ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+- struct ext4_ext_path *,
++ struct ext4_ext_path **,
+ int flags);
+ extern void ext4_free_ext_path(struct ext4_ext_path *);
+ extern int ext4_ext_check_inode(struct inode *inode);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a58f415f882b2..af4cae13685d7 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -881,10 +881,11 @@ void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+
+ struct ext4_ext_path *
+ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+- struct ext4_ext_path *path, int flags)
++ struct ext4_ext_path **orig_path, int flags)
+ {
+ struct ext4_extent_header *eh;
+ struct buffer_head *bh;
++ struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
+ short int depth, i, ppos = 0;
+ int ret;
+ gfp_t gfp_flags = GFP_NOFS;
+@@ -905,7 +906,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+ ext4_ext_drop_refs(path);
+ if (depth > path[0].p_maxdepth) {
+ kfree(path);
+- path = NULL;
++ *orig_path = path = NULL;
+ }
+ }
+ if (!path) {
+@@ -956,10 +957,14 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+
+ ext4_ext_show_path(inode, path);
+
++ if (orig_path)
++ *orig_path = path;
+ return path;
+
+ err:
+ ext4_free_ext_path(path);
++ if (orig_path)
++ *orig_path = NULL;
+ return ERR_PTR(ret);
+ }
+
+@@ -1424,7 +1429,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- path, gb_flags);
++ ppath, gb_flags);
+ if (IS_ERR(path))
+ err = PTR_ERR(path);
+ } else {
+@@ -1436,7 +1441,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ /* refill path */
+ path = ext4_find_extent(inode,
+ (ext4_lblk_t)le32_to_cpu(newext->ee_block),
+- path, gb_flags);
++ ppath, gb_flags);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -1452,8 +1457,8 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+ goto repeat;
+ }
+ }
++
+ out:
+- *ppath = IS_ERR(path) ? NULL : path;
+ return err;
+ }
+
+@@ -3243,17 +3248,15 @@ static int ext4_split_extent_at(handle_t *handle,
+ * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+ * an incorrect ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, *ppath,
++ path = ext4_find_extent(inode, ee_block, ppath,
+ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- *ppath = NULL;
+ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+- *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3366,12 +3369,9 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
++ path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
++ if (IS_ERR(path))
+ return PTR_ERR(path);
+- }
+- *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -3758,12 +3758,9 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
++ path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
++ if (IS_ERR(path))
+ return PTR_ERR(path);
+- }
+- *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
+@@ -3819,12 +3816,9 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
+ EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+ if (err < 0)
+ return err;
+- path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
++ path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
++ if (IS_ERR(path))
+ return PTR_ERR(path);
+- }
+- *ppath = path;
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ if (!ex) {
+@@ -5203,7 +5197,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * won't be shifted beyond EXT_MAX_BLOCKS.
+ */
+ if (SHIFT == SHIFT_LEFT) {
+- path = ext4_find_extent(inode, start - 1, path,
++ path = ext4_find_extent(inode, start - 1, &path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5252,7 +5246,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ * becomes NULL to indicate the end of the loop.
+ */
+ while (iterator && start <= stop) {
+- path = ext4_find_extent(inode, *iterator, path,
++ path = ext4_find_extent(inode, *iterator, &path,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+@@ -5850,8 +5844,11 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+- if (IS_ERR(path))
+- return PTR_ERR(path);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ path = NULL;
++ goto out;
++ }
+
+ depth = ext_depth(inode);
+
+@@ -5935,7 +5932,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, path, 0);
++ path = ext4_find_extent(inode, start, &path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+@@ -5949,7 +5946,7 @@ int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
+ if (ret)
+ goto out;
+
+- path = ext4_find_extent(inode, start, path, 0);
++ path = ext4_find_extent(inode, start, &path, 0);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ ex = path[path->p_depth].p_ext;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 0aff07c570a46..e01632462db9f 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -26,17 +26,16 @@ static inline int
+ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
+ struct ext4_ext_path **ppath)
+ {
+- struct ext4_ext_path *path = *ppath;
++ struct ext4_ext_path *path;
+
+- *ppath = NULL;
+- path = ext4_find_extent(inode, lblock, path, EXT4_EX_NOCACHE);
++ path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ if (path[ext_depth(inode)].p_ext == NULL) {
+ ext4_free_ext_path(path);
++ *ppath = NULL;
+ return -ENODATA;
+ }
+- *ppath = path;
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From fc303240789fb8c893b877d3f84931e318f073ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: get rid of ppath in ext4_split_extent_at()"
+
+This reverts commit 4d03e2046f73158feb886a45d5682c3b79066872.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 85 +++++++++++++++++++++--------------------------
+ 1 file changed, 38 insertions(+), 47 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 6da0bf3cf406d..59c0bffc691d1 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -84,11 +84,12 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ et->et_checksum = ext4_extent_block_csum(inode, eh);
+ }
+
+-static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path *path,
+- ext4_lblk_t split,
+- int split_flag, int flags);
++static int ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path **ppath,
++ ext4_lblk_t split,
++ int split_flag,
++ int flags);
+
+ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
+ {
+@@ -334,15 +335,9 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
+ if (nofail)
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
+
+- path = ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
++ return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
+ EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
+ flags);
+- if (IS_ERR(path)) {
+- *ppath = NULL;
+- return PTR_ERR(path);
+- }
+- *ppath = path;
+- return 0;
+ }
+
+ static int
+@@ -694,7 +689,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
+ struct ext4_extent *ex;
+ int i;
+
+- if (IS_ERR_OR_NULL(path))
++ if (!path)
+ return;
+
+ eh = path[depth].p_hdr;
+@@ -3160,14 +3155,16 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ * a> the extent are splitted into two extent.
+ * b> split is not needed, and just mark the extent.
+ *
+- * Return an extent path pointer on success, or an error pointer on failure.
++ * return 0 on success.
+ */
+-static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path *path,
+- ext4_lblk_t split,
+- int split_flag, int flags)
++static int ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path **ppath,
++ ext4_lblk_t split,
++ int split_flag,
++ int flags)
+ {
++ struct ext4_ext_path *path = *ppath;
+ ext4_fsblk_t newblock;
+ ext4_lblk_t ee_block;
+ struct ext4_extent *ex, newex, orig_ex, zero_ex;
+@@ -3241,12 +3238,14 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ ext4_ext_mark_unwritten(ex2);
+
+ path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+- if (!IS_ERR(path))
++ if (!IS_ERR(path)) {
++ *ppath = path;
+ goto out;
+-
++ }
++ *ppath = NULL;
+ err = PTR_ERR(path);
+ if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
+- return path;
++ return err;
+
+ /*
+ * Get a new path to try to zeroout or fix the extent length.
+@@ -3256,14 +3255,16 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * in ext4_da_update_reserve_space() due to an incorrect
+ * ee_len causing the i_reserved_data_blocks exception.
+ */
+- path = ext4_find_extent(inode, ee_block, NULL, flags | EXT4_EX_NOFAIL);
++ path = ext4_find_extent(inode, ee_block, NULL,
++ flags | EXT4_EX_NOFAIL);
+ if (IS_ERR(path)) {
+ EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+ split, PTR_ERR(path));
+- return path;
++ return PTR_ERR(path);
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
++ *ppath = path;
+
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+@@ -3315,13 +3316,10 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ * and err is a non-zero error code.
+ */
+ ext4_ext_dirty(handle, inode, path + path->p_depth);
++ return err;
+ out:
+- if (err) {
+- ext4_free_ext_path(path);
+- path = ERR_PTR(err);
+- }
+ ext4_ext_show_leaf(inode, path);
+- return path;
++ return err;
+ }
+
+ /*
+@@ -3368,14 +3366,10 @@ static int ext4_split_extent(handle_t *handle,
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
+- path = ext4_split_extent_at(handle, inode, path,
++ err = ext4_split_extent_at(handle, inode, ppath,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- *ppath = NULL;
++ if (err)
+ goto out;
+- }
+- *ppath = path;
+ } else {
+ allocated = ee_len - (map->m_lblk - ee_block);
+ }
+@@ -3383,7 +3377,7 @@ static int ext4_split_extent(handle_t *handle,
+ * Update path is required because previous ext4_split_extent_at() may
+ * result in split of original leaf or extent zeroout.
+ */
+- path = ext4_find_extent(inode, map->m_lblk, path, flags);
++ path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
+ if (IS_ERR(path)) {
+ *ppath = NULL;
+ return PTR_ERR(path);
+@@ -3405,17 +3399,13 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_MARK_UNWRIT2);
+ }
+- path = ext4_split_extent_at(handle, inode, path,
++ err = ext4_split_extent_at(handle, inode, ppath,
+ map->m_lblk, split_flag1, flags);
+- if (IS_ERR(path)) {
+- err = PTR_ERR(path);
+- *ppath = NULL;
++ if (err)
+ goto out;
+- }
+- *ppath = path;
+ }
+
+- ext4_ext_show_leaf(inode, path);
++ ext4_ext_show_leaf(inode, *ppath);
+ out:
+ return err ? err : allocated;
+ }
+@@ -5611,21 +5601,22 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ if (ext4_ext_is_unwritten(extent))
+ split_flag = EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+- path = ext4_split_extent_at(handle, inode, path,
++ ret = ext4_split_extent_at(handle, inode, &path,
+ offset_lblk, split_flag,
+ EXT4_EX_NOCACHE |
+ EXT4_GET_BLOCKS_PRE_IO |
+ EXT4_GET_BLOCKS_METADATA_NOFAIL);
+ }
+
+- if (IS_ERR(path)) {
++ ext4_free_ext_path(path);
++ if (ret < 0) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+- ret = PTR_ERR(path);
+ goto out_stop;
+ }
++ } else {
++ ext4_free_ext_path(path);
+ }
+
+- ext4_free_ext_path(path);
+ ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
+
+ /*
+--
+2.53.0
+
--- /dev/null
+From f8e0f5d37751c692cf227e2059bac9d7ce00a585 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: make ext4_es_remove_extent() return void"
+
+This reverts commit bfe24a48c1d56b046052014534bde1680fadb9dc.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 34 ++++++++++++++++++++++++++++------
+ fs/ext4/extents_status.c | 12 ++++++------
+ fs/ext4/extents_status.h | 4 ++--
+ fs/ext4/inline.c | 12 ++++++++++--
+ fs/ext4/inode.c | 8 ++++++--
+ 5 files changed, 52 insertions(+), 18 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index af4cae13685d7..1df7174774694 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4463,8 +4463,15 @@ int ext4_ext_truncate(handle_t *handle, struct inode *inode)
+
+ last_block = (inode->i_size + sb->s_blocksize - 1)
+ >> EXT4_BLOCK_SIZE_BITS(sb);
+- ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
+-
++retry:
++ err = ext4_es_remove_extent(inode, last_block,
++ EXT_MAX_BLOCKS - last_block);
++ if (err == -ENOMEM) {
++ memalloc_retry_wait(GFP_ATOMIC);
++ goto retry;
++ }
++ if (err)
++ return err;
+ retry_remove_space:
+ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ if (err == -ENOMEM) {
+@@ -5412,7 +5419,13 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode, 0);
+- ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
++
++ ret = ext4_es_remove_extent(inode, punch_start,
++ EXT_MAX_BLOCKS - punch_start);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_stop;
++ }
+
+ ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+ if (ret) {
+@@ -5598,7 +5611,12 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ ext4_free_ext_path(path);
+ }
+
+- ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
++ ret = ext4_es_remove_extent(inode, offset_lblk,
++ EXT_MAX_BLOCKS - offset_lblk);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_stop;
++ }
+
+ /*
+ * if offset_lblk lies in a hole which is at start of file, use
+@@ -5657,8 +5675,12 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
+ BUG_ON(!inode_is_locked(inode1));
+ BUG_ON(!inode_is_locked(inode2));
+
+- ext4_es_remove_extent(inode1, lblk1, count);
+- ext4_es_remove_extent(inode2, lblk2, count);
++ *erp = ext4_es_remove_extent(inode1, lblk1, count);
++ if (unlikely(*erp))
++ return 0;
++ *erp = ext4_es_remove_extent(inode2, lblk2, count);
++ if (unlikely(*erp))
++ return 0;
+
+ while (count) {
+ struct ext4_extent *ex1, *ex2, tmp_ex;
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 862a8308cd9b0..592229027af72 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1494,10 +1494,10 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ * @len - number of blocks to remove
+ *
+ * Reduces block/cluster reservation count and for bigalloc cancels pending
+- * reservations as needed.
++ * reservations as needed. Returns 0 on success, error code on failure.
+ */
+-void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len)
++int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len)
+ {
+ ext4_lblk_t end;
+ int err = 0;
+@@ -1505,14 +1505,14 @@ void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+ struct extent_status *es = NULL;
+
+ if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
+- return;
++ return 0;
+
+ trace_ext4_es_remove_extent(inode, lblk, len);
+ es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+ lblk, len, inode->i_ino);
+
+ if (!len)
+- return;
++ return err;
+
+ end = lblk + len - 1;
+ BUG_ON(end < lblk);
+@@ -1539,7 +1539,7 @@ void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+
+ ext4_es_print_tree(inode);
+ ext4_da_release_space(inode, reserved);
+- return;
++ return 0;
+ }
+
+ static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
+index 1d1247bbfd477..481ec4381bee6 100644
+--- a/fs/ext4/extents_status.h
++++ b/fs/ext4/extents_status.h
+@@ -133,8 +133,8 @@ extern void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+ extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status);
+-extern void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+- ext4_lblk_t len);
++extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
++ ext4_lblk_t len);
+ extern void ext4_es_find_extent_range(struct inode *inode,
+ int (*match_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index c15ea7589945f..a1fb99d2b472b 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -2004,8 +2004,16 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ * the extent status cache must be cleared to avoid leaving
+ * behind stale delayed allocated extent entries
+ */
+- if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+- ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
++ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
++retry:
++ err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
++ if (err == -ENOMEM) {
++ memalloc_retry_wait(GFP_ATOMIC);
++ goto retry;
++ }
++ if (err)
++ goto out_error;
++ }
+
+ /* Clear the content in the xattr space. */
+ if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bd66dec36da7b..3dd867b6ad657 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4134,8 +4134,12 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode, 0);
+
+- ext4_es_remove_extent(inode, first_block,
+- stop_block - first_block);
++ ret = ext4_es_remove_extent(inode, first_block,
++ stop_block - first_block);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_stop;
++ }
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ ret = ext4_ext_remove_space(inode, first_block,
+--
+2.53.0
+
--- /dev/null
+From 327050bab0ce1190947b91e10417ec19169247c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:41:11 -0400
+Subject: Revert "ext4: subdivide EXT4_EXT_DATA_VALID1"
+
+This reverts commit 1606176c5c6c323167dcd7d4b4f7212b2c8d3d13.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/extents.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index e2f9c27c7e161..6da0bf3cf406d 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -43,13 +43,8 @@
+ #define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
+ #define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
+
+-/* first half contains valid data */
+-#define EXT4_EXT_DATA_ENTIRE_VALID1 0x8 /* has entirely valid data */
+-#define EXT4_EXT_DATA_PARTIAL_VALID1 0x10 /* has partially valid data */
+-#define EXT4_EXT_DATA_VALID1 (EXT4_EXT_DATA_ENTIRE_VALID1 | \
+- EXT4_EXT_DATA_PARTIAL_VALID1)
+-
+-#define EXT4_EXT_DATA_VALID2 0x20 /* second half contains valid data */
++#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+
+ static __le32 ext4_extent_block_csum(struct inode *inode,
+ struct ext4_extent_header *eh)
+@@ -3180,9 +3175,8 @@ static struct ext4_ext_path *ext4_split_extent_at(handle_t *handle,
+ unsigned int ee_len, depth;
+ int err = 0;
+
+- BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) == EXT4_EXT_DATA_VALID1);
+- BUG_ON((split_flag & EXT4_EXT_DATA_VALID1) &&
+- (split_flag & EXT4_EXT_DATA_VALID2));
++ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
++ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
+
+ /* Do not cache extents that are in the process of being modified. */
+ flags |= EXT4_EX_NOCACHE;
+@@ -3373,7 +3367,7 @@ static int ext4_split_extent(handle_t *handle,
+ split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+ EXT4_EXT_MARK_UNWRIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+- split_flag1 |= EXT4_EXT_DATA_ENTIRE_VALID1;
++ split_flag1 |= EXT4_EXT_DATA_VALID1;
+ path = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (IS_ERR(path)) {
+@@ -3737,7 +3731,7 @@ static int ext4_split_convert_extents(handle_t *handle,
+
+ /* Convert to unwritten */
+ if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+- split_flag |= EXT4_EXT_DATA_ENTIRE_VALID1;
++ split_flag |= EXT4_EXT_DATA_VALID1;
+ /* Convert to initialized */
+ } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+ split_flag |= ee_block + ee_len <= eof_block ?
+--
+2.53.0
+
usb-dwc2-gadget-fix-spin_lock-unlock-mismatch-in-dwc2_hsotg_udc_stop.patch
usb-cdns3-gadget-fix-null-pointer-dereference-in-ep_queue.patch
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
+revert-ext4-avoid-infinite-loops-caused-by-residual-.patch
+revert-ext4-drop-extent-cache-when-splitting-extent-.patch
+revert-ext4-drop-extent-cache-after-doing-partial_va.patch
+revert-ext4-don-t-zero-the-entire-extent-if-ext4_ext.patch
+revert-ext4-subdivide-ext4_ext_data_valid1.patch
+revert-ext4-get-rid-of-ppath-in-ext4_split_extent_at.patch
+revert-ext4-get-rid-of-ppath-in-ext4_ext_insert_exte.patch
+revert-ext4-get-rid-of-ppath-in-ext4_ext_create_new_.patch
+revert-ext4-get-rid-of-ppath-in-ext4_find_extent.patch
+revert-ext4-make-ext4_es_remove_extent-return-void.patch
--- /dev/null
+From 6694f3167701abd0534f9174b6a94c6f19dc1698 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:00:56 -0400
+Subject: Revert "LoongArch: Handle percpu handler address for ORC unwinder"
+
+This reverts commit 8eeb34ae9d4c743b1fd2cf58f9c51def37091cf5.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/include/asm/setup.h | 3 ---
+ arch/loongarch/kernel/unwind_orc.c | 16 +---------------
+ 2 files changed, 1 insertion(+), 18 deletions(-)
+
+diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
+index f81375e5e89c0..3c2fb16b11b64 100644
+--- a/arch/loongarch/include/asm/setup.h
++++ b/arch/loongarch/include/asm/setup.h
+@@ -7,7 +7,6 @@
+ #define _LOONGARCH_SETUP_H
+
+ #include <linux/types.h>
+-#include <linux/threads.h>
+ #include <asm/sections.h>
+ #include <uapi/asm/setup.h>
+
+@@ -15,8 +14,6 @@
+
+ extern unsigned long eentry;
+ extern unsigned long tlbrentry;
+-extern unsigned long pcpu_handlers[NR_CPUS];
+-extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+ extern char init_command_line[COMMAND_LINE_SIZE];
+ extern void tlb_init(int cpu);
+ extern void cpu_cache_init(void);
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index e8b95f1bc5786..4924d1ecc4579 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -357,21 +357,7 @@ static bool is_entry_func(unsigned long addr)
+
+ static inline unsigned long bt_address(unsigned long ra)
+ {
+-#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
+- int cpu;
+- int vec_sz = sizeof(exception_handlers);
+-
+- for_each_possible_cpu(cpu) {
+- if (!pcpu_handlers[cpu])
+- continue;
+-
+- if (ra >= pcpu_handlers[cpu] &&
+- ra < pcpu_handlers[cpu] + vec_sz) {
+- ra = ra + eentry - pcpu_handlers[cpu];
+- break;
+- }
+- }
+-#endif
++ extern unsigned long eentry;
+
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+--
+2.53.0
+
--- /dev/null
+From 8f932584f137f115dad98ef312a08f6b3fc3e616 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:00:57 -0400
+Subject: Revert "LoongArch/orc: Use RCU in all users of __module_address()."
+
+This reverts commit 2e6949777d1dbfa5c906a880028680cc3ebe1f68.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 59809c3406c03..471652c0c8653 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -399,7 +399,7 @@ bool unwind_next_frame(struct unwind_state *state)
+ return false;
+
+ /* Don't let modules unload while we're reading their ORC data. */
+- guard(rcu)();
++ preempt_disable();
+
+ if (is_entry_func(state->pc))
+ goto end;
+@@ -514,12 +514,14 @@ bool unwind_next_frame(struct unwind_state *state)
+ if (!__kernel_text_address(state->pc))
+ goto err;
+
++ preempt_enable();
+ return true;
+
+ err:
+ state->error = true;
+
+ end:
++ preempt_enable();
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+ }
+--
+2.53.0
+
--- /dev/null
+From 3d853399df67f50d070f6ba643d69894ee9d0986 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 20:00:57 -0400
+Subject: Revert "LoongArch: Remove unnecessary checks for ORC unwinder"
+
+This reverts commit 5d8e3b81aee2c18610c5f440936f0bf3b6426e56.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/unwind_orc.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c
+index 4924d1ecc4579..59809c3406c03 100644
+--- a/arch/loongarch/kernel/unwind_orc.c
++++ b/arch/loongarch/kernel/unwind_orc.c
+@@ -359,6 +359,12 @@ static inline unsigned long bt_address(unsigned long ra)
+ {
+ extern unsigned long eentry;
+
++ if (__kernel_text_address(ra))
++ return ra;
++
++ if (__module_text_address(ra))
++ return ra;
++
+ if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
+ unsigned long func;
+ unsigned long type = (ra - eentry) / VECSIZE;
+@@ -376,13 +382,10 @@ static inline unsigned long bt_address(unsigned long ra)
+ break;
+ }
+
+- ra = func + offset;
++ return func + offset;
+ }
+
+- if (__kernel_text_address(ra))
+- return ra;
+-
+- return 0;
++ return ra;
+ }
+
+ bool unwind_next_frame(struct unwind_state *state)
+@@ -508,6 +511,9 @@ bool unwind_next_frame(struct unwind_state *state)
+ goto err;
+ }
+
++ if (!__kernel_text_address(state->pc))
++ goto err;
++
+ return true;
+
+ err:
+--
+2.53.0
+
usb-cdns3-gadget-fix-null-pointer-dereference-in-ep_queue.patch
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
x86-platform-geode-fix-on-stack-property-data-use-after-return-bug.patch
+revert-loongarch-handle-percpu-handler-address-for-o.patch
+revert-loongarch-remove-unnecessary-checks-for-orc-u.patch
+revert-loongarch-orc-use-rcu-in-all-users-of-__modul.patch
--- /dev/null
+From e4e18b6f7f6c0d4f25e4f4de2d62f3faa9276397 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2026 16:32:05 +0800
+Subject: ASoC: qcom: sc7280: make use of common helpers
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit 8fdb030fe283c84fd8d378c97ad0f32d6cdec6ce upstream.
+
+sc7280 machine driver can make use of common sdw functions to do most of
+the soundwire related operations. Remove such redundant code from sc7280
+driver.
+
+[This is a partial backport containing only the sound/soc/qcom/sdw.c
+changes which add LPASS CDC DMA DAI IDs to qcom_snd_is_sdw_dai().
+The sc7280.c refactoring changes are omitted as they depend on
+intermediate patches not present in 6.18.y. The sdw.c change fixes a
+NULL pointer dereference for lpass-cpu based SoundWire links.]
+
+Fixes: bcba17279327 ("ASoC: qcom: sdw: fix memory leak for sdw_stream_runtime")
+Cc: stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Tested-by: Steev Klimaszewski <threeway@gmail.com> # Thinkpad X13s
+Link: https://patch.msgid.link/20251022143349.1081513-5-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Xilin Wu <sophon@radxa.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/sdw.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
+index 7b2cae92c8129..5f880c74c8dc2 100644
+--- a/sound/soc/qcom/sdw.c
++++ b/sound/soc/qcom/sdw.c
+@@ -2,6 +2,7 @@
+ // Copyright (c) 2018-2023, Linaro Limited.
+ // Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
++#include <dt-bindings/sound/qcom,lpass.h>
+ #include <dt-bindings/sound/qcom,q6afe.h>
+ #include <linux/module.h>
+ #include <sound/soc.h>
+@@ -35,6 +36,16 @@ static bool qcom_snd_is_sdw_dai(int id)
+ break;
+ }
+
++ /* DSP Bypass usecase, cpu dai index overlaps with DSP dai ids,
++ * DO NOT MERGE into top switch case */
++ switch (id) {
++ case LPASS_CDC_DMA_TX3:
++ case LPASS_CDC_DMA_RX0:
++ return true;
++ default:
++ break;
++ }
++
+ return false;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 9845d2c4136262f2274cef16141701b20d116fcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 07:07:47 -0600
+Subject: io_uring: protect remaining lockless ctx->rings accesses with RCU
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 61a11cf4812726aceaee17c96432e1c08f6ed6cb upstream.
+
+Commit 96189080265e addressed one case of ctx->rings being potentially
+accessed while a resize is happening on the ring, but there are still
+a few others that need handling. Add a helper for retrieving the
+rings associated with an io_uring context, and add some sanity checking
+to that to catch bad uses. ->rings_rcu is always valid, as long as it's
+used within RCU read lock. Any use of ->rings_rcu or ->rings inside
+either ->uring_lock or ->completion_lock is sane as well.
+
+Do the minimum fix for the current kernel, but set it up such that this
+basic infra can be extended for later kernels to make this harder to
+mess up in the future.
+
+Thanks to Junxi Qian for finding and debugging this issue.
+
+Cc: stable@vger.kernel.org
+Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
+Reviewed-by: Junxi Qian <qjx1298677004@gmail.com>
+Tested-by: Junxi Qian <qjx1298677004@gmail.com>
+Link: https://lore.kernel.org/io-uring/20260330172348.89416-1-qjx1298677004@gmail.com/
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/io_uring.c | 62 +++++++++++++++++++++++++++++----------------
+ io_uring/io_uring.h | 34 +++++++++++++++++++++----
+ 2 files changed, 69 insertions(+), 27 deletions(-)
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index d10a38c9dbfb3..99cd5bcac201d 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -199,12 +199,15 @@ static void io_poison_req(struct io_kiocb *req)
+
+ static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+ {
+- return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
++ struct io_rings *rings = io_get_rings(ctx);
++ return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
+ }
+
+ static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
+ {
+- return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
++ struct io_rings *rings = io_get_rings(ctx);
++
++ return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+ }
+
+ static bool io_match_linked(struct io_kiocb *head)
+@@ -2550,12 +2553,15 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
+ if (io_has_work(ctx))
+ goto out_wake;
+ /* got events since we started waiting, min timeout is done */
+- if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
+- goto out_wake;
+- /* if we have any events and min timeout expired, we're done */
+- if (io_cqring_events(ctx))
+- goto out_wake;
++ scoped_guard(rcu) {
++ struct io_rings *rings = io_get_rings(ctx);
+
++ if (iowq->cq_min_tail != READ_ONCE(rings->cq.tail))
++ goto out_wake;
++ /* if we have any events and min timeout expired, we're done */
++ if (io_cqring_events(ctx))
++ goto out_wake;
++ }
+ /*
+ * If using deferred task_work running and application is waiting on
+ * more than one request, ensure we reset it now where we are switching
+@@ -2666,9 +2672,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ struct ext_arg *ext_arg)
+ {
+ struct io_wait_queue iowq;
+- struct io_rings *rings = ctx->rings;
++ struct io_rings *rings;
+ ktime_t start_time;
+- int ret;
++ int ret, nr_wait;
+
+ min_events = min_t(int, min_events, ctx->cq_entries);
+
+@@ -2681,15 +2687,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+
+ if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
+ io_cqring_do_overflow_flush(ctx);
+- if (__io_cqring_events_user(ctx) >= min_events)
++
++ rcu_read_lock();
++ rings = io_get_rings(ctx);
++ if (__io_cqring_events_user(ctx) >= min_events) {
++ rcu_read_unlock();
+ return 0;
++ }
+
+ init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+ iowq.wq.private = current;
+ INIT_LIST_HEAD(&iowq.wq.entry);
+ iowq.ctx = ctx;
+- iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+- iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
++ iowq.cq_tail = READ_ONCE(rings->cq.head) + min_events;
++ iowq.cq_min_tail = READ_ONCE(rings->cq.tail);
++ nr_wait = (int) iowq.cq_tail - READ_ONCE(rings->cq.tail);
++ rcu_read_unlock();
++ rings = NULL;
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ iowq.hit_timeout = 0;
+ iowq.min_timeout = ext_arg->min_time;
+@@ -2720,14 +2734,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ trace_io_uring_cqring_wait(ctx, min_events);
+ do {
+ unsigned long check_cq;
+- int nr_wait;
+-
+- /* if min timeout has been hit, don't reset wait count */
+- if (!iowq.hit_timeout)
+- nr_wait = (int) iowq.cq_tail -
+- READ_ONCE(ctx->rings->cq.tail);
+- else
+- nr_wait = 1;
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, nr_wait);
+@@ -2778,13 +2784,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ break;
+ }
+ cond_resched();
++
++ /* if min timeout has been hit, don't reset wait count */
++ if (!iowq.hit_timeout)
++ scoped_guard(rcu)
++ nr_wait = (int) iowq.cq_tail -
++ READ_ONCE(io_get_rings(ctx)->cq.tail);
++ else
++ nr_wait = 1;
+ } while (1);
+
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ finish_wait(&ctx->cq_wait, &iowq.wq);
+ restore_saved_sigmask_unless(ret == -EINTR);
+
+- return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
++ guard(rcu)();
++ return READ_ONCE(io_get_rings(ctx)->cq.head) == READ_ONCE(io_get_rings(ctx)->cq.tail) ? ret : 0;
+ }
+
+ static void io_rings_free(struct io_ring_ctx *ctx)
+@@ -2956,7 +2971,9 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ */
+ poll_wait(file, &ctx->poll_wq, wait);
+
+- if (!io_sqring_full(ctx))
++ rcu_read_lock();
++
++ if (!__io_sqring_full(ctx))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ /*
+@@ -2976,6 +2993,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ if (__io_cqring_events_user(ctx) || io_has_work(ctx))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
++ rcu_read_unlock();
+ return mask;
+ }
+
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 46d9141d772a7..05702288465b7 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -112,16 +112,28 @@ struct io_wait_queue {
+ #endif
+ };
+
++static inline struct io_rings *io_get_rings(struct io_ring_ctx *ctx)
++{
++ return rcu_dereference_check(ctx->rings_rcu,
++ lockdep_is_held(&ctx->uring_lock) ||
++ lockdep_is_held(&ctx->completion_lock));
++}
++
+ static inline bool io_should_wake(struct io_wait_queue *iowq)
+ {
+ struct io_ring_ctx *ctx = iowq->ctx;
+- int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
++ struct io_rings *rings;
++ int dist;
++
++ guard(rcu)();
++ rings = io_get_rings(ctx);
+
+ /*
+ * Wake up if we have enough events, or if a timeout occurred since we
+ * started waiting. For timeouts, we always want to return to userspace,
+ * regardless of event count.
+ */
++ dist = READ_ONCE(rings->cq.tail) - (int) iowq->cq_tail;
+ return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+ }
+
+@@ -413,9 +425,9 @@ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+ __io_wq_wake(&ctx->cq_wait);
+ }
+
+-static inline bool io_sqring_full(struct io_ring_ctx *ctx)
++static inline bool __io_sqring_full(struct io_ring_ctx *ctx)
+ {
+- struct io_rings *r = ctx->rings;
++ struct io_rings *r = io_get_rings(ctx);
+
+ /*
+ * SQPOLL must use the actual sqring head, as using the cached_sq_head
+@@ -427,9 +439,15 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
+ }
+
+-static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
++static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ {
+- struct io_rings *rings = ctx->rings;
++ guard(rcu)();
++ return __io_sqring_full(ctx);
++}
++
++static inline unsigned int __io_sqring_entries(struct io_ring_ctx *ctx)
++{
++ struct io_rings *rings = io_get_rings(ctx);
+ unsigned int entries;
+
+ /* make sure SQ entry isn't read before tail */
+@@ -490,6 +508,12 @@ static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
+ lockdep_assert_held(&ctx->uring_lock);
+ }
+
++static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
++{
++ guard(rcu)();
++ return __io_sqring_entries(ctx);
++}
++
+ /*
+ * Don't complete immediately but use deferred completion infrastructure.
+ * Protected by ->uring_lock and can only be used either with
+--
+2.53.0
+
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
usb-core-use-dedicated-spinlock-for-offload-state.patch
x86-platform-geode-fix-on-stack-property-data-use-after-return-bug.patch
+io_uring-protect-remaining-lockless-ctx-rings-access.patch
+asoc-qcom-sc7280-make-use-of-common-helpers.patch
--- /dev/null
+From 0fe3a8d6d58b02bad62d937c1597d222671456d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 07:07:47 -0600
+Subject: io_uring: protect remaining lockless ctx->rings accesses with RCU
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 61a11cf4812726aceaee17c96432e1c08f6ed6cb upstream.
+
+Commit 96189080265e addressed one case of ctx->rings being potentially
+accessed while a resize is happening on the ring, but there are still
+a few others that need handling. Add a helper for retrieving the
+rings associated with an io_uring context, and add some sanity checking
+to that to catch bad uses. ->rings_rcu is always valid, as long as it's
+used within RCU read lock. Any use of ->rings_rcu or ->rings inside
+either ->uring_lock or ->completion_lock is sane as well.
+
+Do the minimum fix for the current kernel, but set it up such that this
+basic infra can be extended for later kernels to make this harder to
+mess up in the future.
+
+Thanks to Junxi Qian for finding and debugging this issue.
+
+Cc: stable@vger.kernel.org
+Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
+Reviewed-by: Junxi Qian <qjx1298677004@gmail.com>
+Tested-by: Junxi Qian <qjx1298677004@gmail.com>
+Link: https://lore.kernel.org/io-uring/20260330172348.89416-1-qjx1298677004@gmail.com/
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/io_uring.c | 62 +++++++++++++++++++++++++++++----------------
+ io_uring/io_uring.h | 34 +++++++++++++++++++++----
+ 2 files changed, 69 insertions(+), 27 deletions(-)
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index ac1a5cf102870..84fb1f7b0d818 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -189,12 +189,15 @@ static void io_poison_req(struct io_kiocb *req)
+
+ static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+ {
+- return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
++ struct io_rings *rings = io_get_rings(ctx);
++ return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
+ }
+
+ static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
+ {
+- return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
++ struct io_rings *rings = io_get_rings(ctx);
++
++ return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+ }
+
+ static inline void req_fail_link_node(struct io_kiocb *req, int res)
+@@ -2536,12 +2539,15 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
+ if (io_has_work(ctx))
+ goto out_wake;
+ /* got events since we started waiting, min timeout is done */
+- if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
+- goto out_wake;
+- /* if we have any events and min timeout expired, we're done */
+- if (io_cqring_events(ctx))
+- goto out_wake;
++ scoped_guard(rcu) {
++ struct io_rings *rings = io_get_rings(ctx);
+
++ if (iowq->cq_min_tail != READ_ONCE(rings->cq.tail))
++ goto out_wake;
++ /* if we have any events and min timeout expired, we're done */
++ if (io_cqring_events(ctx))
++ goto out_wake;
++ }
+ /*
+ * If using deferred task_work running and application is waiting on
+ * more than one request, ensure we reset it now where we are switching
+@@ -2652,9 +2658,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ struct ext_arg *ext_arg)
+ {
+ struct io_wait_queue iowq;
+- struct io_rings *rings = ctx->rings;
++ struct io_rings *rings;
+ ktime_t start_time;
+- int ret;
++ int ret, nr_wait;
+
+ min_events = min_t(int, min_events, ctx->cq_entries);
+
+@@ -2667,15 +2673,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+
+ if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
+ io_cqring_do_overflow_flush(ctx);
+- if (__io_cqring_events_user(ctx) >= min_events)
++
++ rcu_read_lock();
++ rings = io_get_rings(ctx);
++ if (__io_cqring_events_user(ctx) >= min_events) {
++ rcu_read_unlock();
+ return 0;
++ }
+
+ init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+ iowq.wq.private = current;
+ INIT_LIST_HEAD(&iowq.wq.entry);
+ iowq.ctx = ctx;
+- iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+- iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
++ iowq.cq_tail = READ_ONCE(rings->cq.head) + min_events;
++ iowq.cq_min_tail = READ_ONCE(rings->cq.tail);
++ nr_wait = (int) iowq.cq_tail - READ_ONCE(rings->cq.tail);
++ rcu_read_unlock();
++ rings = NULL;
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ iowq.hit_timeout = 0;
+ iowq.min_timeout = ext_arg->min_time;
+@@ -2706,14 +2720,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ trace_io_uring_cqring_wait(ctx, min_events);
+ do {
+ unsigned long check_cq;
+- int nr_wait;
+-
+- /* if min timeout has been hit, don't reset wait count */
+- if (!iowq.hit_timeout)
+- nr_wait = (int) iowq.cq_tail -
+- READ_ONCE(ctx->rings->cq.tail);
+- else
+- nr_wait = 1;
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, nr_wait);
+@@ -2764,13 +2770,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ break;
+ }
+ cond_resched();
++
++ /* if min timeout has been hit, don't reset wait count */
++ if (!iowq.hit_timeout)
++ scoped_guard(rcu)
++ nr_wait = (int) iowq.cq_tail -
++ READ_ONCE(io_get_rings(ctx)->cq.tail);
++ else
++ nr_wait = 1;
+ } while (1);
+
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ finish_wait(&ctx->cq_wait, &iowq.wq);
+ restore_saved_sigmask_unless(ret == -EINTR);
+
+- return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
++ guard(rcu)();
++ return READ_ONCE(io_get_rings(ctx)->cq.head) == READ_ONCE(io_get_rings(ctx)->cq.tail) ? ret : 0;
+ }
+
+ static void io_rings_free(struct io_ring_ctx *ctx)
+@@ -2954,7 +2969,9 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ */
+ poll_wait(file, &ctx->poll_wq, wait);
+
+- if (!io_sqring_full(ctx))
++ rcu_read_lock();
++
++ if (!__io_sqring_full(ctx))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ /*
+@@ -2974,6 +2991,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ if (__io_cqring_events_user(ctx) || io_has_work(ctx))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
++ rcu_read_unlock();
+ return mask;
+ }
+
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 0f096f44d34bf..6ee49991cec8b 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -132,16 +132,28 @@ struct io_wait_queue {
+ #endif
+ };
+
++static inline struct io_rings *io_get_rings(struct io_ring_ctx *ctx)
++{
++ return rcu_dereference_check(ctx->rings_rcu,
++ lockdep_is_held(&ctx->uring_lock) ||
++ lockdep_is_held(&ctx->completion_lock));
++}
++
+ static inline bool io_should_wake(struct io_wait_queue *iowq)
+ {
+ struct io_ring_ctx *ctx = iowq->ctx;
+- int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
++ struct io_rings *rings;
++ int dist;
++
++ guard(rcu)();
++ rings = io_get_rings(ctx);
+
+ /*
+ * Wake up if we have enough events, or if a timeout occurred since we
+ * started waiting. For timeouts, we always want to return to userspace,
+ * regardless of event count.
+ */
++ dist = READ_ONCE(rings->cq.tail) - (int) iowq->cq_tail;
+ return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+ }
+
+@@ -432,9 +444,9 @@ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+ __io_wq_wake(&ctx->cq_wait);
+ }
+
+-static inline bool io_sqring_full(struct io_ring_ctx *ctx)
++static inline bool __io_sqring_full(struct io_ring_ctx *ctx)
+ {
+- struct io_rings *r = ctx->rings;
++ struct io_rings *r = io_get_rings(ctx);
+
+ /*
+ * SQPOLL must use the actual sqring head, as using the cached_sq_head
+@@ -446,9 +458,15 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
+ }
+
+-static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
++static inline bool io_sqring_full(struct io_ring_ctx *ctx)
+ {
+- struct io_rings *rings = ctx->rings;
++ guard(rcu)();
++ return __io_sqring_full(ctx);
++}
++
++static inline unsigned int __io_sqring_entries(struct io_ring_ctx *ctx)
++{
++ struct io_rings *rings = io_get_rings(ctx);
+ unsigned int entries;
+
+ /* make sure SQ entry isn't read before tail */
+@@ -509,6 +527,12 @@ static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
+ lockdep_assert_held(&ctx->uring_lock);
+ }
+
++static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
++{
++ guard(rcu)();
++ return __io_sqring_entries(ctx);
++}
++
+ /*
+ * Don't complete immediately but use deferred completion infrastructure.
+ * Protected by ->uring_lock and can only be used either with
+--
+2.53.0
+
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
usb-core-use-dedicated-spinlock-for-offload-state.patch
x86-platform-geode-fix-on-stack-property-data-use-after-return-bug.patch
+io_uring-protect-remaining-lockless-ctx-rings-access.patch
--- /dev/null
+From a8e384be4af1e2589dbb9026312a5f76d71e4196 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2026 18:13:11 +0200
+Subject: fork: defer linking file vma until vma is fully initialized
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit 35e351780fa9d8240dd6f7e4f245f9ea37e96c19 ]
+
+Thorvald reported a WARNING [1]. And the root cause is below race:
+
+ CPU 1 CPU 2
+ fork hugetlbfs_fallocate
+ dup_mmap hugetlbfs_punch_hole
+ i_mmap_lock_write(mapping);
+ vma_interval_tree_insert_after -- Child vma is visible through i_mmap tree.
+ i_mmap_unlock_write(mapping);
+ hugetlb_dup_vma_private -- Clear vma_lock outside i_mmap_rwsem!
+ i_mmap_lock_write(mapping);
+ hugetlb_vmdelete_list
+ vma_interval_tree_foreach
+ hugetlb_vma_trylock_write -- Vma_lock is cleared.
+ tmp->vm_ops->open -- Alloc new vma_lock outside i_mmap_rwsem!
+ hugetlb_vma_unlock_write -- Vma_lock is assigned!!!
+ i_mmap_unlock_write(mapping);
+
+hugetlb_dup_vma_private() and hugetlb_vm_op_open() are called outside
+i_mmap_rwsem lock while vma lock can be used in the same time. Fix this
+by deferring linking file vma until vma is fully initialized. Those vmas
+should be initialized first before they can be used.
+
+[tk: Adapted to 6.6 stable where vma_iter_bulk_store() can fail
+(unlike mainline which uses __mt_dup() for pre-allocation).
+Preserved error handling via goto fail_nomem_vmi_store. Previous
+backport (cec11fa2eb512) was reverted (dd782da470761) due to
+xfstests failures.]
+
+Link: https://lkml.kernel.org/r/20240410091441.3539905-1-linmiaohe@huawei.com
+Fixes: 8d9bfb260814 ("hugetlb: add vma based lock for pmd sharing")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reported-by: Thorvald Natvig <thorvald@google.com>
+Closes: https://lore.kernel.org/linux-mm/20240129161735.6gmjsswx62o4pbja@revolver/T/ [1]
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Mateusz Guzik <mjguzik@gmail.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peng Zhang <zhangpeng.00@bytedance.com>
+Cc: Tycho Andersen <tandersen@netflix.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Assisted-by: Claude:claude-opus-4.6
+Suggested-by: David Nyström <david.nystrom@est.tech>
+Signed-off-by: Tugrul Kukul <tugrul.kukul@est.tech>
+Acked-by: Alex Williamson <alex@shazbot.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/fork.c | 29 +++++++++++++++--------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ce6f6e1e39057..5b60692b1a4ea 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -733,6 +733,21 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ } else if (anon_vma_fork(tmp, mpnt))
+ goto fail_nomem_anon_vma_fork;
+ vm_flags_clear(tmp, VM_LOCKED_MASK);
++ /*
++ * Copy/update hugetlb private vma information.
++ */
++ if (is_vm_hugetlb_page(tmp))
++ hugetlb_dup_vma_private(tmp);
++
++ /* Link the vma into the MT */
++ if (vma_iter_bulk_store(&vmi, tmp))
++ goto fail_nomem_vmi_store;
++
++ mm->map_count++;
++
++ if (tmp->vm_ops && tmp->vm_ops->open)
++ tmp->vm_ops->open(tmp);
++
+ file = tmp->vm_file;
+ if (file) {
+ struct address_space *mapping = file->f_mapping;
+@@ -749,23 +764,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ i_mmap_unlock_write(mapping);
+ }
+
+- /*
+- * Copy/update hugetlb private vma information.
+- */
+- if (is_vm_hugetlb_page(tmp))
+- hugetlb_dup_vma_private(tmp);
+-
+- /* Link the vma into the MT */
+- if (vma_iter_bulk_store(&vmi, tmp))
+- goto fail_nomem_vmi_store;
+-
+- mm->map_count++;
+ if (!(tmp->vm_flags & VM_WIPEONFORK))
+ retval = copy_page_range(tmp, mpnt);
+
+- if (tmp->vm_ops && tmp->vm_ops->open)
+- tmp->vm_ops->open(tmp);
+-
+ if (retval)
+ goto loop_out;
+ }
+--
+2.53.0
+
usb-dwc2-gadget-fix-spin_lock-unlock-mismatch-in-dwc2_hsotg_udc_stop.patch
usb-cdns3-gadget-fix-null-pointer-dereference-in-ep_queue.patch
usb-cdns3-gadget-fix-state-inconsistency-on-gadget-init-failure.patch
+vfio-create-vfio_fs_type-with-inode-per-device.patch
+vfio-pci-use-unmap_mapping_range.patch
+vfio-pci-insert-full-vma-on-mmap-d-mmio-fault.patch
+fork-defer-linking-file-vma-until-vma-is-fully-initi.patch
--- /dev/null
+From 3d2002c4265d9a03a0538d7bcf39a254310f122a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2026 18:13:08 +0200
+Subject: vfio: Create vfio_fs_type with inode per device
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit b7c5e64fecfa88764791679cca4786ac65de739e upstream.
+
+By linking all the device fds we provide to userspace to an
+address space through a new pseudo fs, we can use tools like
+unmap_mapping_range() to zap all vmas associated with a device.
+
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/20240530045236.1005864-2-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
+Signed-off-by: Tugrul Kukul <tugrul.kukul@est.tech>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Tested-by: Pavel Machek (CIP) <pavel@denx.de>
+Tested-by: Ron Economos <re@w6rz.net>
+Tested-by: Justin M. Forbes <jforbes@fedoraproject.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Tested-by: Conor Dooley <conor.dooley@microchip.com>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Tested-by: Shuah Khan <skhan@linuxfoundation.org>
+Tested-by: Peter Schneider <pschneider1968@googlemail.com>
+Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Alex Williamson <alex@shazbot.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/device_cdev.c | 7 ++++++
+ drivers/vfio/group.c | 7 ++++++
+ drivers/vfio/vfio_main.c | 44 ++++++++++++++++++++++++++++++++++++++
+ include/linux/vfio.h | 1 +
+ 4 files changed, 59 insertions(+)
+
+diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
+index e75da0a70d1f8..bb1817bd4ff31 100644
+--- a/drivers/vfio/device_cdev.c
++++ b/drivers/vfio/device_cdev.c
+@@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
+
+ filep->private_data = df;
+
++ /*
++ * Use the pseudo fs inode on the device to link all mmaps
++ * to the same address space, allowing us to unmap all vmas
++ * associated to this device using unmap_mapping_range().
++ */
++ filep->f_mapping = device->inode->i_mapping;
++
+ return 0;
+
+ err_put_registration:
+diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
+index 54c3079031e16..4cd857ff0259b 100644
+--- a/drivers/vfio/group.c
++++ b/drivers/vfio/group.c
+@@ -285,6 +285,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
+ */
+ filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
+
++ /*
++ * Use the pseudo fs inode on the device to link all mmaps
++ * to the same address space, allowing us to unmap all vmas
++ * associated to this device using unmap_mapping_range().
++ */
++ filep->f_mapping = device->inode->i_mapping;
++
+ if (device->group->type == VFIO_NO_IOMMU)
+ dev_warn(device->dev, "vfio-noiommu device opened by user "
+ "(%s:%d)\n", current->comm, task_pid_nr(current));
+diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
+index 6dfb290c339f9..ec4fbd993bf00 100644
+--- a/drivers/vfio/vfio_main.c
++++ b/drivers/vfio/vfio_main.c
+@@ -22,8 +22,10 @@
+ #include <linux/list.h>
+ #include <linux/miscdevice.h>
+ #include <linux/module.h>
++#include <linux/mount.h>
+ #include <linux/mutex.h>
+ #include <linux/pci.h>
++#include <linux/pseudo_fs.h>
+ #include <linux/rwsem.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+@@ -43,9 +45,13 @@
+ #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+ #define DRIVER_DESC "VFIO - User Level meta-driver"
+
++#define VFIO_MAGIC 0x5646494f /* "VFIO" */
++
+ static struct vfio {
+ struct class *device_class;
+ struct ida device_ida;
++ struct vfsmount *vfs_mount;
++ int fs_count;
+ } vfio;
+
+ #ifdef CONFIG_VFIO_NOIOMMU
+@@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev)
+ if (device->ops->release)
+ device->ops->release(device);
+
++ iput(device->inode);
++ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
+ kvfree(device);
+ }
+
+@@ -228,6 +236,34 @@ struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(_vfio_alloc_device);
+
++static int vfio_fs_init_fs_context(struct fs_context *fc)
++{
++ return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
++}
++
++static struct file_system_type vfio_fs_type = {
++ .name = "vfio",
++ .owner = THIS_MODULE,
++ .init_fs_context = vfio_fs_init_fs_context,
++ .kill_sb = kill_anon_super,
++};
++
++static struct inode *vfio_fs_inode_new(void)
++{
++ struct inode *inode;
++ int ret;
++
++ ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
++ if (ret)
++ return ERR_PTR(ret);
++
++ inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
++ if (IS_ERR(inode))
++ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
++
++ return inode;
++}
++
+ /*
+ * Initialize a vfio_device so it can be registered to vfio core.
+ */
+@@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
+ init_completion(&device->comp);
+ device->dev = dev;
+ device->ops = ops;
++ device->inode = vfio_fs_inode_new();
++ if (IS_ERR(device->inode)) {
++ ret = PTR_ERR(device->inode);
++ goto out_inode;
++ }
+
+ if (ops->init) {
+ ret = ops->init(device);
+@@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev,
+ return 0;
+
+ out_uninit:
++ iput(device->inode);
++ simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
++out_inode:
+ vfio_release_device_set(device);
+ ida_free(&vfio.device_ida, device->index);
+ return ret;
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index 5ac5f182ce0bb..514a7f9b3ef4b 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -64,6 +64,7 @@ struct vfio_device {
+ struct completion comp;
+ struct iommufd_access *iommufd_access;
+ void (*put_kvm)(struct kvm *kvm);
++ struct inode *inode;
+ #if IS_ENABLED(CONFIG_IOMMUFD)
+ struct iommufd_device *iommufd_device;
+ u8 iommufd_attached:1;
+--
+2.53.0
+
--- /dev/null
+From 4782dfb223ad1409853f2619ee8da679452a608a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2026 18:13:10 +0200
+Subject: vfio/pci: Insert full vma on mmap'd MMIO fault
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit d71a989cf5d961989c273093cdff2550acdde314 upstream.
+
+In order to improve performance of typical scenarios we can try to insert
+the entire vma on fault. This accelerates typical cases, such as when
+the MMIO region is DMA mapped by QEMU. The vfio_iommu_type1 driver will
+fault in the entire DMA mapped range through fixup_user_fault().
+
+In synthetic testing, this improves the time required to walk a PCI BAR
+mapping from userspace by roughly 1/3rd.
+
+This is likely an interim solution until vmf_insert_pfn_{pmd,pud}() gain
+support for pfnmaps.
+
+Suggested-by: Yan Zhao <yan.y.zhao@intel.com>
+Link: https://lore.kernel.org/all/Zl6XdUkt%2FzMMGOLF@yzhao56-desk.sh.intel.com/
+Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
+Link: https://lore.kernel.org/r/20240607035213.2054226-1-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
+Signed-off-by: Tugrul Kukul <tugrul.kukul@est.tech>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Tested-by: Pavel Machek (CIP) <pavel@denx.de>
+Tested-by: Ron Economos <re@w6rz.net>
+Tested-by: Justin M. Forbes <jforbes@fedoraproject.org>
+Tested-by: Mark Brown <broonie@kernel.org>
+Tested-by: Conor Dooley <conor.dooley@microchip.com>
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Tested-by: Shuah Khan <skhan@linuxfoundation.org>
+Tested-by: Peter Schneider <pschneider1968@googlemail.com>
+Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Alex Williamson <alex@shazbot.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_core.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index e05d6ee9d4cab..55e28feba475e 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1651,6 +1651,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+ unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
++ unsigned long addr = vma->vm_start;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+
+ pfn = vma_to_pfn(vma);
+@@ -1658,11 +1659,25 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ down_read(&vdev->memory_lock);
+
+ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
+- goto out_disabled;
++ goto out_unlock;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
++ if (ret & VM_FAULT_ERROR)
++ goto out_unlock;
+
+-out_disabled:
++ /*
++ * Pre-fault the remainder of the vma, abort further insertions and
++ * supress error if fault is encountered during pre-fault.
++ */
++ for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
++ if (addr == vmf->address)
++ continue;
++
++ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
++ break;
++ }
++
++out_unlock:
+ up_read(&vdev->memory_lock);
+
+ return ret;
+--
+2.53.0
+
--- /dev/null
+From 2f3578fcddd0891cad918817156fdfbbee773974 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2026 18:13:09 +0200
+Subject: vfio/pci: Use unmap_mapping_range()
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+commit aac6db75a9fc2c7a6f73e152df8f15101dda38e6 upstream.
+
+With the vfio device fd tied to the address space of the pseudo fs
+inode, we can use the mm to track all vmas that might be mmap'ing
+device BARs, which removes our vma_list and all the complicated lock
+ordering necessary to manually zap each related vma.
+
+Note that we can no longer store the pfn in vm_pgoff if we want to use
+unmap_mapping_range() to zap a selective portion of the device fd
+corresponding to BAR mappings.
+
+This also converts our mmap fault handler to use vmf_insert_pfn()
+because we no longer have a vma_list to avoid the concurrency problem
+with io_remap_pfn_range(). The goal is to eventually use the vm_ops
+huge_fault handler to avoid the additional faulting overhead, but
+vmf_insert_pfn_{pmd,pud}() need to learn about pfnmaps first.
+
+Also, Jason notes that a race exists between unmap_mapping_range() and
+the fops mmap callback if we were to call io_remap_pfn_range() to
+populate the vma on mmap. Specifically, mmap_region() does call_mmap()
+before it does vma_link_file() which gives a window where the vma is
+populated but invisible to unmap_mapping_range().
+
+Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Link: https://lore.kernel.org/r/20240530045236.1005864-3-alex.williamson@redhat.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
+Signed-off-by: Tugrul Kukul <tugrul.kukul@est.tech>
+Acked-by: Alex Williamson <alex@shazbot.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci_core.c | 264 +++++++------------------------
+ include/linux/vfio_pci_core.h | 2 -
+ 2 files changed, 55 insertions(+), 211 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
+index 3f139360752e2..e05d6ee9d4cab 100644
+--- a/drivers/vfio/pci/vfio_pci_core.c
++++ b/drivers/vfio/pci/vfio_pci_core.c
+@@ -1599,100 +1599,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu
+ }
+ EXPORT_SYMBOL_GPL(vfio_pci_core_write);
+
+-/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+-static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
++static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
+ {
+- struct vfio_pci_mmap_vma *mmap_vma, *tmp;
++ struct vfio_device *core_vdev = &vdev->vdev;
++ loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX);
++ loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX);
++ loff_t len = end - start;
+
+- /*
+- * Lock ordering:
+- * vma_lock is nested under mmap_lock for vm_ops callback paths.
+- * The memory_lock semaphore is used by both code paths calling
+- * into this function to zap vmas and the vm_ops.fault callback
+- * to protect the memory enable state of the device.
+- *
+- * When zapping vmas we need to maintain the mmap_lock => vma_lock
+- * ordering, which requires using vma_lock to walk vma_list to
+- * acquire an mm, then dropping vma_lock to get the mmap_lock and
+- * reacquiring vma_lock. This logic is derived from similar
+- * requirements in uverbs_user_mmap_disassociate().
+- *
+- * mmap_lock must always be the top-level lock when it is taken.
+- * Therefore we can only hold the memory_lock write lock when
+- * vma_list is empty, as we'd need to take mmap_lock to clear
+- * entries. vma_list can only be guaranteed empty when holding
+- * vma_lock, thus memory_lock is nested under vma_lock.
+- *
+- * This enables the vm_ops.fault callback to acquire vma_lock,
+- * followed by memory_lock read lock, while already holding
+- * mmap_lock without risk of deadlock.
+- */
+- while (1) {
+- struct mm_struct *mm = NULL;
+-
+- if (try) {
+- if (!mutex_trylock(&vdev->vma_lock))
+- return 0;
+- } else {
+- mutex_lock(&vdev->vma_lock);
+- }
+- while (!list_empty(&vdev->vma_list)) {
+- mmap_vma = list_first_entry(&vdev->vma_list,
+- struct vfio_pci_mmap_vma,
+- vma_next);
+- mm = mmap_vma->vma->vm_mm;
+- if (mmget_not_zero(mm))
+- break;
+-
+- list_del(&mmap_vma->vma_next);
+- kfree(mmap_vma);
+- mm = NULL;
+- }
+- if (!mm)
+- return 1;
+- mutex_unlock(&vdev->vma_lock);
+-
+- if (try) {
+- if (!mmap_read_trylock(mm)) {
+- mmput(mm);
+- return 0;
+- }
+- } else {
+- mmap_read_lock(mm);
+- }
+- if (try) {
+- if (!mutex_trylock(&vdev->vma_lock)) {
+- mmap_read_unlock(mm);
+- mmput(mm);
+- return 0;
+- }
+- } else {
+- mutex_lock(&vdev->vma_lock);
+- }
+- list_for_each_entry_safe(mmap_vma, tmp,
+- &vdev->vma_list, vma_next) {
+- struct vm_area_struct *vma = mmap_vma->vma;
+-
+- if (vma->vm_mm != mm)
+- continue;
+-
+- list_del(&mmap_vma->vma_next);
+- kfree(mmap_vma);
+-
+- zap_vma_ptes(vma, vma->vm_start,
+- vma->vm_end - vma->vm_start);
+- }
+- mutex_unlock(&vdev->vma_lock);
+- mmap_read_unlock(mm);
+- mmput(mm);
+- }
++ unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true);
+ }
+
+ void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
+ {
+- vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+- mutex_unlock(&vdev->vma_lock);
++ vfio_pci_zap_bars(vdev);
+ }
+
+ u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
+@@ -1714,99 +1634,41 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c
+ up_write(&vdev->memory_lock);
+ }
+
+-/* Caller holds vma_lock */
+-static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
+- struct vm_area_struct *vma)
+-{
+- struct vfio_pci_mmap_vma *mmap_vma;
+-
+- mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT);
+- if (!mmap_vma)
+- return -ENOMEM;
+-
+- mmap_vma->vma = vma;
+- list_add(&mmap_vma->vma_next, &vdev->vma_list);
+-
+- return 0;
+-}
+-
+-/*
+- * Zap mmaps on open so that we can fault them in on access and therefore
+- * our vma_list only tracks mappings accessed since last zap.
+- */
+-static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+-{
+- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+-}
+-
+-static void vfio_pci_mmap_close(struct vm_area_struct *vma)
++static unsigned long vma_to_pfn(struct vm_area_struct *vma)
+ {
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+- struct vfio_pci_mmap_vma *mmap_vma;
++ int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
++ u64 pgoff;
+
+- mutex_lock(&vdev->vma_lock);
+- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+- if (mmap_vma->vma == vma) {
+- list_del(&mmap_vma->vma_next);
+- kfree(mmap_vma);
+- break;
+- }
+- }
+- mutex_unlock(&vdev->vma_lock);
++ pgoff = vma->vm_pgoff &
++ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
++
++ return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff;
+ }
+
+ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+ {
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_core_device *vdev = vma->vm_private_data;
+- struct vfio_pci_mmap_vma *mmap_vma;
+- vm_fault_t ret = VM_FAULT_NOPAGE;
++ unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
++ vm_fault_t ret = VM_FAULT_SIGBUS;
+
+- mutex_lock(&vdev->vma_lock);
+- down_read(&vdev->memory_lock);
++ pfn = vma_to_pfn(vma);
+
+- /*
+- * Memory region cannot be accessed if the low power feature is engaged
+- * or memory access is disabled.
+- */
+- if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) {
+- ret = VM_FAULT_SIGBUS;
+- goto up_out;
+- }
++ down_read(&vdev->memory_lock);
+
+- /*
+- * We populate the whole vma on fault, so we need to test whether
+- * the vma has already been mapped, such as for concurrent faults
+- * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if
+- * we ask it to fill the same range again.
+- */
+- list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+- if (mmap_vma->vma == vma)
+- goto up_out;
+- }
++ if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
++ goto out_disabled;
+
+- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot)) {
+- ret = VM_FAULT_SIGBUS;
+- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+- goto up_out;
+- }
++ ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+
+- if (__vfio_pci_add_vma(vdev, vma)) {
+- ret = VM_FAULT_OOM;
+- zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+- }
+-
+-up_out:
++out_disabled:
+ up_read(&vdev->memory_lock);
+- mutex_unlock(&vdev->vma_lock);
++
+ return ret;
+ }
+
+ static const struct vm_operations_struct vfio_pci_mmap_ops = {
+- .open = vfio_pci_mmap_open,
+- .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+ };
+
+@@ -1869,11 +1731,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
+
+ vma->vm_private_data = vdev;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
++ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+ /*
+- * See remap_pfn_range(), called from vfio_pci_fault() but we can't
+- * change vm_flags within the fault handler. Set them now.
++ * Set vm_flags now, they should not be changed in the fault handler.
++ * We want the same flags and page protection (decrypted above) as
++ * io_remap_pfn_range() would set.
+ */
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &vfio_pci_mmap_ops;
+@@ -2173,8 +2036,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
+ mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
+ INIT_LIST_HEAD(&vdev->ioeventfds_list);
+- mutex_init(&vdev->vma_lock);
+- INIT_LIST_HEAD(&vdev->vma_list);
+ INIT_LIST_HEAD(&vdev->sriov_pfs_item);
+ init_rwsem(&vdev->memory_lock);
+ xa_init(&vdev->ctx);
+@@ -2190,7 +2051,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
+
+ mutex_destroy(&vdev->igate);
+ mutex_destroy(&vdev->ioeventfds_lock);
+- mutex_destroy(&vdev->vma_lock);
+ kfree(vdev->region);
+ kfree(vdev->pm_save);
+ }
+@@ -2468,26 +2328,15 @@ static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
+ return ret;
+ }
+
+-/*
+- * We need to get memory_lock for each device, but devices can share mmap_lock,
+- * therefore we need to zap and hold the vma_lock for each device, and only then
+- * get each memory_lock.
+- */
+ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ struct vfio_pci_group_info *groups,
+ struct iommufd_ctx *iommufd_ctx)
+ {
+- struct vfio_pci_core_device *cur_mem;
+- struct vfio_pci_core_device *cur_vma;
+- struct vfio_pci_core_device *cur;
++ struct vfio_pci_core_device *vdev;
+ struct pci_dev *pdev;
+- bool is_mem = true;
+ int ret;
+
+ mutex_lock(&dev_set->lock);
+- cur_mem = list_first_entry(&dev_set->device_list,
+- struct vfio_pci_core_device,
+- vdev.dev_set_list);
+
+ pdev = vfio_pci_dev_set_resettable(dev_set);
+ if (!pdev) {
+@@ -2504,7 +2353,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ if (ret)
+ goto err_unlock;
+
+- list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
++ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) {
+ bool owned;
+
+ /*
+@@ -2528,38 +2377,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ * Otherwise, reset is not allowed.
+ */
+ if (iommufd_ctx) {
+- int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev,
++ int devid = vfio_iommufd_get_dev_id(&vdev->vdev,
+ iommufd_ctx);
+
+ owned = (devid > 0 || devid == -ENOENT);
+ } else {
+- owned = vfio_dev_in_groups(&cur_vma->vdev, groups);
++ owned = vfio_dev_in_groups(&vdev->vdev, groups);
+ }
+
+ if (!owned) {
+ ret = -EINVAL;
+- goto err_undo;
++ break;
+ }
+
+ /*
+- * Locking multiple devices is prone to deadlock, runaway and
+- * unwind if we hit contention.
++ * Take the memory write lock for each device and zap BAR
++ * mappings to prevent the user accessing the device while in
++ * reset. Locking multiple devices is prone to deadlock,
++ * runaway and unwind if we hit contention.
+ */
+- if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
++ if (!down_write_trylock(&vdev->memory_lock)) {
+ ret = -EBUSY;
+- goto err_undo;
++ break;
+ }
++
++ vfio_pci_zap_bars(vdev);
+ }
+- cur_vma = NULL;
+
+- list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
+- if (!down_write_trylock(&cur_mem->memory_lock)) {
+- ret = -EBUSY;
+- goto err_undo;
+- }
+- mutex_unlock(&cur_mem->vma_lock);
++ if (!list_entry_is_head(vdev,
++ &dev_set->device_list, vdev.dev_set_list)) {
++ vdev = list_prev_entry(vdev, vdev.dev_set_list);
++ goto err_undo;
+ }
+- cur_mem = NULL;
+
+ /*
+ * The pci_reset_bus() will reset all the devices in the bus.
+@@ -2570,25 +2419,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
+ * cause the PCI config space reset without restoring the original
+ * state (saved locally in 'vdev->pm_save').
+ */
+- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+- vfio_pci_set_power_state(cur, PCI_D0);
++ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
++ vfio_pci_set_power_state(vdev, PCI_D0);
+
+ ret = pci_reset_bus(pdev);
+
++ vdev = list_last_entry(&dev_set->device_list,
++ struct vfio_pci_core_device, vdev.dev_set_list);
++
+ err_undo:
+- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
+- if (cur == cur_mem)
+- is_mem = false;
+- if (cur == cur_vma)
+- break;
+- if (is_mem)
+- up_write(&cur->memory_lock);
+- else
+- mutex_unlock(&cur->vma_lock);
+- }
++ list_for_each_entry_from_reverse(vdev, &dev_set->device_list,
++ vdev.dev_set_list)
++ up_write(&vdev->memory_lock);
++
++ list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list)
++ pm_runtime_put(&vdev->pdev->dev);
+
+- list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
+- pm_runtime_put(&cur->pdev->dev);
+ err_unlock:
+ mutex_unlock(&dev_set->lock);
+ return ret;
+diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
+index 562e8754869da..4f283514a1ed6 100644
+--- a/include/linux/vfio_pci_core.h
++++ b/include/linux/vfio_pci_core.h
+@@ -93,8 +93,6 @@ struct vfio_pci_core_device {
+ struct list_head sriov_pfs_item;
+ struct vfio_pci_core_device *sriov_pf_core_dev;
+ struct notifier_block nb;
+- struct mutex vma_lock;
+- struct list_head vma_list;
+ struct rw_semaphore memory_lock;
+ };
+
+--
+2.53.0
+