--- /dev/null
+From 34ecb8760190606472f71ebf4ca2817928ce5d40 Mon Sep 17 00:00:00 2001
+From: Zenm Chen <zenmchen@gmail.com>
+Date: Sat, 26 Jul 2025 00:14:32 +0800
+Subject: Bluetooth: btusb: Add USB ID 2001:332a for D-Link AX9U rev. A1
+
+From: Zenm Chen <zenmchen@gmail.com>
+
+commit 34ecb8760190606472f71ebf4ca2817928ce5d40 upstream.
+
+Add USB ID 2001:332a for D-Link AX9U rev. A1 which is based on a Realtek
+RTL8851BU chip.
+
+The information in /sys/kernel/debug/usb/devices about the Bluetooth
+device is listed as the below:
+
+T: Bus=03 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 2 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=2001 ProdID=332a Rev= 0.00
+S: Manufacturer=Realtek
+S: Product=802.11ax WLAN Adapter
+S: SerialNumber=00e04c000001
+C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=500mA
+A: FirstIf#= 0 IfCount= 2 Cls=e0(wlcon) Sub=01 Prot=01
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms
+I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms
+I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms
+I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms
+I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms
+I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms
+I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb
+E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms
+I:* If#= 2 Alt= 0 #EPs= 8 Cls=ff(vend.) Sub=ff Prot=ff Driver=rtw89_8851bu_git
+E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=07(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=09(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=0a(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=0b(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=0c(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Cc: stable@vger.kernel.org # 6.12.x
+Signed-off-by: Zenm Chen <zenmchen@gmail.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Zenm Chen <zenmchen@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/btusb.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -511,6 +511,8 @@ static const struct usb_device_id quirks
+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
++ BTUSB_WIDEBAND_SPEECH },
+
+ /* Realtek 8852AE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
--- /dev/null
+From stable+bounces-187854-greg=kroah.com@vger.kernel.org Sat Oct 18 18:25:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Oct 2025 12:25:19 -0400
+Subject: cpufreq: CPPC: Avoid using CPUFREQ_ETERNAL as transition delay
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, Jie Zhan <zhanjie9@hisilicon.com>, Viresh Kumar <viresh.kumar@linaro.org>, Qais Yousef <qyousef@layalina.io>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018162519.839592-1-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit f965d111e68f4a993cc44d487d416e3d954eea11 ]
+
+If cppc_get_transition_latency() returns CPUFREQ_ETERNAL to indicate a
+failure to retrieve the transition latency value from the platform
+firmware, the CPPC cpufreq driver will use that value (converted to
+microseconds) as the policy transition delay, but it is way too large
+for any practical use.
+
+Address this by making the driver use the cpufreq's default
+transition latency value (in microseconds) as the transition delay
+if CPUFREQ_ETERNAL is returned by cppc_get_transition_latency().
+
+Fixes: d4f3388afd48 ("cpufreq / CPPC: Set platform specific transition_delay_us")
+Cc: 5.19+ <stable@vger.kernel.org> # 5.19
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Jie Zhan <zhanjie9@hisilicon.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Reviewed-by: Qais Yousef <qyousef@layalina.io>
+[ added CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS definition to include/linux/cpufreq.h ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/cppc_cpufreq.c | 14 ++++++++++++--
+ include/linux/cpufreq.h | 3 +++
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -344,6 +344,16 @@ static int cppc_verify_policy(struct cpu
+ return 0;
+ }
+
++static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
++{
++ unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
++
++ if (transition_latency_ns == CPUFREQ_ETERNAL)
++ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
++
++ return transition_latency_ns / NSEC_PER_USEC;
++}
++
+ /*
+ * The PCC subspace describes the rate at which platform can accept commands
+ * on the shared PCC channel (including READs which do not count towards freq
+@@ -366,12 +376,12 @@ static unsigned int cppc_cpufreq_get_tra
+ return 10000;
+ }
+ }
+- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++ return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #else
+ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+ {
+- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
++ return __cppc_cpufreq_get_transition_delay_us(cpu);
+ }
+ #endif
+
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -32,6 +32,9 @@
+ */
+
+ #define CPUFREQ_ETERNAL (-1)
++
++#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
++
+ #define CPUFREQ_NAME_LEN 16
+ /* Print length for names. Extra 1 space for accommodating '\n' in prints */
+ #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
--- /dev/null
+From c760bcda83571e07b72c10d9da175db5051ed971 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Thu, 25 Sep 2025 14:10:57 -0500
+Subject: drm/amd: Check whether secure display TA loaded successfully
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit c760bcda83571e07b72c10d9da175db5051ed971 upstream.
+
+[Why]
+Not all renoir hardware supports secure display. If the TA is present
+but the feature isn't supported it will fail to load or send commands.
+This shows ERR messages to the user that make it seem like there is
+a problem.
+
+[How]
+Check the resp_status of the context to see if there was an error
+before trying to send any secure display commands.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/1415
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Adrian Yip <adrian.ytw@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2012,7 +2012,7 @@ static int psp_securedisplay_initialize(
+ }
+
+ ret = psp_ta_load(psp, &psp->securedisplay_context.context);
+- if (!ret) {
++ if (!ret && !psp->securedisplay_context.context.resp_status) {
+ psp->securedisplay_context.context.initialized = true;
+ mutex_init(&psp->securedisplay_context.mutex);
+ } else
--- /dev/null
+From stable+bounces-187785-greg=kroah.com@vger.kernel.org Sat Oct 18 04:18:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 22:18:17 -0400
+Subject: drm/exynos: exynos7_drm_decon: fix uninitialized crtc reference in functions
+To: stable@vger.kernel.org
+Cc: Kaustabh Chakraborty <kauschluss@disroot.org>, Inki Dae <inki.dae@samsung.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018021819.214653-1-sashal@kernel.org>
+
+From: Kaustabh Chakraborty <kauschluss@disroot.org>
+
+[ Upstream commit d31bbacf783daf1e71fbe5c68df93550c446bf44 ]
+
+Modify the functions to accept a pointer to struct decon_context
+instead.
+
+Signed-off-by: Kaustabh Chakraborty <kauschluss@disroot.org>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Stable-dep-of: e1361a4f1be9 ("drm/exynos: exynos7_drm_decon: remove ctx->suspended")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/exynos/exynos7_drm_decon.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+@@ -81,10 +81,8 @@ static const enum drm_plane_type decon_w
+ DRM_PLANE_TYPE_CURSOR,
+ };
+
+-static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
++static void decon_wait_for_vblank(struct decon_context *ctx)
+ {
+- struct decon_context *ctx = crtc->ctx;
+-
+ if (ctx->suspended)
+ return;
+
+@@ -100,9 +98,8 @@ static void decon_wait_for_vblank(struct
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
+ }
+
+-static void decon_clear_channels(struct exynos_drm_crtc *crtc)
++static void decon_clear_channels(struct decon_context *ctx)
+ {
+- struct decon_context *ctx = crtc->ctx;
+ unsigned int win, ch_enabled = 0;
+
+ /* Check if any channel is enabled. */
+@@ -118,7 +115,7 @@ static void decon_clear_channels(struct
+
+ /* Wait for vsync, as disable channel takes effect at next vsync */
+ if (ch_enabled)
+- decon_wait_for_vblank(ctx->crtc);
++ decon_wait_for_vblank(ctx);
+ }
+
+ static int decon_ctx_initialize(struct decon_context *ctx,
+@@ -126,7 +123,7 @@ static int decon_ctx_initialize(struct d
+ {
+ ctx->drm_dev = drm_dev;
+
+- decon_clear_channels(ctx->crtc);
++ decon_clear_channels(ctx);
+
+ return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
+ }
--- /dev/null
+From stable+bounces-187786-greg=kroah.com@vger.kernel.org Sat Oct 18 04:18:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 22:18:18 -0400
+Subject: drm/exynos: exynos7_drm_decon: properly clear channels during bind
+To: stable@vger.kernel.org
+Cc: Kaustabh Chakraborty <kauschluss@disroot.org>, Inki Dae <inki.dae@samsung.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018021819.214653-2-sashal@kernel.org>
+
+From: Kaustabh Chakraborty <kauschluss@disroot.org>
+
+[ Upstream commit 5f1a453974204175f20b3788824a0fe23cc36f79 ]
+
+The DECON channels are not cleared properly as the windows aren't
+shadow protected. When accompanied with an IOMMU, it pagefaults, and
+the kernel panics.
+
+Implement shadow protect/unprotect, along with a standalone update,
+for channel clearing to properly take effect.
+
+Signed-off-by: Kaustabh Chakraborty <kauschluss@disroot.org>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Stable-dep-of: e1361a4f1be9 ("drm/exynos: exynos7_drm_decon: remove ctx->suspended")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/exynos/exynos7_drm_decon.c | 55 ++++++++++++++++-------------
+ 1 file changed, 32 insertions(+), 23 deletions(-)
+
+--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+@@ -81,6 +81,28 @@ static const enum drm_plane_type decon_w
+ DRM_PLANE_TYPE_CURSOR,
+ };
+
++/**
++ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
++ *
++ * @ctx: display and enhancement controller context
++ * @win: window to protect registers for
++ * @protect: 1 to protect (disable updates)
++ */
++static void decon_shadow_protect_win(struct decon_context *ctx,
++ unsigned int win, bool protect)
++{
++ u32 bits, val;
++
++ bits = SHADOWCON_WINx_PROTECT(win);
++
++ val = readl(ctx->regs + SHADOWCON);
++ if (protect)
++ val |= bits;
++ else
++ val &= ~bits;
++ writel(val, ctx->regs + SHADOWCON);
++}
++
+ static void decon_wait_for_vblank(struct decon_context *ctx)
+ {
+ if (ctx->suspended)
+@@ -101,18 +123,27 @@ static void decon_wait_for_vblank(struct
+ static void decon_clear_channels(struct decon_context *ctx)
+ {
+ unsigned int win, ch_enabled = 0;
++ u32 val;
+
+ /* Check if any channel is enabled. */
+ for (win = 0; win < WINDOWS_NR; win++) {
+- u32 val = readl(ctx->regs + WINCON(win));
++ val = readl(ctx->regs + WINCON(win));
+
+ if (val & WINCONx_ENWIN) {
++ decon_shadow_protect_win(ctx, win, true);
++
+ val &= ~WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+ ch_enabled = 1;
++
++ decon_shadow_protect_win(ctx, win, false);
+ }
+ }
+
++ val = readl(ctx->regs + DECON_UPDATE);
++ val |= DECON_UPDATE_STANDALONE_F;
++ writel(val, ctx->regs + DECON_UPDATE);
++
+ /* Wait for vsync, as disable channel takes effect at next vsync */
+ if (ch_enabled)
+ decon_wait_for_vblank(ctx);
+@@ -340,28 +371,6 @@ static void decon_win_set_colkey(struct
+ writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+ }
+
+-/**
+- * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
+- *
+- * @ctx: display and enhancement controller context
+- * @win: window to protect registers for
+- * @protect: 1 to protect (disable updates)
+- */
+-static void decon_shadow_protect_win(struct decon_context *ctx,
+- unsigned int win, bool protect)
+-{
+- u32 bits, val;
+-
+- bits = SHADOWCON_WINx_PROTECT(win);
+-
+- val = readl(ctx->regs + SHADOWCON);
+- if (protect)
+- val |= bits;
+- else
+- val &= ~bits;
+- writel(val, ctx->regs + SHADOWCON);
+-}
+-
+ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
+ {
+ struct decon_context *ctx = crtc->ctx;
--- /dev/null
+From stable+bounces-187787-greg=kroah.com@vger.kernel.org Sat Oct 18 04:18:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 22:18:19 -0400
+Subject: drm/exynos: exynos7_drm_decon: remove ctx->suspended
+To: stable@vger.kernel.org
+Cc: Kaustabh Chakraborty <kauschluss@disroot.org>, Inki Dae <inki.dae@samsung.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018021819.214653-3-sashal@kernel.org>
+
+From: Kaustabh Chakraborty <kauschluss@disroot.org>
+
+[ Upstream commit e1361a4f1be9cb69a662c6d7b5ce218007d6e82b ]
+
+Condition guards are found to be redundant, as the call flow is properly
+managed now, as also observed in the Exynos5433 DECON driver. Since
+state checking is no longer necessary, remove it.
+
+This also fixes an issue which prevented decon_commit() from
+decon_atomic_enable() due to an incorrect state change setting.
+
+Fixes: 96976c3d9aff ("drm/exynos: Add DECON driver")
+Cc: stable@vger.kernel.org
+Suggested-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Kaustabh Chakraborty <kauschluss@disroot.org>
+Signed-off-by: Inki Dae <inki.dae@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/exynos/exynos7_drm_decon.c | 36 -----------------------------
+ 1 file changed, 36 deletions(-)
+
+--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+@@ -51,7 +51,6 @@ struct decon_context {
+ void __iomem *regs;
+ unsigned long irq_flags;
+ bool i80_if;
+- bool suspended;
+ wait_queue_head_t wait_vsync_queue;
+ atomic_t wait_vsync_event;
+
+@@ -105,9 +104,6 @@ static void decon_shadow_protect_win(str
+
+ static void decon_wait_for_vblank(struct decon_context *ctx)
+ {
+- if (ctx->suspended)
+- return;
+-
+ atomic_set(&ctx->wait_vsync_event, 1);
+
+ /*
+@@ -183,9 +179,6 @@ static void decon_commit(struct exynos_d
+ struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+ u32 val, clkdiv;
+
+- if (ctx->suspended)
+- return;
+-
+ /* nothing to do if we haven't set the mode yet */
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return;
+@@ -247,9 +240,6 @@ static int decon_enable_vblank(struct ex
+ struct decon_context *ctx = crtc->ctx;
+ u32 val;
+
+- if (ctx->suspended)
+- return -EPERM;
+-
+ if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+@@ -272,9 +262,6 @@ static void decon_disable_vblank(struct
+ struct decon_context *ctx = crtc->ctx;
+ u32 val;
+
+- if (ctx->suspended)
+- return;
+-
+ if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ val = readl(ctx->regs + VIDINTCON0);
+
+@@ -376,9 +363,6 @@ static void decon_atomic_begin(struct ex
+ struct decon_context *ctx = crtc->ctx;
+ int i;
+
+- if (ctx->suspended)
+- return;
+-
+ for (i = 0; i < WINDOWS_NR; i++)
+ decon_shadow_protect_win(ctx, i, true);
+ }
+@@ -398,9 +382,6 @@ static void decon_update_plane(struct ex
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int pitch = fb->pitches[0];
+
+- if (ctx->suspended)
+- return;
+-
+ /*
+ * SHADOWCON/PRTCON register is used for enabling timing.
+ *
+@@ -488,9 +469,6 @@ static void decon_disable_plane(struct e
+ unsigned int win = plane->index;
+ u32 val;
+
+- if (ctx->suspended)
+- return;
+-
+ /* protect windows */
+ decon_shadow_protect_win(ctx, win, true);
+
+@@ -509,9 +487,6 @@ static void decon_atomic_flush(struct ex
+ struct decon_context *ctx = crtc->ctx;
+ int i;
+
+- if (ctx->suspended)
+- return;
+-
+ for (i = 0; i < WINDOWS_NR; i++)
+ decon_shadow_protect_win(ctx, i, false);
+ exynos_crtc_handle_event(crtc);
+@@ -539,9 +514,6 @@ static void decon_atomic_enable(struct e
+ struct decon_context *ctx = crtc->ctx;
+ int ret;
+
+- if (!ctx->suspended)
+- return;
+-
+ ret = pm_runtime_resume_and_get(ctx->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
+@@ -555,8 +527,6 @@ static void decon_atomic_enable(struct e
+ decon_enable_vblank(ctx->crtc);
+
+ decon_commit(ctx->crtc);
+-
+- ctx->suspended = false;
+ }
+
+ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
+@@ -564,9 +534,6 @@ static void decon_atomic_disable(struct
+ struct decon_context *ctx = crtc->ctx;
+ int i;
+
+- if (ctx->suspended)
+- return;
+-
+ /*
+ * We need to make sure that all windows are disabled before we
+ * suspend that connector. Otherwise we might try to scan from
+@@ -576,8 +543,6 @@ static void decon_atomic_disable(struct
+ decon_disable_plane(crtc, &ctx->planes[i]);
+
+ pm_runtime_put_sync(ctx->dev);
+-
+- ctx->suspended = true;
+ }
+
+ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
+@@ -698,7 +663,6 @@ static int decon_probe(struct platform_d
+ return -ENOMEM;
+
+ ctx->dev = dev;
+- ctx->suspended = true;
+
+ i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ if (i80_if_timings)
--- /dev/null
+From stable+bounces-187809-greg=kroah.com@vger.kernel.org Sat Oct 18 05:24:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 23:24:08 -0400
+Subject: drm/msm/a6xx: Fix PDC sleep sequence
+To: stable@vger.kernel.org
+Cc: Akhil P Oommen <akhilpo@oss.qualcomm.com>, Rob Clark <robin.clark@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018032408.252050-2-sashal@kernel.org>
+
+From: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+
+[ Upstream commit f248d5d5159a88ded55329f0b1b463d0f4094228 ]
+
+Since the PDC resides out of the GPU subsystem and cannot be reset in
+case it enters bad state, utmost care must be taken to trigger the PDC
+wake/sleep routines in the correct order.
+
+The PDC wake sequence can be exercised only after a PDC sleep sequence.
+Additionally, GMU firmware should initialize a few registers before the
+KMD can trigger a PDC sleep sequence. So PDC sleep can't be done if the
+GMU firmware has not initialized. Track these dependencies using a new
+status variable and trigger PDC sleep/wake sequences appropriately.
+
+Cc: stable@vger.kernel.org
+Fixes: 4b565ca5a2cb ("drm/msm: Add A6XX device support")
+Signed-off-by: Akhil P Oommen <akhilpo@oss.qualcomm.com>
+Patchwork: https://patchwork.freedesktop.org/patch/673362/
+Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
+[ omitted A7XX GPU logic and newer struct fields ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 34 ++++++++++++++++++++--------------
+ drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 6 ++++++
+ 2 files changed, 26 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -230,6 +230,8 @@ static int a6xx_gmu_start(struct a6xx_gm
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+
++ set_bit(GMU_STATUS_FW_START, &gmu->status);
++
+ return ret;
+ }
+
+@@ -460,6 +462,9 @@ static int a6xx_rpmh_start(struct a6xx_g
+ int ret;
+ u32 val;
+
++ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
++ return 0;
++
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+@@ -487,6 +492,9 @@ static void a6xx_rpmh_stop(struct a6xx_g
+ int ret;
+ u32 val;
+
++ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
++ return;
++
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+@@ -495,6 +503,8 @@ static void a6xx_rpmh_stop(struct a6xx_g
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
++
++ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
+ }
+
+ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+@@ -615,8 +625,6 @@ setup_pdc:
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+
+- a6xx_rpmh_stop(gmu);
+-
+ err:
+ if (!IS_ERR_OR_NULL(pdcptr))
+ iounmap(pdcptr);
+@@ -753,22 +761,18 @@ static int a6xx_gmu_fw_start(struct a6xx
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ }
+
+- if (state == GMU_WARM_BOOT) {
+- ret = a6xx_rpmh_start(gmu);
+- if (ret)
+- return ret;
+- } else {
++ /* Turn on register retention */
++ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
++
++ ret = a6xx_rpmh_start(gmu);
++ if (ret)
++ return ret;
++
++ if (state == GMU_COLD_BOOT) {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+- /* Turn on register retention */
+- gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+-
+- ret = a6xx_rpmh_start(gmu);
+- if (ret)
+- return ret;
+-
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
+@@ -907,6 +911,8 @@ static void a6xx_gmu_force_off(struct a6
+
+ /* Reset GPU core blocks */
+ a6xx_gpu_sw_reset(gpu, true);
++
++ a6xx_rpmh_stop(gmu);
+ }
+
+ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+@@ -96,6 +96,12 @@ struct a6xx_gmu {
+ /* For power domain callback */
+ struct notifier_block pd_nb;
+ struct completion pd_gate;
++
++/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
++#define GMU_STATUS_FW_START 0
++/* To track if PDC sleep seq was done */
++#define GMU_STATUS_PDC_SLEEP 1
++ unsigned long status;
+ };
+
+ static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
--- /dev/null
+From stable+bounces-187808-greg=kroah.com@vger.kernel.org Sat Oct 18 05:24:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 23:24:07 -0400
+Subject: drm/msm/adreno: De-spaghettify the use of memory barriers
+To: stable@vger.kernel.org
+Cc: Konrad Dybcio <konrad.dybcio@linaro.org>, Akhil P Oommen <quic_akhilpo@quicinc.com>, Rob Clark <robdclark@chromium.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018032408.252050-1-sashal@kernel.org>
+
+From: Konrad Dybcio <konrad.dybcio@linaro.org>
+
+[ Upstream commit 43ec1a202cfa9f765412d325b93873284e7c3d82 ]
+
+Memory barriers help ensure instruction ordering, NOT time and order
+of actual write arrival at other observers (e.g. memory-mapped IP).
+On architectures employing weak memory ordering, the latter can be a
+giant pain point, and it has been as part of this driver.
+
+Moreover, the gpu_/gmu_ accessors already use non-relaxed versions of
+readl/writel, which include r/w (respectively) barriers.
+
+Replace the barriers with a readback (or drop altogether where possible)
+that ensures the previous writes have exited the write buffer (as the CPU
+must flush the write to the register it's trying to read back).
+
+Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Patchwork: https://patchwork.freedesktop.org/patch/600869/
+Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Stable-dep-of: f248d5d5159a ("drm/msm/a6xx: Fix PDC sleep sequence")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 4 +---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 10 ++++++----
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -460,9 +460,7 @@ static int a6xx_rpmh_start(struct a6xx_g
+ int ret;
+ u32 val;
+
+- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+- /* Wait for the register to finish posting */
+- wmb();
++ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1209,14 +1209,16 @@ static int hw_init(struct msm_gpu *gpu)
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
++ gpu_read(gpu, REG_A6XX_GBIF_HALT);
++
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+- /* Let's make extra sure that the GPU can access the memory.. */
+- mb();
++ gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL);
+ } else if (a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
++ gpu_read(gpu, REG_A6XX_GBIF_HALT);
++
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+- /* Let's make extra sure that the GPU can access the memory.. */
+- mb();
++ gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT);
+ }
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
--- /dev/null
+From stable+bounces-187866-greg=kroah.com@vger.kernel.org Sat Oct 18 21:36:37 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Oct 2025 15:36:27 -0400
+Subject: epoll: Remove ep_scan_ready_list() in comments
+To: stable@vger.kernel.org
+Cc: Huang Xiaojia <huangxiaojia2@huawei.com>, Jan Kara <jack@suse.cz>, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018193629.891117-1-sashal@kernel.org>
+
+From: Huang Xiaojia <huangxiaojia2@huawei.com>
+
+[ Upstream commit e6f7958042a7b1dc9a4dfc19fca74217bc0c4865 ]
+
+Since commit 443f1a042233 ("lift the calls of ep_send_events_proc()
+into the callers"), ep_scan_ready_list() has been removed.
+But there are still several in comments. All of them should
+be replaced with other caller functions.
+
+Signed-off-by: Huang Xiaojia <huangxiaojia2@huawei.com>
+Link: https://lore.kernel.org/r/20240206014353.4191262-1-huangxiaojia2@huawei.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Stable-dep-of: 0c43094f8cc9 ("eventpoll: Replace rwlock with spinlock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/eventpoll.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -206,7 +206,7 @@ struct eventpoll {
+ */
+ struct epitem *ovflist;
+
+- /* wakeup_source used when ep_scan_ready_list is running */
++ /* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */
+ struct wakeup_source *ws;
+
+ /* The user that created the eventpoll descriptor */
+@@ -1190,7 +1190,7 @@ static inline bool chain_epi_lockless(st
+ * This callback takes a read lock in order not to contend with concurrent
+ * events from another file descriptor, thus all modifications to ->rdllist
+ * or ->ovflist are lockless. Read lock is paired with the write lock from
+- * ep_scan_ready_list(), which stops all list modifications and guarantees
++ * ep_start/done_scan(), which stops all list modifications and guarantees
+ * that lists state is seen correctly.
+ *
+ * Another thing worth to mention is that ep_poll_callback() can be called
+@@ -1792,7 +1792,7 @@ static int ep_send_events(struct eventpo
+ * availability. At this point, no one can insert
+ * into ep->rdllist besides us. The epoll_ctl()
+ * callers are locked out by
+- * ep_scan_ready_list() holding "mtx" and the
++ * ep_send_events() holding "mtx" and the
+ * poll callback will queue them in ep->ovflist.
+ */
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+@@ -1945,7 +1945,7 @@ static int ep_poll(struct eventpoll *ep,
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ /*
+- * Do the final check under the lock. ep_scan_ready_list()
++ * Do the final check under the lock. ep_start/done_scan()
+ * plays with two lists (->rdllist and ->ovflist) and there
+ * is always a race when both lists are empty for short
+ * period of time although events are pending, so lock is
--- /dev/null
+From stable+bounces-187867-greg=kroah.com@vger.kernel.org Sat Oct 18 21:36:38 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 18 Oct 2025 15:36:28 -0400
+Subject: eventpoll: Replace rwlock with spinlock
+To: stable@vger.kernel.org
+Cc: Nam Cao <namcao@linutronix.de>, K Prateek Nayak <kprateek.nayak@amd.com>, Frederic Weisbecker <frederic@kernel.org>, Valentin Schneider <vschneid@redhat.com>, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018193629.891117-2-sashal@kernel.org>
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 0c43094f8cc9d3d99d835c0ac9c4fe1ccc62babd ]
+
+The ready event list of an epoll object is protected by read-write
+semaphore:
+
+ - The consumer (waiter) acquires the write lock and takes items.
+ - the producer (waker) takes the read lock and adds items.
+
+The point of this design is enabling epoll to scale well with a large number
+of producers, as multiple producers can hold the read lock at the same
+time.
+
+Unfortunately, this implementation may cause scheduling priority inversion
+problem. Suppose the consumer has higher scheduling priority than the
+producer. The consumer needs to acquire the write lock, but may be blocked
+by the producer holding the read lock. Since read-write semaphore does not
+support priority-boosting for the readers (even with CONFIG_PREEMPT_RT=y),
+we have a case of priority inversion: a higher priority consumer is blocked
+by a lower priority producer. This problem was reported in [1].
+
+Furthermore, this could also cause stall problem, as described in [2].
+
+Fix this problem by replacing rwlock with spinlock.
+
+This reduces the event bandwidth, as the producers now have to contend with
+each other for the spinlock. According to the benchmark from
+https://github.com/rouming/test-tools/blob/master/stress-epoll.c:
+
+ On 12 x86 CPUs:
+ Before After Diff
+ threads events/ms events/ms
+ 8 7162 4956 -31%
+ 16 8733 5383 -38%
+ 32 7968 5572 -30%
+ 64 10652 5739 -46%
+ 128 11236 5931 -47%
+
+ On 4 riscv CPUs:
+ Before After Diff
+ threads events/ms events/ms
+ 8 2958 2833 -4%
+ 16 3323 3097 -7%
+ 32 3451 3240 -6%
+ 64 3554 3178 -11%
+ 128 3601 3235 -10%
+
+Although the numbers look bad, it should be noted that this benchmark
+creates multiple threads who do nothing except constantly generating new
+epoll events, thus contention on the spinlock is high. For real workload,
+the event rate is likely much lower, and the performance drop is not as
+bad.
+
+Using another benchmark (perf bench epoll wait) where spinlock contention
+is lower, improvement is even observed on x86:
+
+ On 12 x86 CPUs:
+ Before: Averaged 110279 operations/sec (+- 1.09%), total secs = 8
+ After: Averaged 114577 operations/sec (+- 2.25%), total secs = 8
+
+ On 4 riscv CPUs:
+ Before: Averaged 175767 operations/sec (+- 0.62%), total secs = 8
+ After: Averaged 167396 operations/sec (+- 0.23%), total secs = 8
+
+In conclusion, no one is likely to be upset over this change. After all,
+spinlock was used originally for years, and the commit which converted to
+rwlock didn't mention a real workload, just that the benchmark numbers are
+nice.
+
+This patch is not exactly the revert of commit a218cc491420 ("epoll: use
+rwlock in order to reduce ep_poll_callback() contention"), because git
+revert conflicts in some places which are not obvious on the resolution.
+This patch is intended to be backported, therefore go with the obvious
+approach:
+
+ - Replace rwlock_t with spinlock_t one to one
+
+ - Delete list_add_tail_lockless() and chain_epi_lockless(). These were
+ introduced to allow producers to concurrently add items to the list.
+ But now that spinlock no longer allows producers to touch the event
+ list concurrently, these two functions are not necessary anymore.
+
+Fixes: a218cc491420 ("epoll: use rwlock in order to reduce ep_poll_callback() contention")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Link: https://lore.kernel.org/ec92458ea357ec503c737ead0f10b2c6e4c37d47.1752581388.git.namcao@linutronix.de
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Cc: stable@vger.kernel.org
+Reported-by: Frederic Weisbecker <frederic@kernel.org>
+Closes: https://lore.kernel.org/linux-rt-users/20210825132754.GA895675@lothringen/ [1]
+Reported-by: Valentin Schneider <vschneid@redhat.com>
+Closes: https://lore.kernel.org/linux-rt-users/xhsmhttqvnall.mognet@vschneid.remote.csb/ [2]
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/eventpoll.c | 139 ++++++++++-----------------------------------------------
+ 1 file changed, 26 insertions(+), 113 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -45,10 +45,10 @@
+ *
+ * 1) epnested_mutex (mutex)
+ * 2) ep->mtx (mutex)
+- * 3) ep->lock (rwlock)
++ * 3) ep->lock (spinlock)
+ *
+ * The acquire order is the one listed above, from 1 to 3.
+- * We need a rwlock (ep->lock) because we manipulate objects
++ * We need a spinlock (ep->lock) because we manipulate objects
+ * from inside the poll callback, that might be triggered from
+ * a wake_up() that in turn might be called from IRQ context.
+ * So we can't sleep inside the poll callback and hence we need
+@@ -194,7 +194,7 @@ struct eventpoll {
+ struct list_head rdllist;
+
+ /* Lock which protects rdllist and ovflist */
+- rwlock_t lock;
++ spinlock_t lock;
+
+ /* RB tree root used to store monitored fd structs */
+ struct rb_root_cached rbr;
+@@ -625,10 +625,10 @@ static void ep_start_scan(struct eventpo
+ * in a lockless way.
+ */
+ lockdep_assert_irqs_enabled();
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ list_splice_init(&ep->rdllist, txlist);
+ WRITE_ONCE(ep->ovflist, NULL);
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+ }
+
+ static void ep_done_scan(struct eventpoll *ep,
+@@ -636,7 +636,7 @@ static void ep_done_scan(struct eventpol
+ {
+ struct epitem *epi, *nepi;
+
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ /*
+ * During the time we spent inside the "sproc" callback, some
+ * other events might have been queued by the poll callback.
+@@ -677,7 +677,7 @@ static void ep_done_scan(struct eventpol
+ wake_up(&ep->wq);
+ }
+
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+ }
+
+ static void epi_rcu_free(struct rcu_head *head)
+@@ -757,10 +757,10 @@ static bool __ep_remove(struct eventpoll
+
+ rb_erase_cached(&epi->rbn, &ep->rbr);
+
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ if (ep_is_linked(epi))
+ list_del_init(&epi->rdllink);
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+
+ wakeup_source_unregister(ep_wakeup_source(epi));
+ /*
+@@ -1018,7 +1018,7 @@ static int ep_alloc(struct eventpoll **p
+ return -ENOMEM;
+
+ mutex_init(&ep->mtx);
+- rwlock_init(&ep->lock);
++ spin_lock_init(&ep->lock);
+ init_waitqueue_head(&ep->wq);
+ init_waitqueue_head(&ep->poll_wait);
+ INIT_LIST_HEAD(&ep->rdllist);
+@@ -1106,99 +1106,9 @@ struct file *get_epoll_tfile_raw_ptr(str
+ #endif /* CONFIG_KCMP */
+
+ /*
+- * Adds a new entry to the tail of the list in a lockless way, i.e.
+- * multiple CPUs are allowed to call this function concurrently.
+- *
+- * Beware: it is necessary to prevent any other modifications of the
+- * existing list until all changes are completed, in other words
+- * concurrent list_add_tail_lockless() calls should be protected
+- * with a read lock, where write lock acts as a barrier which
+- * makes sure all list_add_tail_lockless() calls are fully
+- * completed.
+- *
+- * Also an element can be locklessly added to the list only in one
+- * direction i.e. either to the tail or to the head, otherwise
+- * concurrent access will corrupt the list.
+- *
+- * Return: %false if element has been already added to the list, %true
+- * otherwise.
+- */
+-static inline bool list_add_tail_lockless(struct list_head *new,
+- struct list_head *head)
+-{
+- struct list_head *prev;
+-
+- /*
+- * This is simple 'new->next = head' operation, but cmpxchg()
+- * is used in order to detect that same element has been just
+- * added to the list from another CPU: the winner observes
+- * new->next == new.
+- */
+- if (!try_cmpxchg(&new->next, &new, head))
+- return false;
+-
+- /*
+- * Initially ->next of a new element must be updated with the head
+- * (we are inserting to the tail) and only then pointers are atomically
+- * exchanged. XCHG guarantees memory ordering, thus ->next should be
+- * updated before pointers are actually swapped and pointers are
+- * swapped before prev->next is updated.
+- */
+-
+- prev = xchg(&head->prev, new);
+-
+- /*
+- * It is safe to modify prev->next and new->prev, because a new element
+- * is added only to the tail and new->next is updated before XCHG.
+- */
+-
+- prev->next = new;
+- new->prev = prev;
+-
+- return true;
+-}
+-
+-/*
+- * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
+- * i.e. multiple CPUs are allowed to call this function concurrently.
+- *
+- * Return: %false if epi element has been already chained, %true otherwise.
+- */
+-static inline bool chain_epi_lockless(struct epitem *epi)
+-{
+- struct eventpoll *ep = epi->ep;
+-
+- /* Fast preliminary check */
+- if (epi->next != EP_UNACTIVE_PTR)
+- return false;
+-
+- /* Check that the same epi has not been just chained from another CPU */
+- if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
+- return false;
+-
+- /* Atomically exchange tail */
+- epi->next = xchg(&ep->ovflist, epi);
+-
+- return true;
+-}
+-
+-/*
+ * This is the callback that is passed to the wait queue wakeup
+ * mechanism. It is called by the stored file descriptors when they
+ * have events to report.
+- *
+- * This callback takes a read lock in order not to contend with concurrent
+- * events from another file descriptor, thus all modifications to ->rdllist
+- * or ->ovflist are lockless. Read lock is paired with the write lock from
+- * ep_start/done_scan(), which stops all list modifications and guarantees
+- * that lists state is seen correctly.
+- *
+- * Another thing worth to mention is that ep_poll_callback() can be called
+- * concurrently for the same @epi from different CPUs if poll table was inited
+- * with several wait queues entries. Plural wakeup from different CPUs of a
+- * single wait queue is serialized by wq.lock, but the case when multiple wait
+- * queues are used should be detected accordingly. This is detected using
+- * cmpxchg() operation.
+ */
+ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+ {
+@@ -1209,7 +1119,7 @@ static int ep_poll_callback(wait_queue_e
+ unsigned long flags;
+ int ewake = 0;
+
+- read_lock_irqsave(&ep->lock, flags);
++ spin_lock_irqsave(&ep->lock, flags);
+
+ ep_set_busy_poll_napi_id(epi);
+
+@@ -1238,12 +1148,15 @@ static int ep_poll_callback(wait_queue_e
+ * chained in ep->ovflist and requeued later on.
+ */
+ if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
+- if (chain_epi_lockless(epi))
++ if (epi->next == EP_UNACTIVE_PTR) {
++ epi->next = READ_ONCE(ep->ovflist);
++ WRITE_ONCE(ep->ovflist, epi);
+ ep_pm_stay_awake_rcu(epi);
++ }
+ } else if (!ep_is_linked(epi)) {
+ /* In the usual case, add event to ready list. */
+- if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
+- ep_pm_stay_awake_rcu(epi);
++ list_add_tail(&epi->rdllink, &ep->rdllist);
++ ep_pm_stay_awake_rcu(epi);
+ }
+
+ /*
+@@ -1276,7 +1189,7 @@ static int ep_poll_callback(wait_queue_e
+ pwake++;
+
+ out_unlock:
+- read_unlock_irqrestore(&ep->lock, flags);
++ spin_unlock_irqrestore(&ep->lock, flags);
+
+ /* We have to call this outside the lock */
+ if (pwake)
+@@ -1611,7 +1524,7 @@ static int ep_insert(struct eventpoll *e
+ }
+
+ /* We have to drop the new item inside our item list to keep track of it */
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+
+ /* record NAPI ID of new item if present */
+ ep_set_busy_poll_napi_id(epi);
+@@ -1628,7 +1541,7 @@ static int ep_insert(struct eventpoll *e
+ pwake++;
+ }
+
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+
+ /* We have to call this outside the lock */
+ if (pwake)
+@@ -1692,7 +1605,7 @@ static int ep_modify(struct eventpoll *e
+ * list, push it inside.
+ */
+ if (ep_item_poll(epi, &pt, 1)) {
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ if (!ep_is_linked(epi)) {
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+ ep_pm_stay_awake(epi);
+@@ -1703,7 +1616,7 @@ static int ep_modify(struct eventpoll *e
+ if (waitqueue_active(&ep->poll_wait))
+ pwake++;
+ }
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+ }
+
+ /* We have to call this outside the lock */
+@@ -1936,7 +1849,7 @@ static int ep_poll(struct eventpoll *ep,
+ init_wait(&wait);
+ wait.func = ep_autoremove_wake_function;
+
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ /*
+ * Barrierless variant, waitqueue_active() is called under
+ * the same lock on wakeup ep_poll_callback() side, so it
+@@ -1955,7 +1868,7 @@ static int ep_poll(struct eventpoll *ep,
+ if (!eavail)
+ __add_wait_queue_exclusive(&ep->wq, &wait);
+
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+
+ if (!eavail)
+ timed_out = !schedule_hrtimeout_range(to, slack,
+@@ -1970,7 +1883,7 @@ static int ep_poll(struct eventpoll *ep,
+ eavail = 1;
+
+ if (!list_empty_careful(&wait.entry)) {
+- write_lock_irq(&ep->lock);
++ spin_lock_irq(&ep->lock);
+ /*
+ * If the thread timed out and is not on the wait queue,
+ * it means that the thread was woken up after its
+@@ -1981,7 +1894,7 @@ static int ep_poll(struct eventpoll *ep,
+ if (timed_out)
+ eavail = list_empty(&wait.entry);
+ __remove_wait_queue(&ep->wq, &wait);
+- write_unlock_irq(&ep->lock);
++ spin_unlock_irq(&ep->lock);
+ }
+ }
+ }
--- /dev/null
+From stable+bounces-187724-greg=kroah.com@vger.kernel.org Sat Oct 18 01:32:01 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 19:31:54 -0400
+Subject: media: nxp: imx8-isi: Drop unused argument to mxc_isi_channel_chain()
+To: stable@vger.kernel.org
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>, Frank Li <Frank.Li@nxp.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251017233155.38054-1-sashal@kernel.org>
+
+From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+[ Upstream commit 9a21ffeade25cbf310f5db39a1f9932695dd41bb ]
+
+The bypass argument to the mxc_isi_channel_chain() function is unused.
+Drop it.
+
+Link: https://lore.kernel.org/r/20250813225501.20762-1-laurent.pinchart@ideasonboard.com
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Stable-dep-of: 178aa3360220 ("media: nxp: imx8-isi: m2m: Fix streaming cleanup on release")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h | 2 +-
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c | 2 +-
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c | 11 +++++------
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c | 2 +-
+ 4 files changed, 8 insertions(+), 9 deletions(-)
+
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+@@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_
+ void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
+-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
+ void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
+
+ void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+@@ -589,7 +589,7 @@ void mxc_isi_channel_release(struct mxc_
+ *
+ * TODO: Support secondary line buffer for downscaling YUV420 images.
+ */
+-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
++int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
+ {
+ /* Channel chaining requires both line and output buffer. */
+ const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+@@ -493,7 +493,6 @@ static int mxc_isi_m2m_streamon(struct f
+ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+ const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+- bool bypass;
+ int ret;
+
+ if (q->streaming)
+@@ -506,15 +505,15 @@ static int mxc_isi_m2m_streamon(struct f
+ goto unlock;
+ }
+
+- bypass = cap_pix->width == out_pix->width &&
+- cap_pix->height == out_pix->height &&
+- cap_info->encoding == out_info->encoding;
+-
+ /*
+ * Acquire the pipe and initialize the channel with the first user of
+ * the M2M device.
+ */
+ if (m2m->usage_count == 0) {
++ bool bypass = cap_pix->width == out_pix->width &&
++ cap_pix->height == out_pix->height &&
++ cap_info->encoding == out_info->encoding;
++
+ ret = mxc_isi_channel_acquire(m2m->pipe,
+ &mxc_isi_m2m_frame_write_done,
+ bypass);
+@@ -531,7 +530,7 @@ static int mxc_isi_m2m_streamon(struct f
+ * buffer chaining.
+ */
+ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+- ret = mxc_isi_channel_chain(m2m->pipe, bypass);
++ ret = mxc_isi_channel_chain(m2m->pipe);
+ if (ret)
+ goto deinit;
+
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+@@ -851,7 +851,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_
+
+ /* Chain the channel if needed for wide resolutions. */
+ if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+- ret = mxc_isi_channel_chain(pipe, bypass);
++ ret = mxc_isi_channel_chain(pipe);
+ if (ret)
+ mxc_isi_channel_release(pipe);
+ }
--- /dev/null
+From stable+bounces-187725-greg=kroah.com@vger.kernel.org Sat Oct 18 01:32:03 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 19:31:55 -0400
+Subject: media: nxp: imx8-isi: m2m: Fix streaming cleanup on release
+To: stable@vger.kernel.org
+Cc: Guoniu Zhou <guoniu.zhou@nxp.com>, Laurent Pinchart <laurent.pinchart@ideasonboard.com>, Frank Li <Frank.Li@nxp.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251017233155.38054-2-sashal@kernel.org>
+
+From: Guoniu Zhou <guoniu.zhou@nxp.com>
+
+[ Upstream commit 178aa3360220231dd91e7dbc2eb984525886c9c1 ]
+
+If streamon/streamoff calls are imbalanced, such as when exiting an
+application with Ctrl+C when streaming, the m2m usage_count will never
+reach zero and the ISI channel won't be freed. Besides from that, if the
+input line width is more than 2K, it will trigger a WARN_ON():
+
+[ 59.222120] ------------[ cut here ]------------
+[ 59.226758] WARNING: drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c:631 at mxc_isi_channel_chain+0xa4/0x120, CPU#4: v4l2-ctl/654
+[ 59.238569] Modules linked in: ap1302
+[ 59.242231] CPU: 4 UID: 0 PID: 654 Comm: v4l2-ctl Not tainted 6.16.0-rc4-next-20250704-06511-gff0e002d480a-dirty #258 PREEMPT
+[ 59.253597] Hardware name: NXP i.MX95 15X15 board (DT)
+[ 59.258720] pstate: 80400009 (Nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 59.265669] pc : mxc_isi_channel_chain+0xa4/0x120
+[ 59.270358] lr : mxc_isi_channel_chain+0x44/0x120
+[ 59.275047] sp : ffff8000848c3b40
+[ 59.278348] x29: ffff8000848c3b40 x28: ffff0000859b4c98 x27: ffff800081939f00
+[ 59.285472] x26: 000000000000000a x25: ffff0000859b4cb8 x24: 0000000000000001
+[ 59.292597] x23: ffff0000816f4760 x22: ffff0000816f4258 x21: ffff000084ceb780
+[ 59.299720] x20: ffff000084342ff8 x19: ffff000084340000 x18: 0000000000000000
+[ 59.306845] x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffffdb369e1c
+[ 59.313969] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
+[ 59.321093] x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000
+[ 59.328217] x8 : ffff8000848c3d48 x7 : ffff800081930b30 x6 : ffff800081930b30
+[ 59.335340] x5 : ffff0000859b6000 x4 : ffff80008193ae80 x3 : ffff800081022420
+[ 59.342464] x2 : ffff0000852f6900 x1 : 0000000000000001 x0 : ffff000084341000
+[ 59.349590] Call trace:
+[ 59.352025] mxc_isi_channel_chain+0xa4/0x120 (P)
+[ 59.356722] mxc_isi_m2m_streamon+0x160/0x20c
+[ 59.361072] v4l_streamon+0x24/0x30
+[ 59.364556] __video_do_ioctl+0x40c/0x4a0
+[ 59.368560] video_usercopy+0x2bc/0x690
+[ 59.372382] video_ioctl2+0x18/0x24
+[ 59.375857] v4l2_ioctl+0x40/0x60
+[ 59.379168] __arm64_sys_ioctl+0xac/0x104
+[ 59.383172] invoke_syscall+0x48/0x104
+[ 59.386916] el0_svc_common.constprop.0+0xc0/0xe0
+[ 59.391613] do_el0_svc+0x1c/0x28
+[ 59.394915] el0_svc+0x34/0xf4
+[ 59.397966] el0t_64_sync_handler+0xa0/0xe4
+[ 59.402143] el0t_64_sync+0x198/0x19c
+[ 59.405801] ---[ end trace 0000000000000000 ]---
+
+Address this issue by moving the streaming preparation and cleanup to
+the vb2 .prepare_streaming() and .unprepare_streaming() operations. This
+also simplifies the driver by allowing direct usage of the
+v4l2_m2m_ioctl_streamon() and v4l2_m2m_ioctl_streamoff() helpers.
+
+Fixes: cf21f328fcaf ("media: nxp: Add i.MX8 ISI driver")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250821135123.29462-1-laurent.pinchart@ideasonboard.com
+Signed-off-by: Guoniu Zhou <guoniu.zhou@nxp.com>
+Co-developed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Tested-by: Guoniu Zhou <guoniu.zhou@nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c | 224 ++++++++-------------
+ 1 file changed, 92 insertions(+), 132 deletions(-)
+
+--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
++++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
+ struct v4l2_pix_format_mplane format;
+ const struct mxc_isi_format_info *info;
+ u32 sequence;
+- bool streaming;
+ };
+
+ struct mxc_isi_m2m_ctx {
+@@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ }
+
++static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
++{
++ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
++ const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
++ const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
++ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
++ const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
++ struct mxc_isi_m2m *m2m = ctx->m2m;
++ int ret;
++
++ guard(mutex)(&m2m->lock);
++
++ if (m2m->usage_count == INT_MAX)
++ return -EOVERFLOW;
++
++ /*
++ * Acquire the pipe and initialize the channel with the first user of
++ * the M2M device.
++ */
++ if (m2m->usage_count == 0) {
++ bool bypass = cap_pix->width == out_pix->width &&
++ cap_pix->height == out_pix->height &&
++ cap_info->encoding == out_info->encoding;
++
++ ret = mxc_isi_channel_acquire(m2m->pipe,
++ &mxc_isi_m2m_frame_write_done,
++ bypass);
++ if (ret)
++ return ret;
++
++ mxc_isi_channel_get(m2m->pipe);
++ }
++
++ m2m->usage_count++;
++
++ /*
++ * Allocate resources for the channel, counting how many users require
++ * buffer chaining.
++ */
++ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
++ ret = mxc_isi_channel_chain(m2m->pipe);
++ if (ret)
++ goto err_deinit;
++
++ m2m->chained_count++;
++ ctx->chained = true;
++ }
++
++ return 0;
++
++err_deinit:
++ if (--m2m->usage_count == 0) {
++ mxc_isi_channel_put(m2m->pipe);
++ mxc_isi_channel_release(m2m->pipe);
++ }
++
++ return ret;
++}
++
+ static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+ {
+@@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streami
+ }
+ }
+
++static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
++{
++ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
++ struct mxc_isi_m2m *m2m = ctx->m2m;
++
++ guard(mutex)(&m2m->lock);
++
++ /*
++ * If the last context is this one, reset it to make sure the device
++ * will be reconfigured when streaming is restarted.
++ */
++ if (m2m->last_ctx == ctx)
++ m2m->last_ctx = NULL;
++
++ /* Free the channel resources if this is the last chained context. */
++ if (ctx->chained && --m2m->chained_count == 0)
++ mxc_isi_channel_unchain(m2m->pipe);
++ ctx->chained = false;
++
++ /* Turn off the light with the last user. */
++ if (--m2m->usage_count == 0) {
++ mxc_isi_channel_disable(m2m->pipe);
++ mxc_isi_channel_put(m2m->pipe);
++ mxc_isi_channel_release(m2m->pipe);
++ }
++
++ WARN_ON(m2m->usage_count < 0);
++}
++
+ static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
+ .queue_setup = mxc_isi_m2m_vb2_queue_setup,
+ .buf_init = mxc_isi_m2m_vb2_buffer_init,
+@@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_
+ .buf_queue = mxc_isi_m2m_vb2_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
++ .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming,
+ .start_streaming = mxc_isi_m2m_vb2_start_streaming,
+ .stop_streaming = mxc_isi_m2m_vb2_stop_streaming,
++ .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming,
+ };
+
+ static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+@@ -483,135 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct
+ return 0;
+ }
+
+-static int mxc_isi_m2m_streamon(struct file *file, void *fh,
+- enum v4l2_buf_type type)
+-{
+- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+- const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+- const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+- const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+- const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+- struct mxc_isi_m2m *m2m = ctx->m2m;
+- int ret;
+-
+- if (q->streaming)
+- return 0;
+-
+- mutex_lock(&m2m->lock);
+-
+- if (m2m->usage_count == INT_MAX) {
+- ret = -EOVERFLOW;
+- goto unlock;
+- }
+-
+- /*
+- * Acquire the pipe and initialize the channel with the first user of
+- * the M2M device.
+- */
+- if (m2m->usage_count == 0) {
+- bool bypass = cap_pix->width == out_pix->width &&
+- cap_pix->height == out_pix->height &&
+- cap_info->encoding == out_info->encoding;
+-
+- ret = mxc_isi_channel_acquire(m2m->pipe,
+- &mxc_isi_m2m_frame_write_done,
+- bypass);
+- if (ret)
+- goto unlock;
+-
+- mxc_isi_channel_get(m2m->pipe);
+- }
+-
+- m2m->usage_count++;
+-
+- /*
+- * Allocate resources for the channel, counting how many users require
+- * buffer chaining.
+- */
+- if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+- ret = mxc_isi_channel_chain(m2m->pipe);
+- if (ret)
+- goto deinit;
+-
+- m2m->chained_count++;
+- ctx->chained = true;
+- }
+-
+- /*
+- * Drop the lock to start the stream, as the .device_run() operation
+- * needs to acquire it.
+- */
+- mutex_unlock(&m2m->lock);
+- ret = v4l2_m2m_ioctl_streamon(file, fh, type);
+- if (ret) {
+- /* Reacquire the lock for the cleanup path. */
+- mutex_lock(&m2m->lock);
+- goto unchain;
+- }
+-
+- q->streaming = true;
+-
+- return 0;
+-
+-unchain:
+- if (ctx->chained && --m2m->chained_count == 0)
+- mxc_isi_channel_unchain(m2m->pipe);
+- ctx->chained = false;
+-
+-deinit:
+- if (--m2m->usage_count == 0) {
+- mxc_isi_channel_put(m2m->pipe);
+- mxc_isi_channel_release(m2m->pipe);
+- }
+-
+-unlock:
+- mutex_unlock(&m2m->lock);
+- return ret;
+-}
+-
+-static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
+- enum v4l2_buf_type type)
+-{
+- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
+- struct mxc_isi_m2m *m2m = ctx->m2m;
+-
+- v4l2_m2m_ioctl_streamoff(file, fh, type);
+-
+- if (!q->streaming)
+- return 0;
+-
+- mutex_lock(&m2m->lock);
+-
+- /*
+- * If the last context is this one, reset it to make sure the device
+- * will be reconfigured when streaming is restarted.
+- */
+- if (m2m->last_ctx == ctx)
+- m2m->last_ctx = NULL;
+-
+- /* Free the channel resources if this is the last chained context. */
+- if (ctx->chained && --m2m->chained_count == 0)
+- mxc_isi_channel_unchain(m2m->pipe);
+- ctx->chained = false;
+-
+- /* Turn off the light with the last user. */
+- if (--m2m->usage_count == 0) {
+- mxc_isi_channel_disable(m2m->pipe);
+- mxc_isi_channel_put(m2m->pipe);
+- mxc_isi_channel_release(m2m->pipe);
+- }
+-
+- WARN_ON(m2m->usage_count < 0);
+-
+- mutex_unlock(&m2m->lock);
+-
+- q->streaming = false;
+-
+- return 0;
+-}
+-
+ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
+ .vidioc_querycap = mxc_isi_m2m_querycap,
+
+@@ -632,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_i
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+- .vidioc_streamon = mxc_isi_m2m_streamon,
+- .vidioc_streamoff = mxc_isi_m2m_streamoff,
++ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
++ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
cifs-parse_dfs_referrals-prevent-oob-on-malformed-input.patch
drm-sched-fix-potential-double-free-in-drm_sched_job_add_resv_dependencies.patch
drm-amdgpu-use-atomic-functions-with-memory-barriers-for-vm-fault-info.patch
+drm-amd-check-whether-secure-display-ta-loaded-successfully.patch
+cpufreq-cppc-avoid-using-cpufreq_eternal-as-transition-delay.patch
+bluetooth-btusb-add-usb-id-2001-332a-for-d-link-ax9u-rev.-a1.patch
+epoll-remove-ep_scan_ready_list-in-comments.patch
+eventpoll-replace-rwlock-with-spinlock.patch
+drm-msm-adreno-de-spaghettify-the-use-of-memory-barriers.patch
+drm-msm-a6xx-fix-pdc-sleep-sequence.patch
+drm-exynos-exynos7_drm_decon-fix-uninitialized-crtc-reference-in-functions.patch
+drm-exynos-exynos7_drm_decon-properly-clear-channels-during-bind.patch
+drm-exynos-exynos7_drm_decon-remove-ctx-suspended.patch
+media-nxp-imx8-isi-drop-unused-argument-to-mxc_isi_channel_chain.patch
+media-nxp-imx8-isi-m2m-fix-streaming-cleanup-on-release.patch
+usb-gadget-store-endpoint-pointer-in-usb_request.patch
+usb-gadget-introduce-free_usb_request-helper.patch
+usb-gadget-f_ecm-refactor-bind-path-to-use-__free.patch
+usb-gadget-f_acm-refactor-bind-path-to-use-__free.patch
+usb-gadget-f_ncm-refactor-bind-path-to-use-__free.patch
+usb-gadget-f_rndis-refactor-bind-path-to-use-__free.patch
--- /dev/null
+From stable+bounces-187758-greg=kroah.com@vger.kernel.org Sat Oct 18 02:52:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 20:52:33 -0400
+Subject: usb: gadget: f_acm: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018005233.97995-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 47b2116e54b4a854600341487e8b55249e926324 ]
+
+After a bind/unbind cycle, the acm->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020
+Call trace:
+ usb_ep_free_request+0x2c/0xec
+ gs_free_req+0x30/0x44
+ acm_bind+0x1b8/0x1f4
+ usb_add_function+0xcc/0x1f0
+ configfs_composite_bind+0x468/0x588
+ gadget_bind_driver+0x104/0x270
+ really_probe+0x190/0x374
+ __driver_probe_device+0xa0/0x12c
+ driver_probe_device+0x3c/0x218
+ __device_attach_driver+0x14c/0x188
+ bus_for_each_drv+0x10c/0x168
+ __device_attach+0xfc/0x198
+ device_initial_probe+0x14/0x24
+ bus_probe_device+0x94/0x11c
+ device_add+0x268/0x48c
+ usb_add_gadget+0x198/0x28c
+ dwc3_gadget_init+0x700/0x858
+ __dwc3_set_mode+0x3cc/0x664
+ process_scheduled_works+0x1d8/0x488
+ worker_thread+0x244/0x334
+ kthread+0x114/0x1bc
+ ret_from_fork+0x10/0x20
+
+Fixes: 1f1ba11b6494 ("usb gadget: issue notifications from ACM function")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-4-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-4-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_acm.c | 42 ++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 23 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_acm.c
++++ b/drivers/usb/gadget/function/f_acm.c
+@@ -11,12 +11,15 @@
+
+ /* #define VERBOSE_DEBUG */
+
++#include <linux/cleanup.h>
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+
++#include <linux/usb/gadget.h>
++
+ #include "u_serial.h"
+
+
+@@ -612,6 +615,7 @@ acm_bind(struct usb_configuration *c, st
+ struct usb_string *us;
+ int status;
+ struct usb_ep *ep;
++ struct usb_request *request __free(free_usb_request) = NULL;
+
+ /* REVISIT might want instance-specific strings to help
+ * distinguish instances ...
+@@ -629,7 +633,7 @@ acm_bind(struct usb_configuration *c, st
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ acm->ctrl_id = status;
+ acm_iad_descriptor.bFirstInterface = status;
+
+@@ -638,40 +642,38 @@ acm_bind(struct usb_configuration *c, st
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ acm->data_id = status;
+
+ acm_data_interface_desc.bInterfaceNumber = status;
+ acm_union_desc.bSlaveInterface0 = status;
+ acm_call_mgmt_descriptor.bDataInterface = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->port.in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->port.out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->notify = ep;
+
+ /* allocate notification */
+- acm->notify_req = gs_alloc_req(ep,
+- sizeof(struct usb_cdc_notification) + 2,
+- GFP_KERNEL);
+- if (!acm->notify_req)
+- goto fail;
++ request = gs_alloc_req(ep,
++ sizeof(struct usb_cdc_notification) + 2,
++ GFP_KERNEL);
++ if (!request)
++ return -ENODEV;
+
+- acm->notify_req->complete = acm_cdc_notify_complete;
+- acm->notify_req->context = acm;
++ request->complete = acm_cdc_notify_complete;
++ request->context = acm;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+@@ -688,7 +690,9 @@ acm_bind(struct usb_configuration *c, st
+ status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
+ acm_ss_function, acm_ss_function);
+ if (status)
+- goto fail;
++ return status;
++
++ acm->notify_req = no_free_ptr(request);
+
+ dev_dbg(&cdev->gadget->dev,
+ "acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
+@@ -696,14 +700,6 @@ acm_bind(struct usb_configuration *c, st
+ acm->port.in->name, acm->port.out->name,
+ acm->notify->name);
+ return 0;
+-
+-fail:
+- if (acm->notify_req)
+- gs_free_req(acm->notify, acm->notify_req);
+-
+- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+-
+- return status;
+ }
+
+ static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
--- /dev/null
+From stable+bounces-187764-greg=kroah.com@vger.kernel.org Sat Oct 18 03:03:05 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 21:02:55 -0400
+Subject: usb: gadget: f_ecm: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018010255.104362-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 42988380ac67c76bb9dff8f77d7ef3eefd50b7b5 ]
+
+After a bind/unbind cycle, the ecm->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Fixes: da741b8c56d6 ("usb ethernet gadget: split CDC Ethernet function")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-5-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-5-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_ecm.c | 48 +++++++++++++++---------------------
+ 1 file changed, 20 insertions(+), 28 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_ecm.c
++++ b/drivers/usb/gadget/function/f_ecm.c
+@@ -8,12 +8,15 @@
+
+ /* #define VERBOSE_DEBUG */
+
++#include <linux/cleanup.h>
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/etherdevice.h>
+
++#include <linux/usb/gadget.h>
++
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+ #include "u_ecm.h"
+@@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, st
+ struct usb_ep *ep;
+
+ struct f_ecm_opts *ecm_opts;
++ struct usb_request *request __free(free_usb_request) = NULL;
+
+ if (!can_support_ecm(cdev->gadget))
+ return -EINVAL;
+@@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, st
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ecm->ctrl_id = status;
+ ecm_iad_descriptor.bFirstInterface = status;
+
+@@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, st
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ecm->data_id = status;
+
+ ecm_data_nop_intf.bInterfaceNumber = status;
+ ecm_data_intf.bInterfaceNumber = status;
+ ecm_union_desc.bSlaveInterface0 = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ecm->port.in_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ecm->port.out_ep = ep;
+
+ /* NOTE: a status/notification endpoint is *OPTIONAL* but we
+@@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, st
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ecm->notify = ep;
+
+- status = -ENOMEM;
+-
+ /* allocate notification request and buffer */
+- ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+- if (!ecm->notify_req)
+- goto fail;
+- ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
+- if (!ecm->notify_req->buf)
+- goto fail;
+- ecm->notify_req->context = ecm;
+- ecm->notify_req->complete = ecm_notify_complete;
++ request = usb_ep_alloc_request(ep, GFP_KERNEL);
++ if (!request)
++ return -ENOMEM;
++ request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
++ if (!request->buf)
++ return -ENOMEM;
++ request->context = ecm;
++ request->complete = ecm_notify_complete;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+@@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, st
+ status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
+ ecm_ss_function, ecm_ss_function);
+ if (status)
+- goto fail;
++ return status;
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+@@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, st
+ ecm->port.open = ecm_open;
+ ecm->port.close = ecm_close;
+
++ ecm->notify_req = no_free_ptr(request);
++
+ DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
+ ecm->port.in_ep->name, ecm->port.out_ep->name,
+ ecm->notify->name);
+ return 0;
+-
+-fail:
+- if (ecm->notify_req) {
+- kfree(ecm->notify_req->buf);
+- usb_ep_free_request(ecm->notify, ecm->notify_req);
+- }
+-
+- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+- return status;
+ }
+
+ static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)
--- /dev/null
+From stable+bounces-187748-greg=kroah.com@vger.kernel.org Sat Oct 18 02:42:00 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 20:41:48 -0400
+Subject: usb: gadget: f_ncm: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018004148.92008-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 75a5b8d4ddd4eb6b16cb0b475d14ff4ae64295ef ]
+
+After a bind/unbind cycle, the ncm->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020
+Call trace:
+ usb_ep_free_request+0x2c/0xec
+ ncm_bind+0x39c/0x3dc
+ usb_add_function+0xcc/0x1f0
+ configfs_composite_bind+0x468/0x588
+ gadget_bind_driver+0x104/0x270
+ really_probe+0x190/0x374
+ __driver_probe_device+0xa0/0x12c
+ driver_probe_device+0x3c/0x218
+ __device_attach_driver+0x14c/0x188
+ bus_for_each_drv+0x10c/0x168
+ __device_attach+0xfc/0x198
+ device_initial_probe+0x14/0x24
+ bus_probe_device+0x94/0x11c
+ device_add+0x268/0x48c
+ usb_add_gadget+0x198/0x28c
+ dwc3_gadget_init+0x700/0x858
+ __dwc3_set_mode+0x3cc/0x664
+ process_scheduled_works+0x1d8/0x488
+ worker_thread+0x244/0x334
+ kthread+0x114/0x1bc
+ ret_from_fork+0x10/0x20
+
+Fixes: 9f6ce4240a2b ("usb: gadget: f_ncm.c added")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-3-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-3-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_ncm.c | 78 +++++++++++++++---------------------
+ 1 file changed, 33 insertions(+), 45 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -11,6 +11,7 @@
+ * Copyright (C) 2008 Nokia Corporation
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -19,6 +20,7 @@
+ #include <linux/crc32.h>
+
+ #include <linux/usb/cdc.h>
++#include <linux/usb/gadget.h>
+
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+@@ -1422,18 +1424,18 @@ static int ncm_bind(struct usb_configura
+ struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
+
++ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
++ struct usb_request *request __free(free_usb_request) = NULL;
++
+ if (!can_support_ecm(cdev->gadget))
+ return -EINVAL;
+
+ ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
+ if (cdev->use_os_string) {
+- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+- GFP_KERNEL);
+- if (!f->os_desc_table)
++ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
++ if (!os_desc_table)
+ return -ENOMEM;
+- f->os_desc_n = 1;
+- f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
+ mutex_lock(&ncm_opts->lock);
+@@ -1443,16 +1445,15 @@ static int ncm_bind(struct usb_configura
+ mutex_unlock(&ncm_opts->lock);
+
+ if (status)
+- goto fail;
++ return status;
+
+ ncm_opts->bound = true;
+
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+- if (IS_ERR(us)) {
+- status = PTR_ERR(us);
+- goto fail;
+- }
++ if (IS_ERR(us))
++ return PTR_ERR(us);
++
+ ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
+ ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
+ ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
+@@ -1462,55 +1463,47 @@ static int ncm_bind(struct usb_configura
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ncm->ctrl_id = status;
+ ncm_iad_desc.bFirstInterface = status;
+
+ ncm_control_intf.bInterfaceNumber = status;
+ ncm_union_desc.bMasterInterface0 = status;
+
+- if (cdev->use_os_string)
+- f->os_desc_table[0].if_id =
+- ncm_iad_desc.bFirstInterface;
+-
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ncm->data_id = status;
+
+ ncm_data_nop_intf.bInterfaceNumber = status;
+ ncm_data_intf.bInterfaceNumber = status;
+ ncm_union_desc.bSlaveInterface0 = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->port.in_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->port.out_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->notify = ep;
+
+- status = -ENOMEM;
+-
+ /* allocate notification request and buffer */
+- ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+- if (!ncm->notify_req)
+- goto fail;
+- ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+- if (!ncm->notify_req->buf)
+- goto fail;
+- ncm->notify_req->context = ncm;
+- ncm->notify_req->complete = ncm_notify_complete;
++ request = usb_ep_alloc_request(ep, GFP_KERNEL);
++ if (!request)
++ return -ENOMEM;
++ request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
++ if (!request->buf)
++ return -ENOMEM;
++ request->context = ncm;
++ request->complete = ncm_notify_complete;
+
+ /*
+ * support all relevant hardware speeds... we expect that when
+@@ -1530,7 +1523,7 @@ static int ncm_bind(struct usb_configura
+ status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
+ ncm_ss_function, ncm_ss_function);
+ if (status)
+- goto fail;
++ return status;
+
+ /*
+ * NOTE: all that is done without knowing or caring about
+@@ -1544,23 +1537,18 @@ static int ncm_bind(struct usb_configura
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ ncm->task_timer.function = ncm_tx_timeout;
+
++ if (cdev->use_os_string) {
++ os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
++ os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface;
++ f->os_desc_table = no_free_ptr(os_desc_table);
++ f->os_desc_n = 1;
++ }
++ ncm->notify_req = no_free_ptr(request);
++
+ DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
+ ncm->port.in_ep->name, ncm->port.out_ep->name,
+ ncm->notify->name);
+ return 0;
+-
+-fail:
+- kfree(f->os_desc_table);
+- f->os_desc_n = 0;
+-
+- if (ncm->notify_req) {
+- kfree(ncm->notify_req->buf);
+- usb_ep_free_request(ncm->notify, ncm->notify_req);
+- }
+-
+- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+- return status;
+ }
+
+ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
--- /dev/null
+From stable+bounces-187790-greg=kroah.com@vger.kernel.org Sat Oct 18 04:18:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 22:18:22 -0400
+Subject: usb: gadget: f_rndis: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018021822.214718-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 08228941436047bdcd35a612c1aec0912a29d8cd ]
+
+After a bind/unbind cycle, the rndis->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Fixes: 45fe3b8e5342 ("usb ethernet gadget: split RNDIS function")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-6-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-6-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_rndis.c | 85 ++++++++++++++--------------------
+ 1 file changed, 35 insertions(+), 50 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_rndis.c
++++ b/drivers/usb/gadget/function/f_rndis.c
+@@ -19,6 +19,8 @@
+
+ #include <linux/atomic.h>
+
++#include <linux/usb/gadget.h>
++
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+ #include "u_rndis.h"
+@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c,
+ struct usb_ep *ep;
+
+ struct f_rndis_opts *rndis_opts;
++ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
++ struct usb_request *request __free(free_usb_request) = NULL;
+
+ if (!can_support_rndis(c))
+ return -EINVAL;
+@@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c,
+ rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+
+ if (cdev->use_os_string) {
+- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+- GFP_KERNEL);
+- if (!f->os_desc_table)
++ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
++ if (!os_desc_table)
+ return -ENOMEM;
+- f->os_desc_n = 1;
+- f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
+ }
+
+ rndis_iad_descriptor.bFunctionClass = rndis_opts->class;
+@@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c,
+ gether_set_gadget(rndis_opts->net, cdev->gadget);
+ status = gether_register_netdev(rndis_opts->net);
+ if (status)
+- goto fail;
++ return status;
+ rndis_opts->bound = true;
+ }
+
+ us = usb_gstrings_attach(cdev, rndis_strings,
+ ARRAY_SIZE(rndis_string_defs));
+- if (IS_ERR(us)) {
+- status = PTR_ERR(us);
+- goto fail;
+- }
++ if (IS_ERR(us))
++ return PTR_ERR(us);
+ rndis_control_intf.iInterface = us[0].id;
+ rndis_data_intf.iInterface = us[1].id;
+ rndis_iad_descriptor.iFunction = us[2].id;
+@@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c,
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ rndis->ctrl_id = status;
+ rndis_iad_descriptor.bFirstInterface = status;
+
+ rndis_control_intf.bInterfaceNumber = status;
+ rndis_union_desc.bMasterInterface0 = status;
+
+- if (cdev->use_os_string)
+- f->os_desc_table[0].if_id =
+- rndis_iad_descriptor.bFirstInterface;
+-
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ rndis->data_id = status;
+
+ rndis_data_intf.bInterfaceNumber = status;
+ rndis_union_desc.bSlaveInterface0 = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ rndis->port.in_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ rndis->port.out_ep = ep;
+
+ /* NOTE: a status/notification endpoint is, strictly speaking,
+@@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c,
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ rndis->notify = ep;
+
+- status = -ENOMEM;
+-
+ /* allocate notification request and buffer */
+- rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+- if (!rndis->notify_req)
+- goto fail;
+- rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+- if (!rndis->notify_req->buf)
+- goto fail;
+- rndis->notify_req->length = STATUS_BYTECOUNT;
+- rndis->notify_req->context = rndis;
+- rndis->notify_req->complete = rndis_response_complete;
++ request = usb_ep_alloc_request(ep, GFP_KERNEL);
++ if (!request)
++ return -ENOMEM;
++ request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
++ if (!request->buf)
++ return -ENOMEM;
++ request->length = STATUS_BYTECOUNT;
++ request->context = rndis;
++ request->complete = rndis_response_complete;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+@@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c,
+ status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
+ eth_ss_function, eth_ss_function);
+ if (status)
+- goto fail;
++ return status;
+
+ rndis->port.open = rndis_open;
+ rndis->port.close = rndis_close;
+@@ -789,9 +780,18 @@ rndis_bind(struct usb_configuration *c,
+ if (rndis->manufacturer && rndis->vendorID &&
+ rndis_set_param_vendor(rndis->params, rndis->vendorID,
+ rndis->manufacturer)) {
+- status = -EINVAL;
+- goto fail_free_descs;
++ usb_free_all_descriptors(f);
++ return -EINVAL;
++ }
++
++ if (cdev->use_os_string) {
++ os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
++ os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface;
++ f->os_desc_table = no_free_ptr(os_desc_table);
++ f->os_desc_n = 1;
++
+ }
++ rndis->notify_req = no_free_ptr(request);
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+@@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c,
+ rndis->port.in_ep->name, rndis->port.out_ep->name,
+ rndis->notify->name);
+ return 0;
+-
+-fail_free_descs:
+- usb_free_all_descriptors(f);
+-fail:
+- kfree(f->os_desc_table);
+- f->os_desc_n = 0;
+-
+- if (rndis->notify_req) {
+- kfree(rndis->notify_req->buf);
+- usb_ep_free_request(rndis->notify, rndis->notify_req);
+- }
+-
+- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+- return status;
+ }
+
+ void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
--- /dev/null
+From stable+bounces-187747-greg=kroah.com@vger.kernel.org Sat Oct 18 02:41:59 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 20:41:47 -0400
+Subject: usb: gadget: Introduce free_usb_request helper
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018004148.92008-2-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 201c53c687f2b55a7cc6d9f4000af4797860174b ]
+
+Introduce the free_usb_request() function that frees both the request's
+buffer and the request itself.
+
+This function serves as the cleanup callback for DEFINE_FREE() to enable
+automatic, scope-based cleanup for usb_request pointers.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-2-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-2-4997bf277548@google.com
+Stable-dep-of: 75a5b8d4ddd4 ("usb: gadget: f_ncm: Refactor bind path to use __free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/usb/gadget.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -15,6 +15,7 @@
+ #ifndef __LINUX_USB_GADGET_H
+ #define __LINUX_USB_GADGET_H
+
++#include <linux/cleanup.h>
+ #include <linux/configfs.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+@@ -291,6 +292,28 @@ static inline void usb_ep_fifo_flush(str
+
+ /*-------------------------------------------------------------------------*/
+
++/**
++ * free_usb_request - frees a usb_request object and its buffer
++ * @req: the request being freed
++ *
++ * This helper function frees both the request's buffer and the request object
++ * itself by calling usb_ep_free_request(). Its signature is designed to be used
++ * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request
++ * pointers.
++ */
++static inline void free_usb_request(struct usb_request *req)
++{
++ if (!req)
++ return;
++
++ kfree(req->buf);
++ usb_ep_free_request(req->ep, req);
++}
++
++DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T))
++
++/*-------------------------------------------------------------------------*/
++
+ struct usb_dcd_config_params {
+ __u8 bU1devExitLat; /* U1 Device exit Latency */
+ #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */
--- /dev/null
+From stable+bounces-187746-greg=kroah.com@vger.kernel.org Sat Oct 18 02:41:57 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Oct 2025 20:41:46 -0400
+Subject: usb: gadget: Store endpoint pointer in usb_request
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251018004148.92008-1-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit bfb1d99d969fe3b892db30848aeebfa19d21f57f ]
+
+Gadget function drivers often have goto-based error handling in their
+bind paths, which can be bug-prone. Refactoring these paths to use
+__free() scope-based cleanup is desirable, but currently blocked.
+
+The blocker is that usb_ep_free_request(ep, req) requires two
+parameters, while the __free() mechanism can only pass a pointer to the
+request itself.
+
+Store an endpoint pointer in the struct usb_request. The pointer is
+populated centrally in usb_ep_alloc_request() on every successful
+allocation, making the request object self-contained.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-1-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-1-4997bf277548@google.com
+Stable-dep-of: 75a5b8d4ddd4 ("usb: gadget: f_ncm: Refactor bind path to use __free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/core.c | 3 +++
+ include/linux/usb/gadget.h | 2 ++
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request
+
+ req = ep->ops->alloc_request(ep, gfp_flags);
+
++ if (req)
++ req->ep = ep;
++
+ trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+ return req;
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -32,6 +32,7 @@ struct usb_ep;
+
+ /**
+ * struct usb_request - describes one i/o request
++ * @ep: The associated endpoint set by usb_ep_alloc_request().
+ * @buf: Buffer used for data. Always provide this; some controllers
+ * only use PIO, or don't use DMA for some endpoints.
+ * @dma: DMA address corresponding to 'buf'. If you don't set this
+@@ -97,6 +98,7 @@ struct usb_ep;
+ */
+
+ struct usb_request {
++ struct usb_ep *ep;
+ void *buf;
+ unsigned length;
+ dma_addr_t dma;