--- /dev/null
+From stable+bounces-189872-greg=kroah.com@vger.kernel.org Sun Oct 26 17:25:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Oct 2025 12:25:25 -0400
+Subject: arch_topology: Fix incorrect error check in topology_parse_cpu_capacity()
+To: stable@vger.kernel.org
+Cc: Kaushlendra Kumar <kaushlendra.kumar@intel.com>, stable <stable@kernel.org>, Sudeep Holla <sudeep.holla@arm.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251026162525.110118-1-sashal@kernel.org>
+
+From: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+
+[ Upstream commit 2eead19334516c8e9927c11b448fbe512b1f18a1 ]
+
+Fix incorrect use of PTR_ERR_OR_ZERO() in topology_parse_cpu_capacity()
+which causes the code to proceed with NULL clock pointers. The current
+logic uses !PTR_ERR_OR_ZERO(cpu_clk) which evaluates to true for both
+valid pointers and NULL, leading to potential NULL pointer dereference
+in clk_get_rate().
+
+Per include/linux/err.h documentation, PTR_ERR_OR_ZERO(ptr) returns:
+"The error code within @ptr if it is an error pointer; 0 otherwise."
+
+This means PTR_ERR_OR_ZERO() returns 0 for both valid pointers AND NULL
+pointers. Therefore !PTR_ERR_OR_ZERO(cpu_clk) evaluates to true (proceed)
+when cpu_clk is either valid or NULL, causing clk_get_rate(NULL) to be
+called when of_clk_get() returns NULL.
+
+Replace with !IS_ERR_OR_NULL(cpu_clk) which only proceeds for valid
+pointers, preventing potential NULL pointer dereference in clk_get_rate().
+
+Cc: stable <stable@kernel.org>
+Signed-off-by: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Fixes: b8fe128dad8f ("arch_topology: Adjust initial CPU capacities with current freq")
+Link: https://patch.msgid.link/20250923174308.1771906-1-kaushlendra.kumar@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/arch_topology.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/base/arch_topology.c
++++ b/drivers/base/arch_topology.c
+@@ -290,7 +290,7 @@ bool __init topology_parse_cpu_capacity(
+ * frequency (by keeping the initial freq_factor value).
+ */
+ cpu_clk = of_clk_get(cpu_node, 0);
+- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
++ if (!IS_ERR_OR_NULL(cpu_clk)) {
+ per_cpu(freq_factor, cpu) =
+ clk_get_rate(cpu_clk) / 1000;
+ clk_put(cpu_clk);
--- /dev/null
+From 3bbf004c4808e2c3241e5c1ad6cc102f38a03c39 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 19 Sep 2025 15:58:28 +0100
+Subject: arm64: cputype: Add Neoverse-V3AE definitions
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 3bbf004c4808e2c3241e5c1ad6cc102f38a03c39 upstream.
+
+Add cputype definitions for Neoverse-V3AE. These will be used for errata
+detection in subsequent patches.
+
+These values can be found in the Neoverse-V3AE TRM:
+
+ https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+... in section A.6.1 ("MIDR_EL1, Main ID Register").
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Ryan: Trivial backport ]
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -93,6 +93,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+ #define ARM_CPU_PART_CORTEX_A720 0xD81
+ #define ARM_CPU_PART_CORTEX_X4 0xD82
++#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83
+ #define ARM_CPU_PART_NEOVERSE_V3 0xD84
+ #define ARM_CPU_PART_CORTEX_X925 0xD85
+ #define ARM_CPU_PART_CORTEX_A725 0xD87
+@@ -160,6 +161,7 @@
+ #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+ #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
+ #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
--- /dev/null
+From 0c33aa1804d101c11ba1992504f17a42233f0e11 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 19 Sep 2025 15:58:29 +0100
+Subject: arm64: errata: Apply workarounds for Neoverse-V3AE
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0c33aa1804d101c11ba1992504f17a42233f0e11 upstream.
+
+Neoverse-V3AE is also affected by erratum #3312417, as described in its
+Software Developer Errata Notice (SDEN) document:
+
+ Neoverse V3AE (MP172) SDEN v9.0, erratum 3312417
+ https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+Enable the workaround for Neoverse-V3AE, and document this.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[ Ryan: Trivial backport ]
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arm64/silicon-errata.rst | 2 ++
+ arch/arm64/Kconfig | 1 +
+ arch/arm64/kernel/cpu_errata.c | 1 +
+ 3 files changed, 4 insertions(+)
+
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -160,6 +160,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-V3AE | #3312417 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-500 | #841119,826419 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-600 | #1076982,1209401| N/A |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -875,6 +875,7 @@ config ARM64_ERRATUM_3194386
+ * ARM Neoverse-V1 erratum 3324341
+ * ARM Neoverse V2 erratum 3324336
+ * ARM Neoverse-V3 erratum 3312417
++ * ARM Neoverse-V3AE erratum 3312417
+
+ On affected cores "MSR SSBS, #0" instructions may not affect
+ subsequent speculative instructions, which may permit unexepected
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -424,6 +424,7 @@ static const struct midr_range erratum_s
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
+ {}
+ };
+ #endif
--- /dev/null
+From stable+bounces-189904-greg=kroah.com@vger.kernel.org Mon Oct 27 02:15:09 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Oct 2025 21:15:01 -0400
+Subject: devcoredump: Fix circular locking dependency with devcd->mutex.
+To: stable@vger.kernel.org
+Cc: Maarten Lankhorst <dev@lankhorst.se>, Mukesh Ojha <quic_mojha@quicinc.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Johannes Berg <johannes@sipsolutions.net>, "Rafael J. Wysocki" <rafael@kernel.org>, Danilo Krummrich <dakr@kernel.org>, linux-kernel@vger.kernel.org, Matthew Brost <matthew.brost@intel.com>, Mukesh Ojha <mukesh.ojha@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027011501.314221-1-sashal@kernel.org>
+
+From: Maarten Lankhorst <dev@lankhorst.se>
+
+[ Upstream commit a91c8096590bd7801a26454789f2992094fe36da ]
+
+The original code causes a circular locking dependency found by lockdep.
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.16.0-rc6-lgci-xe-xe-pw-151626v3+ #1 Tainted: G S U
+------------------------------------------------------
+xe_fault_inject/5091 is trying to acquire lock:
+ffff888156815688 ((work_completion)(&(&devcd->del_wk)->work)){+.+.}-{0:0}, at: __flush_work+0x25d/0x660
+
+but task is already holding lock:
+
+ffff888156815620 (&devcd->mutex){+.+.}-{3:3}, at: dev_coredump_put+0x3f/0xa0
+which lock already depends on the new lock.
+the existing dependency chain (in reverse order) is:
+-> #2 (&devcd->mutex){+.+.}-{3:3}:
+ mutex_lock_nested+0x4e/0xc0
+ devcd_data_write+0x27/0x90
+ sysfs_kf_bin_write+0x80/0xf0
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+-> #1 (kn->active#236){++++}-{0:0}:
+ kernfs_drain+0x1e2/0x200
+ __kernfs_remove+0xae/0x400
+ kernfs_remove_by_name_ns+0x5d/0xc0
+ remove_files+0x54/0x70
+ sysfs_remove_group+0x3d/0xa0
+ sysfs_remove_groups+0x2e/0x60
+ device_remove_attrs+0xc7/0x100
+ device_del+0x15d/0x3b0
+ devcd_del+0x19/0x30
+ process_one_work+0x22b/0x6f0
+ worker_thread+0x1e8/0x3d0
+ kthread+0x11c/0x250
+ ret_from_fork+0x26c/0x2e0
+ ret_from_fork_asm+0x1a/0x30
+-> #0 ((work_completion)(&(&devcd->del_wk)->work)){+.+.}-{0:0}:
+ __lock_acquire+0x1661/0x2860
+ lock_acquire+0xc4/0x2f0
+ __flush_work+0x27a/0x660
+ flush_delayed_work+0x5d/0xa0
+ dev_coredump_put+0x63/0xa0
+ xe_driver_devcoredump_fini+0x12/0x20 [xe]
+ devm_action_release+0x12/0x30
+ release_nodes+0x3a/0x120
+ devres_release_all+0x8a/0xd0
+ device_unbind_cleanup+0x12/0x80
+ device_release_driver_internal+0x23a/0x280
+ device_driver_detach+0x14/0x20
+ unbind_store+0xaf/0xc0
+ drv_attr_store+0x21/0x50
+ sysfs_kf_write+0x4a/0x80
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+other info that might help us debug this:
+Chain exists of: (work_completion)(&(&devcd->del_wk)->work) --> kn->active#236 --> &devcd->mutex
+ Possible unsafe locking scenario:
+ CPU0 CPU1
+ ---- ----
+ lock(&devcd->mutex);
+ lock(kn->active#236);
+ lock(&devcd->mutex);
+ lock((work_completion)(&(&devcd->del_wk)->work));
+ *** DEADLOCK ***
+5 locks held by xe_fault_inject/5091:
+ #0: ffff8881129f9488 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x72/0xf0
+ #1: ffff88810c755078 (&of->mutex#2){+.+.}-{3:3}, at: kernfs_fop_write_iter+0x123/0x220
+ #2: ffff8881054811a0 (&dev->mutex){....}-{3:3}, at: device_release_driver_internal+0x55/0x280
+ #3: ffff888156815620 (&devcd->mutex){+.+.}-{3:3}, at: dev_coredump_put+0x3f/0xa0
+ #4: ffffffff8359e020 (rcu_read_lock){....}-{1:2}, at: __flush_work+0x72/0x660
+stack backtrace:
+CPU: 14 UID: 0 PID: 5091 Comm: xe_fault_inject Tainted: G S U 6.16.0-rc6-lgci-xe-xe-pw-151626v3+ #1 PREEMPT_{RT,(lazy)}
+Tainted: [S]=CPU_OUT_OF_SPEC, [U]=USER
+Hardware name: Micro-Star International Co., Ltd. MS-7D25/PRO Z690-A DDR4(MS-7D25), BIOS 1.10 12/13/2021
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x91/0xf0
+ dump_stack+0x10/0x20
+ print_circular_bug+0x285/0x360
+ check_noncircular+0x135/0x150
+ ? register_lock_class+0x48/0x4a0
+ __lock_acquire+0x1661/0x2860
+ lock_acquire+0xc4/0x2f0
+ ? __flush_work+0x25d/0x660
+ ? mark_held_locks+0x46/0x90
+ ? __flush_work+0x25d/0x660
+ __flush_work+0x27a/0x660
+ ? __flush_work+0x25d/0x660
+ ? trace_hardirqs_on+0x1e/0xd0
+ ? __pfx_wq_barrier_func+0x10/0x10
+ flush_delayed_work+0x5d/0xa0
+ dev_coredump_put+0x63/0xa0
+ xe_driver_devcoredump_fini+0x12/0x20 [xe]
+ devm_action_release+0x12/0x30
+ release_nodes+0x3a/0x120
+ devres_release_all+0x8a/0xd0
+ device_unbind_cleanup+0x12/0x80
+ device_release_driver_internal+0x23a/0x280
+ ? bus_find_device+0xa8/0xe0
+ device_driver_detach+0x14/0x20
+ unbind_store+0xaf/0xc0
+ drv_attr_store+0x21/0x50
+ sysfs_kf_write+0x4a/0x80
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ ? __f_unlock_pos+0x15/0x20
+ ? __x64_sys_getdents64+0x9b/0x130
+ ? __pfx_filldir64+0x10/0x10
+ ? do_syscall_64+0x1a2/0xb60
+ ? clear_bhb_loop+0x30/0x80
+ ? clear_bhb_loop+0x30/0x80
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x76e292edd574
+Code: c7 00 16 00 00 00 b8 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 80 3d d5 ea 0e 00 00 74 13 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 54 c3 0f 1f 00 55 48 89 e5 48 83 ec 20 48 89
+RSP: 002b:00007fffe247a828 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 000076e292edd574
+RDX: 000000000000000c RSI: 00006267f6306063 RDI: 000000000000000b
+RBP: 000000000000000c R08: 000076e292fc4b20 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000202 R12: 00006267f6306063
+R13: 000000000000000b R14: 00006267e6859c00 R15: 000076e29322a000
+ </TASK>
+xe 0000:03:00.0: [drm] Xe device coredump has been deleted.
+
+Fixes: 01daccf74832 ("devcoredump : Serialize devcd_del work")
+Cc: Mukesh Ojha <quic_mojha@quicinc.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Rafael J. Wysocki <rafael@kernel.org>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org # v6.1+
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Acked-by: Mukesh Ojha <mukesh.ojha@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250723142416.1020423-1-dev@lankhorst.se
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ replaced disable_delayed_work_sync() with cancel_delayed_work_sync() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devcoredump.c | 138 +++++++++++++++++++++++++++------------------
+ 1 file changed, 84 insertions(+), 54 deletions(-)
+
+--- a/drivers/base/devcoredump.c
++++ b/drivers/base/devcoredump.c
+@@ -26,50 +26,46 @@ struct devcd_entry {
+ void *data;
+ size_t datalen;
+ /*
+- * Here, mutex is required to serialize the calls to del_wk work between
+- * user/kernel space which happens when devcd is added with device_add()
+- * and that sends uevent to user space. User space reads the uevents,
+- * and calls to devcd_data_write() which try to modify the work which is
+- * not even initialized/queued from devcoredump.
++ * There are 2 races for which mutex is required.
+ *
++ * The first race is between device creation and userspace writing to
++ * schedule immediately destruction.
+ *
++ * This race is handled by arming the timer before device creation, but
++ * when device creation fails the timer still exists.
+ *
+- * cpu0(X) cpu1(Y)
++ * To solve this, hold the mutex during device_add(), and set
++ * init_completed on success before releasing the mutex.
+ *
+- * dev_coredump() uevent sent to user space
+- * device_add() ======================> user space process Y reads the
+- * uevents writes to devcd fd
+- * which results into writes to
++ * That way the timer will never fire until device_add() is called,
++ * it will do nothing if init_completed is not set. The timer is also
++ * cancelled in that case.
+ *
+- * devcd_data_write()
+- * mod_delayed_work()
+- * try_to_grab_pending()
+- * del_timer()
+- * debug_assert_init()
+- * INIT_DELAYED_WORK()
+- * schedule_delayed_work()
+- *
+- *
+- * Also, mutex alone would not be enough to avoid scheduling of
+- * del_wk work after it get flush from a call to devcd_free()
+- * mentioned as below.
+- *
+- * disabled_store()
+- * devcd_free()
+- * mutex_lock() devcd_data_write()
+- * flush_delayed_work()
+- * mutex_unlock()
+- * mutex_lock()
+- * mod_delayed_work()
+- * mutex_unlock()
+- * So, delete_work flag is required.
++ * The second race involves multiple parallel invocations of devcd_free(),
++ * add a deleted flag so only 1 can call the destructor.
+ */
+ struct mutex mutex;
+- bool delete_work;
++ bool init_completed, deleted;
+ struct module *owner;
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen);
+ void (*free)(void *data);
++ /*
++ * If nothing interferes and device_add() was returns success,
++ * del_wk will destroy the device after the timer fires.
++ *
++ * Multiple userspace processes can interfere in the working of the timer:
++ * - Writing to the coredump will reschedule the timer to run immediately,
++ * if still armed.
++ *
++ * This is handled by using "if (cancel_delayed_work()) {
++ * schedule_delayed_work() }", to prevent re-arming after having
++ * been previously fired.
++ * - Writing to /sys/class/devcoredump/disabled will destroy the
++ * coredump synchronously.
++ * This is handled by using disable_delayed_work_sync(), and then
++ * checking if deleted flag is set with &devcd->mutex held.
++ */
+ struct delayed_work del_wk;
+ struct device *failing_dev;
+ };
+@@ -98,14 +94,27 @@ static void devcd_dev_release(struct dev
+ kfree(devcd);
+ }
+
++static void __devcd_del(struct devcd_entry *devcd)
++{
++ devcd->deleted = true;
++ device_del(&devcd->devcd_dev);
++ put_device(&devcd->devcd_dev);
++}
++
+ static void devcd_del(struct work_struct *wk)
+ {
+ struct devcd_entry *devcd;
++ bool init_completed;
+
+ devcd = container_of(wk, struct devcd_entry, del_wk.work);
+
+- device_del(&devcd->devcd_dev);
+- put_device(&devcd->devcd_dev);
++ /* devcd->mutex serializes against dev_coredumpm_timeout */
++ mutex_lock(&devcd->mutex);
++ init_completed = devcd->init_completed;
++ mutex_unlock(&devcd->mutex);
++
++ if (init_completed)
++ __devcd_del(devcd);
+ }
+
+ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+@@ -125,12 +134,12 @@ static ssize_t devcd_data_write(struct f
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+- mutex_lock(&devcd->mutex);
+- if (!devcd->delete_work) {
+- devcd->delete_work = true;
+- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+- }
+- mutex_unlock(&devcd->mutex);
++ /*
++ * Although it's tempting to use mod_delayed work here,
++ * that will cause a reschedule if the timer already fired.
++ */
++ if (cancel_delayed_work(&devcd->del_wk))
++ schedule_delayed_work(&devcd->del_wk, 0);
+
+ return count;
+ }
+@@ -158,11 +167,21 @@ static int devcd_free(struct device *dev
+ {
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
++ /*
++ * To prevent a race with devcd_data_write(), cancel work and
++ * complete manually instead.
++ *
++ * We cannot rely on the return value of
++ * cancel_delayed_work_sync() here, because it might be in the
++ * middle of a cancel_delayed_work + schedule_delayed_work pair.
++ *
++ * devcd->mutex here guards against multiple parallel invocations
++ * of devcd_free().
++ */
++ cancel_delayed_work_sync(&devcd->del_wk);
+ mutex_lock(&devcd->mutex);
+- if (!devcd->delete_work)
+- devcd->delete_work = true;
+-
+- flush_delayed_work(&devcd->del_wk);
++ if (!devcd->deleted)
++ __devcd_del(devcd);
+ mutex_unlock(&devcd->mutex);
+ return 0;
+ }
+@@ -186,12 +205,10 @@ static ssize_t disabled_show(struct clas
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+- * mutex_lock(&devcd->mutex);
+ *
+ *
+- * In the above diagram, It looks like disabled_store() would be racing with parallely
+- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
+- * is called after kfree of devcd memory after dropping its last reference with
++ * In the above diagram, it looks like disabled_store() would be racing with parallelly
++ * running devcd_del() and result in memory abort after dropping its last reference with
+ * put_device(). However, this will not happens as fn(dev, data) runs
+ * with its own reference to device via klist_node so it is not its last reference.
+ * so, above situation would not occur.
+@@ -353,7 +370,7 @@ void dev_coredumpm(struct device *dev, s
+ devcd->read = read;
+ devcd->free = free;
+ devcd->failing_dev = get_device(dev);
+- devcd->delete_work = false;
++ devcd->deleted = false;
+
+ mutex_init(&devcd->mutex);
+ device_initialize(&devcd->devcd_dev);
+@@ -362,8 +379,14 @@ void dev_coredumpm(struct device *dev, s
+ atomic_inc_return(&devcd_count));
+ devcd->devcd_dev.class = &devcd_class;
+
+- mutex_lock(&devcd->mutex);
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
++
++ /* devcd->mutex prevents devcd_del() completing until init finishes */
++ mutex_lock(&devcd->mutex);
++ devcd->init_completed = false;
++ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
++ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
++
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+@@ -380,13 +403,20 @@ void dev_coredumpm(struct device *dev, s
+
+ dev_set_uevent_suppress(&devcd->devcd_dev, false);
+ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+- schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
++
++ /*
++ * Safe to run devcd_del() now that we are done with devcd_dev.
++ * Alternatively we could have taken a ref on devcd_dev before
++ * dropping the lock.
++ */
++ devcd->init_completed = true;
+ mutex_unlock(&devcd->mutex);
+ return;
+ put_device:
+- put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
++ cancel_delayed_work_sync(&devcd->del_wk);
++ put_device(&devcd->devcd_dev);
++
+ put_module:
+ module_put(owner);
+ free:
--- /dev/null
+From stable+bounces-188337-greg=kroah.com@vger.kernel.org Tue Oct 21 15:14:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 09:13:39 -0400
+Subject: drm/amdgpu: use atomic functions with memory barriers for vm fault info
+To: stable@vger.kernel.org
+Cc: Gui-Dong Han <hanguidong02@gmail.com>, Felix Kuehling <felix.kuehling@amd.com>, Alex Deucher <alexander.deucher@amd.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021131339.2072904-1-sashal@kernel.org>
+
+From: Gui-Dong Han <hanguidong02@gmail.com>
+
+[ Upstream commit 6df8e84aa6b5b1812cc2cacd6b3f5ccbb18cda2b ]
+
+The atomic variable vm_fault_info_updated is used to synchronize access to
+adev->gmc.vm_fault_info between the interrupt handler and
+get_vm_fault_info().
+
+The default atomic functions like atomic_set() and atomic_read() do not
+provide memory barriers. This allows for CPU instruction reordering,
+meaning the memory accesses to vm_fault_info and the vm_fault_info_updated
+flag are not guaranteed to occur in the intended order. This creates a
+race condition that can lead to inconsistent or stale data being used.
+
+The previous implementation, which used an explicit mb(), was incomplete
+and inefficient. It failed to account for all potential CPU reorderings,
+such as the access of vm_fault_info being reordered before the atomic_read
+of the flag. This approach is also more verbose and less performant than
+using the proper atomic functions with acquire/release semantics.
+
+Fix this by switching to atomic_set_release() and atomic_read_acquire().
+These functions provide the necessary acquire and release semantics,
+which act as memory barriers to ensure the correct order of operations.
+It is also more efficient and idiomatic than using explicit full memory
+barriers.
+
+Fixes: b97dfa27ef3a ("drm/amdgpu: save vm fault information for amdkfd")
+Cc: stable@vger.kernel.org
+Signed-off-by: Gui-Dong Han <hanguidong02@gmail.com>
+Signed-off-by: Felix Kuehling <felix.kuehling@amd.com>
+Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ kept kgd_dev parameter and adev cast in amdgpu_amdkfd_gpuvm_get_vm_fault_info ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 7 +++----
+ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 7 +++----
+ 3 files changed, 8 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1870,10 +1870,9 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_inf
+ struct amdgpu_device *adev;
+
+ adev = (struct amdgpu_device *)kgd;
+- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
++ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+- mb();
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -1072,7 +1072,7 @@ static int gmc_v7_0_sw_init(void *handle
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+
+ return 0;
+ }
+@@ -1301,7 +1301,7 @@ static int gmc_v7_0_process_interrupt(st
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1317,8 +1317,7 @@ static int gmc_v7_0_process_interrupt(st
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+- mb();
+- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
+ return 0;
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -1194,7 +1194,7 @@ static int gmc_v8_0_sw_init(void *handle
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
+
+ return 0;
+ }
+@@ -1482,7 +1482,7 @@ static int gmc_v8_0_process_interrupt(st
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
++ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+@@ -1498,8 +1498,7 @@ static int gmc_v8_0_process_interrupt(st
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+- mb();
+- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
++ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
+ return 0;
--- /dev/null
+From stable+bounces-188265-greg=kroah.com@vger.kernel.org Mon Oct 20 23:52:24 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 17:52:17 -0400
+Subject: f2fs: fix wrong block mapping for multi-devices
+To: stable@vger.kernel.org
+Cc: Jaegeuk Kim <jaegeuk@kernel.org>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020215217.1928651-1-sashal@kernel.org>
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 9d5c4f5c7a2c7677e1b3942772122b032c265aae ]
+
+Assuming the disk layout as below,
+
+disk0: 0 --- 0x00035abfff
+disk1: 0x00035ac000 --- 0x00037abfff
+disk2: 0x00037ac000 --- 0x00037ebfff
+
+and we want to read data from offset=13568 having len=128 across the block
+devices, we can illustrate the block addresses like below.
+
+0 .. 0x00037ac000 ------------------- 0x00037ebfff, 0x00037ec000 -------
+ | ^ ^ ^
+ | fofs 0 13568 13568+128
+ | ------------------------------------------------------
+ | LBA 0x37e8aa9 0x37ebfa9 0x37ec029
+ --- map 0x3caa9 0x3ffa9
+
+In this example, we should give the relative map of the target block device
+ranging from 0x3caa9 to 0x3ffa9 where the length should be calculated by
+0x37ebfff + 1 - 0x37ebfa9.
+
+In the below equation, however, map->m_pblk was supposed to be the original
+address instead of the one from the target block address.
+
+ - map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
+
+Cc: stable@vger.kernel.org
+Fixes: 71f2c8206202 ("f2fs: multidevice: support direct IO")
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ applied fix to f2fs_map_blocks() instead of f2fs_map_blocks_cached() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1505,9 +1505,9 @@ int f2fs_map_blocks(struct inode *inode,
+ bidx = f2fs_target_device_index(sbi, map->m_pblk);
+
+ map->m_bdev = FDEV(bidx).bdev;
+- map->m_pblk -= FDEV(bidx).start_blk;
+ map->m_len = min(map->m_len,
+ FDEV(bidx).end_blk + 1 - map->m_pblk);
++ map->m_pblk -= FDEV(bidx).start_blk;
+
+ if (map->m_may_create)
+ f2fs_update_device_state(sbi, inode->i_ino,
--- /dev/null
+From stable+bounces-188168-greg=kroah.com@vger.kernel.org Mon Oct 20 18:03:05 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:02:55 -0400
+Subject: fuse: allocate ff->release_args only if release is needed
+To: stable@vger.kernel.org
+Cc: Amir Goldstein <amir73il@gmail.com>, Miklos Szeredi <mszeredi@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020160256.1828701-1-sashal@kernel.org>
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+[ Upstream commit e26ee4efbc79610b20e7abe9d96c87f33dacc1ff ]
+
+This removed the need to pass isdir argument to fuse_put_file().
+
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Stable-dep-of: 26e5c67deb2e ("fuse: fix livelock in synchronous file put from fuseblk workers")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/dir.c | 2 -
+ fs/fuse/file.c | 69 +++++++++++++++++++++++++++++++------------------------
+ fs/fuse/fuse_i.h | 2 -
+ 3 files changed, 41 insertions(+), 32 deletions(-)
+
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -491,7 +491,7 @@ static int fuse_create_open(struct inode
+ goto out_err;
+
+ err = -ENOMEM;
+- ff = fuse_file_alloc(fm);
++ ff = fuse_file_alloc(fm, true);
+ if (!ff)
+ goto out_put_forget_req;
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -54,7 +54,7 @@ struct fuse_release_args {
+ struct inode *inode;
+ };
+
+-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
++struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
+ {
+ struct fuse_file *ff;
+
+@@ -63,11 +63,13 @@ struct fuse_file *fuse_file_alloc(struct
+ return NULL;
+
+ ff->fm = fm;
+- ff->release_args = kzalloc(sizeof(*ff->release_args),
+- GFP_KERNEL_ACCOUNT);
+- if (!ff->release_args) {
+- kfree(ff);
+- return NULL;
++ if (release) {
++ ff->release_args = kzalloc(sizeof(*ff->release_args),
++ GFP_KERNEL_ACCOUNT);
++ if (!ff->release_args) {
++ kfree(ff);
++ return NULL;
++ }
+ }
+
+ INIT_LIST_HEAD(&ff->write_entry);
+@@ -103,14 +105,14 @@ static void fuse_release_end(struct fuse
+ kfree(ra);
+ }
+
+-static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
++static void fuse_file_put(struct fuse_file *ff, bool sync)
+ {
+ if (refcount_dec_and_test(&ff->count)) {
+- struct fuse_args *args = &ff->release_args->args;
++ struct fuse_release_args *ra = ff->release_args;
++ struct fuse_args *args = (ra ? &ra->args : NULL);
+
+- if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
+- /* Do nothing when client does not implement 'open' */
+- fuse_release_end(ff->fm, args, 0);
++ if (!args) {
++ /* Do nothing when server does not implement 'open' */
+ } else if (sync) {
+ fuse_simple_request(ff->fm, args);
+ fuse_release_end(ff->fm, args, 0);
+@@ -130,15 +132,16 @@ struct fuse_file *fuse_file_open(struct
+ struct fuse_conn *fc = fm->fc;
+ struct fuse_file *ff;
+ int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
++ bool open = isdir ? !fc->no_opendir : !fc->no_open;
+
+- ff = fuse_file_alloc(fm);
++ ff = fuse_file_alloc(fm, open);
+ if (!ff)
+ return ERR_PTR(-ENOMEM);
+
+ ff->fh = 0;
+ /* Default for no-open */
+ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
+- if (isdir ? !fc->no_opendir : !fc->no_open) {
++ if (open) {
+ struct fuse_open_out outarg;
+ int err;
+
+@@ -146,11 +149,13 @@ struct fuse_file *fuse_file_open(struct
+ if (!err) {
+ ff->fh = outarg.fh;
+ ff->open_flags = outarg.open_flags;
+-
+ } else if (err != -ENOSYS) {
+ fuse_file_free(ff);
+ return ERR_PTR(err);
+ } else {
++ /* No release needed */
++ kfree(ff->release_args);
++ ff->release_args = NULL;
+ if (isdir)
+ fc->no_opendir = 1;
+ else
+@@ -274,7 +279,7 @@ out_inode_unlock:
+ }
+
+ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
+- unsigned int flags, int opcode)
++ unsigned int flags, int opcode, bool sync)
+ {
+ struct fuse_conn *fc = ff->fm->fc;
+ struct fuse_release_args *ra = ff->release_args;
+@@ -292,6 +297,9 @@ static void fuse_prepare_release(struct
+
+ wake_up_interruptible_all(&ff->poll_wait);
+
++ if (!ra)
++ return;
++
+ ra->inarg.fh = ff->fh;
+ ra->inarg.flags = flags;
+ ra->args.in_numargs = 1;
+@@ -301,6 +309,13 @@ static void fuse_prepare_release(struct
+ ra->args.nodeid = ff->nodeid;
+ ra->args.force = true;
+ ra->args.nocreds = true;
++
++ /*
++ * Hold inode until release is finished.
++ * From fuse_sync_release() the refcount is 1 and everything's
++ * synchronous, so we are fine with not doing igrab() here.
++ */
++ ra->inode = sync ? NULL : igrab(&fi->inode);
+ }
+
+ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
+@@ -310,14 +325,12 @@ void fuse_file_release(struct inode *ino
+ struct fuse_release_args *ra = ff->release_args;
+ int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
+
+- fuse_prepare_release(fi, ff, open_flags, opcode);
++ fuse_prepare_release(fi, ff, open_flags, opcode, false);
+
+- if (ff->flock) {
++ if (ra && ff->flock) {
+ ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
+ ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
+ }
+- /* Hold inode until release is finished */
+- ra->inode = igrab(inode);
+
+ /*
+ * Normally this will send the RELEASE request, however if
+@@ -328,7 +341,7 @@ void fuse_file_release(struct inode *ino
+ * synchronous RELEASE is allowed (and desirable) in this case
+ * because the server can be trusted not to screw up.
+ */
+- fuse_file_put(ff, ff->fm->fc->destroy, isdir);
++ fuse_file_put(ff, ff->fm->fc->destroy);
+ }
+
+ void fuse_release_common(struct file *file, bool isdir)
+@@ -360,12 +373,8 @@ void fuse_sync_release(struct fuse_inode
+ unsigned int flags)
+ {
+ WARN_ON(refcount_read(&ff->count) > 1);
+- fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
+- /*
+- * iput(NULL) is a no-op and since the refcount is 1 and everything's
+- * synchronous, we are fine with not doing igrab() here"
+- */
+- fuse_file_put(ff, true, false);
++ fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
++ fuse_file_put(ff, true);
+ }
+ EXPORT_SYMBOL_GPL(fuse_sync_release);
+
+@@ -918,7 +927,7 @@ static void fuse_readpages_end(struct fu
+ put_page(page);
+ }
+ if (ia->ff)
+- fuse_file_put(ia->ff, false, false);
++ fuse_file_put(ia->ff, false);
+
+ fuse_io_free(ia);
+ }
+@@ -1625,7 +1634,7 @@ static void fuse_writepage_free(struct f
+ __free_page(ap->pages[i]);
+
+ if (wpa->ia.ff)
+- fuse_file_put(wpa->ia.ff, false, false);
++ fuse_file_put(wpa->ia.ff, false);
+
+ kfree(ap->pages);
+ kfree(wpa);
+@@ -1874,7 +1883,7 @@ int fuse_write_inode(struct inode *inode
+ ff = __fuse_write_file_get(fi);
+ err = fuse_flush_times(inode, ff);
+ if (ff)
+- fuse_file_put(ff, false, false);
++ fuse_file_put(ff, false);
+
+ return err;
+ }
+@@ -2263,7 +2272,7 @@ static int fuse_writepages(struct addres
+ fuse_writepages_send(&data);
+ }
+ if (data.ff)
+- fuse_file_put(data.ff, false, false);
++ fuse_file_put(data.ff, false);
+
+ kfree(data.orig_pages);
+ out:
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -996,7 +996,7 @@ void fuse_read_args_fill(struct fuse_io_
+ */
+ int fuse_open_common(struct inode *inode, struct file *file, bool isdir);
+
+-struct fuse_file *fuse_file_alloc(struct fuse_mount *fm);
++struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release);
+ void fuse_file_free(struct fuse_file *ff);
+ void fuse_finish_open(struct inode *inode, struct file *file);
+
--- /dev/null
+From stable+bounces-188169-greg=kroah.com@vger.kernel.org Mon Oct 20 18:03:07 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:02:56 -0400
+Subject: fuse: fix livelock in synchronous file put from fuseblk workers
+To: stable@vger.kernel.org
+Cc: "Darrick J. Wong" <djwong@kernel.org>, Miklos Szeredi <mszeredi@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020160256.1828701-2-sashal@kernel.org>
+
+From: "Darrick J. Wong" <djwong@kernel.org>
+
+[ Upstream commit 26e5c67deb2e1f42a951f022fdf5b9f7eb747b01 ]
+
+I observed a hang when running generic/323 against a fuseblk server.
+This test opens a file, initiates a lot of AIO writes to that file
+descriptor, and closes the file descriptor before the writes complete.
+Unsurprisingly, the AIO exerciser threads are mostly stuck waiting for
+responses from the fuseblk server:
+
+# cat /proc/372265/task/372313/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_do_getattr+0xfc/0x1f0 [fuse]
+[<0>] fuse_file_read_iter+0xbe/0x1c0 [fuse]
+[<0>] aio_read+0x130/0x1e0
+[<0>] io_submit_one+0x542/0x860
+[<0>] __x64_sys_io_submit+0x98/0x1a0
+[<0>] do_syscall_64+0x37/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+But the /weird/ part is that the fuseblk server threads are waiting for
+responses from itself:
+
+# cat /proc/372210/task/372232/stack
+[<0>] request_wait_answer+0x1fe/0x2a0 [fuse]
+[<0>] __fuse_simple_request+0xd3/0x2b0 [fuse]
+[<0>] fuse_file_put+0x9a/0xd0 [fuse]
+[<0>] fuse_release+0x36/0x50 [fuse]
+[<0>] __fput+0xec/0x2b0
+[<0>] task_work_run+0x55/0x90
+[<0>] syscall_exit_to_user_mode+0xe9/0x100
+[<0>] do_syscall_64+0x43/0xf0
+[<0>] entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+The fuseblk server is fuse2fs so there's nothing all that exciting in
+the server itself. So why is the fuse server calling fuse_file_put?
+The commit message for the fstest sheds some light on that:
+
+"By closing the file descriptor before calling io_destroy, you pretty
+much guarantee that the last put on the ioctx will be done in interrupt
+context (during I/O completion)."
+
+Aha. AIO fgets a new struct file from the fd when it queues the ioctx.
+The completion of the FUSE_WRITE command from userspace causes the fuse
+server to call the AIO completion function. The completion puts the
+struct file, queuing a delayed fput to the fuse server task. When the
+fuse server task returns to userspace, it has to run the delayed fput,
+which in the case of a fuseblk server, it does synchronously.
+
+Sending the FUSE_RELEASE command synchronously from fuse server threads
+is a bad idea because a client program can initiate enough simultaneous
+AIOs such that all the fuse server threads end up in delayed_fput, and
+now there aren't any threads left to handle the queued fuse commands.
+
+Fix this by only using asynchronous fputs when closing files, and leave
+a comment explaining why.
+
+Cc: stable@vger.kernel.org # v2.6.38
+Fixes: 5a18ec176c934c ("fuse: fix hang of single threaded fuseblk filesystem")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -340,8 +340,14 @@ void fuse_file_release(struct inode *ino
+ * Make the release synchronous if this is a fuseblk mount,
+ * synchronous RELEASE is allowed (and desirable) in this case
+ * because the server can be trusted not to screw up.
++ *
++ * Always use the asynchronous file put because the current thread
++ * might be the fuse server. This can happen if a process starts some
++ * aio and closes the fd before the aio completes. Since aio takes its
++ * own ref to the file, the IO completion has to drop the ref, which is
++ * how the fuse server can end up closing its clients' files.
+ */
+- fuse_file_put(ff, ff->fm->fc->destroy);
++ fuse_file_put(ff, false);
+ }
+
+ void fuse_release_common(struct file *file, bool isdir)
--- /dev/null
+From stable+bounces-188114-greg=kroah.com@vger.kernel.org Mon Oct 20 15:10:51 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:10:30 -0400
+Subject: iio: imu: inv_icm42600: Avoid configuring if already pm_runtime suspended
+To: stable@vger.kernel.org
+Cc: Sean Nyekjaer <sean@geanix.com>, Stable@vger.kernel.org, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020131030.1767726-2-sashal@kernel.org>
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+[ Upstream commit 466f7a2fef2a4e426f809f79845a1ec1aeb558f4 ]
+
+Do as in suspend, skip resume configuration steps if the device is already
+pm_runtime suspended. This avoids reconfiguring a device that is already
+in the correct low-power state and ensures that pm_runtime handles the
+power state transitions properly.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-3-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ Placed early return before regulator enable instead of APEX wakeup logic ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -680,17 +680,15 @@ EXPORT_SYMBOL_GPL(inv_icm42600_core_prob
+ static int __maybe_unused inv_icm42600_suspend(struct device *dev)
+ {
+ struct inv_icm42600_state *st = dev_get_drvdata(dev);
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&st->lock);
+
+ st->suspended.gyro = st->conf.gyro.mode;
+ st->suspended.accel = st->conf.accel.mode;
+ st->suspended.temp = st->conf.temp_en;
+- if (pm_runtime_suspended(dev)) {
+- ret = 0;
++ if (pm_runtime_suspended(dev))
+ goto out_unlock;
+- }
+
+ /* disable FIFO data streaming */
+ if (st->fifo.on) {
+@@ -722,10 +720,13 @@ static int __maybe_unused inv_icm42600_r
+ struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_icm42600_timestamp *gyro_ts = iio_priv(st->indio_gyro);
+ struct inv_icm42600_timestamp *accel_ts = iio_priv(st->indio_accel);
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&st->lock);
+
++ if (pm_runtime_suspended(dev))
++ goto out_unlock;
++
+ ret = inv_icm42600_enable_regulator_vddio(st);
+ if (ret)
+ goto out_unlock;
--- /dev/null
+From stable+bounces-188101-greg=kroah.com@vger.kernel.org Mon Oct 20 15:06:58 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:04:36 -0400
+Subject: iio: imu: inv_icm42600: Simplify pm_runtime setup
+To: stable@vger.kernel.org
+Cc: Sean Nyekjaer <sean@geanix.com>, Stable@vger.kernel.org, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020130436.1764668-2-sashal@kernel.org>
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+[ Upstream commit 0792c1984a45ccd7a296d6b8cb78088bc99a212e ]
+
+Rework the power management in inv_icm42600_core_probe() to use
+devm_pm_runtime_set_active_enabled(), which simplifies the runtime PM
+setup by handling activation and enabling in one step.
+Remove the separate inv_icm42600_disable_pm callback, as it's no longer
+needed with the devm-managed approach.
+Using devm_pm_runtime_enable() also fixes the missing disable of
+autosuspend.
+Update inv_icm42600_disable_vddio_reg() to only disable the regulator if
+the device is not suspended i.e. powered-down, preventing unbalanced
+disables.
+Also remove redundant error msg on regulator_disable(), the regulator
+framework already emits an error message when regulator_disable() fails.
+
+This simplifies the PM setup and avoids manipulating the usage counter
+unnecessarily.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-1-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 24 ++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -550,20 +550,12 @@ static void inv_icm42600_disable_vdd_reg
+ static void inv_icm42600_disable_vddio_reg(void *_data)
+ {
+ struct inv_icm42600_state *st = _data;
+- const struct device *dev = regmap_get_device(st->map);
+- int ret;
+-
+- ret = regulator_disable(st->vddio_supply);
+- if (ret)
+- dev_err(dev, "failed to disable vddio error %d\n", ret);
+-}
++ struct device *dev = regmap_get_device(st->map);
+
+-static void inv_icm42600_disable_pm(void *_data)
+-{
+- struct device *dev = _data;
++ if (pm_runtime_status_suspended(dev))
++ return;
+
+- pm_runtime_put_sync(dev);
+- pm_runtime_disable(dev);
++ regulator_disable(st->vddio_supply);
+ }
+
+ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+@@ -660,16 +652,14 @@ int inv_icm42600_core_probe(struct regma
+ return ret;
+
+ /* setup runtime power management */
+- ret = pm_runtime_set_active(dev);
++ ret = devm_pm_runtime_set_active_enabled(dev);
+ if (ret)
+ return ret;
+- pm_runtime_get_noresume(dev);
+- pm_runtime_enable(dev);
++
+ pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+- pm_runtime_put(dev);
+
+- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(inv_icm42600_core_probe);
+
--- /dev/null
+From stable+bounces-188113-greg=kroah.com@vger.kernel.org Mon Oct 20 15:11:01 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:10:29 -0400
+Subject: iio: imu: inv_icm42600: use = { } instead of memset()
+To: stable@vger.kernel.org
+Cc: "David Lechner" <dlechner@baylibre.com>, "Nuno Sá" <nuno.sa@analog.com>, "Andy Shevchenko" <andriy.shevchenko@linux.intel.com>, "Jonathan Cameron" <Jonathan.Cameron@huawei.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251020131030.1767726-1-sashal@kernel.org>
+
+From: David Lechner <dlechner@baylibre.com>
+
+[ Upstream commit 352112e2d9aab6a156c2803ae14eb89a9fd93b7d ]
+
+Use { } instead of memset() to zero-initialize stack memory to simplify
+the code.
+
+Signed-off-by: David Lechner <dlechner@baylibre.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://patch.msgid.link/20250611-iio-zero-init-stack-with-instead-of-memset-v1-16-ebb2d0a24302@baylibre.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 466f7a2fef2a ("iio: imu: inv_icm42600: Avoid configuring if already pm_runtime suspended")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 5 ++---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 5 ++---
+ 2 files changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+@@ -749,7 +749,8 @@ int inv_icm42600_accel_parse_fifo(struct
+ const int8_t *temp;
+ unsigned int odr;
+ int64_t ts_val;
+- struct inv_icm42600_accel_buffer buffer;
++ /* buffer is copied to userspace, zeroing it to avoid any data leak */
++ struct inv_icm42600_accel_buffer buffer = { };
+
+ /* parse all fifo packets */
+ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+@@ -768,8 +769,6 @@ int inv_icm42600_accel_parse_fifo(struct
+ inv_icm42600_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
+
+- /* buffer is copied to userspace, zeroing it to avoid any data leak */
+- memset(&buffer, 0, sizeof(buffer));
+ memcpy(&buffer.accel, accel, sizeof(buffer.accel));
+ /* convert 8 bits FIFO temperature in high resolution format */
+ buffer.temp = temp ? (*temp * 64) : 0;
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+@@ -761,7 +761,8 @@ int inv_icm42600_gyro_parse_fifo(struct
+ const int8_t *temp;
+ unsigned int odr;
+ int64_t ts_val;
+- struct inv_icm42600_gyro_buffer buffer;
++ /* buffer is copied to userspace, zeroing it to avoid any data leak */
++ struct inv_icm42600_gyro_buffer buffer = { };
+
+ /* parse all fifo packets */
+ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+@@ -780,8 +781,6 @@ int inv_icm42600_gyro_parse_fifo(struct
+ inv_icm42600_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
+
+- /* buffer is copied to userspace, zeroing it to avoid any data leak */
+- memset(&buffer, 0, sizeof(buffer));
+ memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro));
+ /* convert 8 bits FIFO temperature in high resolution format */
+ buffer.temp = temp ? (*temp * 64) : 0;
--- /dev/null
+From stable+bounces-188274-greg=kroah.com@vger.kernel.org Tue Oct 21 02:34:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 20:34:23 -0400
+Subject: NFSD: Define a proc_layoutcommit for the FlexFiles layout type
+To: stable@vger.kernel.org
+Cc: Chuck Lever <chuck.lever@oracle.com>, Robert Morris <rtm@csail.mit.edu>, Thomas Haynes <loghyr@hammerspace.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021003423.1954898-1-sashal@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 4b47a8601b71ad98833b447d465592d847b4dc77 ]
+
+Avoid a crash if a pNFS client should happen to send a LAYOUTCOMMIT
+operation on a FlexFiles layout.
+
+Reported-by: Robert Morris <rtm@csail.mit.edu>
+Closes: https://lore.kernel.org/linux-nfs/152f99b2-ba35-4dec-93a9-4690e625dccd@oracle.com/T/#t
+Cc: Thomas Haynes <loghyr@hammerspace.com>
+Cc: stable@vger.kernel.org
+Fixes: 9b9960a0ca47 ("nfsd: Add a super simple flex file server")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ removed struct svc_rqst parameter from nfsd4_ff_proc_layoutcommit ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/flexfilelayout.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/nfsd/flexfilelayout.c
++++ b/fs/nfsd/flexfilelayout.c
+@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super
+ return 0;
+ }
+
++static __be32
++nfsd4_ff_proc_layoutcommit(struct inode *inode,
++ struct nfsd4_layoutcommit *lcp)
++{
++ return nfs_ok;
++}
++
+ const struct nfsd4_layout_ops ff_layout_ops = {
+ .notify_types =
+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_
+ .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_ff_proc_layoutget,
+ .encode_layoutget = nfsd4_ff_encode_layoutget,
++ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
+ };
--- /dev/null
+From stable+bounces-188080-greg=kroah.com@vger.kernel.org Mon Oct 20 14:56:25 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:56:02 -0400
+Subject: NFSD: Fix last write offset handling in layoutcommit
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Konstantin Evtushenko <koevtushenko@yandex.com>, Christoph Hellwig <hch@lst.de>, Jeff Layton <jlayton@kernel.org>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125602.1761375-3-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit d68886bae76a4b9b3484d23e5b7df086f940fa38 ]
+
+The data type of loca_last_write_offset is newoffset4 and is switched
+on a boolean value, no_newoffset, that indicates if a previous write
+occurred or not. If no_newoffset is FALSE, an offset is not given.
+This means that client does not try to update the file size. Thus,
+server should not try to calculate new file size and check if it fits
+into the segment range. See RFC 8881, section 12.5.4.2.
+
+Sometimes the current incorrect logic may cause clients to hang when
+trying to sync an inode. If layoutcommit fails, the client marks the
+inode as dirty again.
+
+Fixes: 9cf514ccfacb ("nfsd: implement pNFS operations")
+Cc: stable@vger.kernel.org
+Co-developed-by: Konstantin Evtushenko <koevtushenko@yandex.com>
+Signed-off-by: Konstantin Evtushenko <koevtushenko@yandex.com>
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ adapted for direct inode->i_mtime access and 2-parameter proc_layoutcommit callback ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/blocklayout.c | 5 ++---
+ fs/nfsd/nfs4proc.c | 30 +++++++++++++++---------------
+ 2 files changed, 17 insertions(+), 18 deletions(-)
+
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -121,7 +121,6 @@ static __be32
+ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
+ struct iomap *iomaps, int nr_iomaps)
+ {
+- loff_t new_size = lcp->lc_last_wr + 1;
+ struct iattr iattr = { .ia_valid = 0 };
+ int error;
+
+@@ -131,9 +130,9 @@ nfsd4_block_commit_blocks(struct inode *
+ iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
+ iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
+
+- if (new_size > i_size_read(inode)) {
++ if (lcp->lc_size_chg) {
+ iattr.ia_valid |= ATTR_SIZE;
+- iattr.ia_size = new_size;
++ iattr.ia_size = lcp->lc_newsize;
+ }
+
+ error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2262,7 +2262,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+- loff_t new_size = lcp->lc_last_wr + 1;
+ struct inode *inode;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+@@ -2277,13 +2276,21 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ goto out;
+ inode = d_inode(current_fh->fh_dentry);
+
+- nfserr = nfserr_inval;
+- if (new_size <= seg->offset)
+- goto out;
+- if (new_size > seg->offset + seg->length)
+- goto out;
+- if (!lcp->lc_newoffset && new_size > i_size_read(inode))
+- goto out;
++ lcp->lc_size_chg = false;
++ if (lcp->lc_newoffset) {
++ loff_t new_size = lcp->lc_last_wr + 1;
++
++ nfserr = nfserr_inval;
++ if (new_size <= seg->offset)
++ goto out;
++ if (new_size > seg->offset + seg->length)
++ goto out;
++
++ if (new_size > i_size_read(inode)) {
++ lcp->lc_size_chg = true;
++ lcp->lc_newsize = new_size;
++ }
++ }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
+@@ -2299,13 +2306,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ /* LAYOUTCOMMIT does not require any serialization */
+ mutex_unlock(&ls->ls_mutex);
+
+- if (new_size > i_size_read(inode)) {
+- lcp->lc_size_chg = 1;
+- lcp->lc_newsize = new_size;
+- } else {
+- lcp->lc_size_chg = 0;
+- }
+-
+ nfserr = ops->proc_layoutcommit(inode, lcp);
+ nfs4_put_stid(&ls->ls_stid);
+ out:
--- /dev/null
+From stable+bounces-188079-greg=kroah.com@vger.kernel.org Mon Oct 20 14:56:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:56:01 -0400
+Subject: NFSD: Minor cleanup in layoutcommit processing
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Christoph Hellwig <hch@lst.de>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125602.1761375-2-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit 274365a51d88658fb51cca637ba579034e90a799 ]
+
+Remove dprintk in nfsd4_layoutcommit. These are not needed
+in day to day usage, and the information is also available
+in Wireshark when capturing NFS traffic.
+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: d68886bae76a ("NFSD: Fix last write offset handling in layoutcommit")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4proc.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2278,18 +2278,12 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ inode = d_inode(current_fh->fh_dentry);
+
+ nfserr = nfserr_inval;
+- if (new_size <= seg->offset) {
+- dprintk("pnfsd: last write before layout segment\n");
++ if (new_size <= seg->offset)
+ goto out;
+- }
+- if (new_size > seg->offset + seg->length) {
+- dprintk("pnfsd: last write beyond layout segment\n");
++ if (new_size > seg->offset + seg->length)
+ goto out;
+- }
+- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
+- dprintk("pnfsd: layoutcommit beyond EOF\n");
++ if (!lcp->lc_newoffset && new_size > i_size_read(inode))
+ goto out;
+- }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
--- /dev/null
+From stable+bounces-188078-greg=kroah.com@vger.kernel.org Mon Oct 20 14:56:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:56:00 -0400
+Subject: NFSD: Rework encoding and decoding of nfsd4_deviceid
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125602.1761375-1-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit 832738e4b325b742940761e10487403f9aad13e8 ]
+
+Compilers may optimize the layout of C structures, so we should not rely
+on sizeof struct and memcpy to encode and decode XDR structures. The byte
+order of the fields should also be taken into account.
+
+This patch adds the correct functions to handle the deviceid4 structure
+and removes the pad field, which is currently not used by NFSD, from the
+runtime state. The server's byte order is preserved because the deviceid4
+blob on the wire is only used as a cookie by the client.
+
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: d68886bae76a ("NFSD: Fix last write offset handling in layoutcommit")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/blocklayoutxdr.c | 7 ++-----
+ fs/nfsd/flexfilelayoutxdr.c | 3 +--
+ fs/nfsd/nfs4layouts.c | 1 -
+ fs/nfsd/nfs4xdr.c | 14 +-------------
+ fs/nfsd/xdr4.h | 36 +++++++++++++++++++++++++++++++++++-
+ 5 files changed, 39 insertions(+), 22 deletions(-)
+
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* we always return a single extent */
+
+- p = xdr_encode_opaque_fixed(p, &b->vol_id,
+- sizeof(struct nfsd4_deviceid));
++ p = svcxdr_encode_deviceid4(p, &b->vol_id);
+ p = xdr_encode_hyper(p, b->foff);
+ p = xdr_encode_hyper(p, b->len);
+ p = xdr_encode_hyper(p, b->soff);
+@@ -145,9 +144,7 @@ nfsd4_block_decode_layoutupdate(__be32 *
+ for (i = 0; i < nr_iomaps; i++) {
+ struct pnfs_block_extent bex;
+
+- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
+- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+-
++ p = svcxdr_decode_deviceid4(p, &bex.vol_id);
+ p = xdr_decode_hyper(p, &bex.foff);
+ if (bex.foff & (block_size - 1)) {
+ dprintk("%s: unaligned offset 0x%llx\n",
+--- a/fs/nfsd/flexfilelayoutxdr.c
++++ b/fs/nfsd/flexfilelayoutxdr.c
+@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_str
+ *p++ = cpu_to_be32(1); /* single mirror */
+ *p++ = cpu_to_be32(1); /* single data server */
+
+- p = xdr_encode_opaque_fixed(p, &fl->deviceid,
+- sizeof(struct nfsd4_deviceid));
++ p = svcxdr_encode_deviceid4(p, &fl->deviceid);
+
+ *p++ = cpu_to_be32(1); /* efficiency */
+
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid
+
+ id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
+ id->generation = device_generation;
+- id->pad = 0;
+ return 0;
+ }
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -566,18 +566,6 @@ nfsd4_decode_state_owner4(struct nfsd4_c
+ }
+
+ #ifdef CONFIG_NFSD_PNFS
+-static __be32
+-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
+- struct nfsd4_deviceid *devid)
+-{
+- __be32 *p;
+-
+- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
+- if (!p)
+- return nfserr_bad_xdr;
+- memcpy(devid, p, sizeof(*devid));
+- return nfs_ok;
+-}
+
+ static __be32
+ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
+@@ -1733,7 +1721,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_
+ __be32 status;
+
+ memset(gdev, 0, sizeof(*gdev));
+- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
++ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
+ if (status)
+ return status;
+ if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -459,9 +459,43 @@ struct nfsd4_reclaim_complete {
+ struct nfsd4_deviceid {
+ u64 fsid_idx;
+ u32 generation;
+- u32 pad;
+ };
+
++static inline __be32 *
++svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
++{
++ __be64 *q = (__be64 *)p;
++
++ *q = (__force __be64)devid->fsid_idx;
++ p += 2;
++ *p++ = (__force __be32)devid->generation;
++ *p++ = xdr_zero;
++ return p;
++}
++
++static inline __be32 *
++svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
++{
++ __be64 *q = (__be64 *)p;
++
++ devid->fsid_idx = (__force u64)(*q);
++ p += 2;
++ devid->generation = (__force u32)(*p++);
++ p++; /* NFSD does not use the remaining octets */
++ return p;
++}
++
++static inline __be32
++nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
++{
++ __be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
++
++ if (unlikely(!p))
++ return nfserr_bad_xdr;
++ svcxdr_decode_deviceid4(p, devid);
++ return nfs_ok;
++}
++
+ struct nfsd4_layout_seg {
+ u32 iomode;
+ u64 offset;
--- /dev/null
+From stable+bounces-188143-greg=kroah.com@vger.kernel.org Mon Oct 20 17:41:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 11:39:57 -0400
+Subject: padata: Reset next CPU when reorder sequence wraps around
+To: stable@vger.kernel.org
+Cc: Xiao Liang <shaw.leon@gmail.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020153957.1821339-1-sashal@kernel.org>
+
+From: Xiao Liang <shaw.leon@gmail.com>
+
+[ Upstream commit 501302d5cee0d8e8ec2c4a5919c37e0df9abc99b ]
+
+When seq_nr wraps around, the next reorder job with seq 0 is hashed to
+the first CPU in padata_do_serial(). Correspondingly, need reset pd->cpu
+to the first one when pd->processed wraps around. Otherwise, if the
+number of used CPUs is not a power of 2, padata_find_next() will be
+checking a wrong list, hence deadlock.
+
+Fixes: 6fc4dbcf0276 ("padata: Replace delayed timer with immediate workqueue in padata_reorder")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Xiao Liang <shaw.leon@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ moved fix from padata_reorder() to padata_find_next() and adapted cpumask_next_wrap() to 4-argument signature ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/padata.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -282,7 +282,11 @@ static struct padata_priv *padata_find_n
+ if (remove_object) {
+ list_del_init(&padata->list);
+ ++pd->processed;
+- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++ /* When sequence wraps around, reset to the first CPU. */
++ if (unlikely(pd->processed == 0))
++ pd->cpu = cpumask_first(pd->cpumask.pcpu);
++ else
++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+ }
+
+ spin_unlock(&reorder->lock);
--- /dev/null
+From stable+bounces-188176-greg=kroah.com@vger.kernel.org Mon Oct 20 18:14:54 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:14:07 -0400
+Subject: PCI: j721e: Enable ACSPCIE Refclk if "ti,syscon-acspcie-proxy-ctrl" exists
+To: stable@vger.kernel.org
+Cc: "Siddharth Vadapalli" <s-vadapalli@ti.com>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Manivannan Sadhasivam" <manivannan.sadhasivam@linaro.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251020161408.1833901-1-sashal@kernel.org>
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+[ Upstream commit 82c4be4168e26a5593aaa1002b5678128a638824 ]
+
+The ACSPCIE module is capable of driving the reference clock required by
+the PCIe Endpoint device. It is an alternative to on-board and external
+reference clock generators. Enabling the output from the ACSPCIE module's
+PAD IO Buffers requires clearing the "PAD IO disable" bits of the
+ACSPCIE_PROXY_CTRL register in the CTRL_MMR register space.
+
+Add support to enable the ACSPCIE reference clock output using the optional
+device-tree property "ti,syscon-acspcie-proxy-ctrl".
+
+Link: https://lore.kernel.org/linux-pci/20240829105316.1483684-3-s-vadapalli@ti.com
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Stable-dep-of: f842d3313ba1 ("PCI: j721e: Fix programming sequence of "strap" settings")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/cadence/pci-j721e.c | 39 ++++++++++++++++++++++++++++-
+ 1 file changed, 38 insertions(+), 1 deletion(-)
+
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -46,6 +46,7 @@ enum link_status {
+ #define LANE_COUNT_MASK BIT(8)
+ #define LANE_COUNT(n) ((n) << 8)
+
++#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
+ #define GENERATION_SEL_MASK GENMASK(1, 0)
+
+ #define MAX_LANES 2
+@@ -218,6 +219,36 @@ static int j721e_pcie_set_lane_count(str
+ return ret;
+ }
+
++static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
++ struct regmap *syscon)
++{
++ struct device *dev = pcie->cdns_pcie->dev;
++ struct device_node *node = dev->of_node;
++ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
++ struct of_phandle_args args;
++ u32 val;
++ int ret;
++
++ ret = of_parse_phandle_with_fixed_args(node,
++ "ti,syscon-acspcie-proxy-ctrl",
++ 1, 0, &args);
++ if (ret) {
++ dev_err(dev,
++ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
++ return ret;
++ }
++
++ /* Clear PAD IO disable bits to enable refclk output */
++ val = ~(args.args[0]);
++ ret = regmap_update_bits(syscon, 0, mask, val);
++ if (ret) {
++ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
+ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+ {
+ struct device *dev = pcie->dev;
+@@ -257,7 +288,13 @@ static int j721e_pcie_ctrl_init(struct j
+ return ret;
+ }
+
+- return 0;
++ /* Enable ACSPCIE refclk output if the optional property exists */
++ syscon = syscon_regmap_lookup_by_phandle_optional(node,
++ "ti,syscon-acspcie-proxy-ctrl");
++ if (!syscon)
++ return 0;
++
++ return j721e_enable_acspcie_refclk(pcie, syscon);
+ }
+
+ static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
--- /dev/null
+From stable+bounces-188177-greg=kroah.com@vger.kernel.org Mon Oct 20 18:14:58 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:14:08 -0400
+Subject: PCI: j721e: Fix programming sequence of "strap" settings
+To: stable@vger.kernel.org
+Cc: Siddharth Vadapalli <s-vadapalli@ti.com>, Manivannan Sadhasivam <mani@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020161408.1833901-2-sashal@kernel.org>
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+[ Upstream commit f842d3313ba179d4005096357289c7ad09cec575 ]
+
+The Cadence PCIe Controller integrated in the TI K3 SoCs supports both
+Root-Complex and Endpoint modes of operation. The Glue Layer allows
+"strapping" the Mode of operation of the Controller, the Link Speed
+and the Link Width. This is enabled by programming the "PCIEn_CTRL"
+register (n corresponds to the PCIe instance) within the CTRL_MMR
+memory-mapped register space. The "reset-values" of the registers are
+also different depending on the mode of operation.
+
+Since the PCIe Controller latches onto the "reset-values" immediately
+after being powered on, if the Glue Layer configuration is not done while
+the PCIe Controller is off, it will result in the PCIe Controller latching
+onto the wrong "reset-values". In practice, this will show up as a wrong
+representation of the PCIe Controller's capability structures in the PCIe
+Configuration Space. Some such capabilities which are supported by the PCIe
+Controller in the Root-Complex mode but are incorrectly latched onto as
+being unsupported are:
+- Link Bandwidth Notification
+- Alternate Routing ID (ARI) Forwarding Support
+- Next capability offset within Advanced Error Reporting (AER) capability
+
+Fix this by powering off the PCIe Controller before programming the "strap"
+settings and powering it on after that. The runtime PM APIs namely
+pm_runtime_put_sync() and pm_runtime_get_sync() will decrement and
+increment the usage counter respectively, causing GENPD to power off and
+power on the PCIe Controller.
+
+Fixes: f3e25911a430 ("PCI: j721e: Add TI J721E PCIe driver")
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250908120828.1471776-1-s-vadapalli@ti.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/cadence/pci-j721e.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -270,6 +270,25 @@ static int j721e_pcie_ctrl_init(struct j
+ if (!ret)
+ offset = args.args[0];
+
++ /*
++ * The PCIe Controller's registers have different "reset-values"
++ * depending on the "strap" settings programmed into the PCIEn_CTRL
++ * register within the CTRL_MMR memory-mapped register space.
++ * The registers latch onto a "reset-value" based on the "strap"
++ * settings sampled after the PCIe Controller is powered on.
++ * To ensure that the "reset-values" are sampled accurately, power
++ * off the PCIe Controller before programming the "strap" settings
++ * and power it on after that. The runtime PM APIs namely
++ * pm_runtime_put_sync() and pm_runtime_get_sync() will decrement and
++ * increment the usage counter respectively, causing GENPD to power off
++ * and power on the PCIe Controller.
++ */
++ ret = pm_runtime_put_sync(dev);
++ if (ret < 0) {
++ dev_err(dev, "Failed to power off PCIe Controller\n");
++ return ret;
++ }
++
+ ret = j721e_pcie_set_mode(pcie, syscon, offset);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set pci mode\n");
+@@ -288,6 +307,12 @@ static int j721e_pcie_ctrl_init(struct j
+ return ret;
+ }
+
++ ret = pm_runtime_get_sync(dev);
++ if (ret < 0) {
++ dev_err(dev, "Failed to power on PCIe Controller\n");
++ return ret;
++ }
++
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
--- /dev/null
+From stable+bounces-188410-greg=kroah.com@vger.kernel.org Tue Oct 21 20:46:37 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 14:46:27 -0400
+Subject: PCI: rcar: Finish transition to L1 state in rcar_pcie_config_access()
+To: stable@vger.kernel.org
+Cc: "Marek Vasut" <marek.vasut+renesas@gmail.com>, "Geert Uytterhoeven" <geert+renesas@glider.be>, "Lorenzo Pieralisi" <lorenzo.pieralisi@arm.com>, "Arnd Bergmann" <arnd@arndb.de>, "Bjorn Helgaas" <bhelgaas@google.com>, "Krzysztof Wilczyński" <kw@linux.com>, "Wolfram Sang" <wsa@the-dreams.de>, "Yoshihiro Shimoda" <yoshihiro.shimoda.uh@renesas.com>, linux-renesas-soc@vger.kernel.org, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251021184628.2530506-1-sashal@kernel.org>
+
+From: Marek Vasut <marek.vasut+renesas@gmail.com>
+
+[ Upstream commit 84b576146294c2be702cfcd174eaa74167e276f9 ]
+
+In case the controller is transitioning to L1 in rcar_pcie_config_access(),
+any read/write access to PCIECDR triggers asynchronous external abort. This
+is because the transition to L1 link state must be manually finished by the
+driver. The PCIe IP can transition back from L1 state to L0 on its own.
+
+Avoid triggering the abort in rcar_pcie_config_access() by checking whether
+the controller is in the transition state, and if so, finish the transition
+right away. This prevents a lot of unnecessary exceptions, although not all
+of them.
+
+Link: https://lore.kernel.org/r/20220312212349.781799-1-marek.vasut@gmail.com
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Marek Vasut <marek.vasut+renesas@gmail.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: Krzysztof Wilczyński <kw@linux.com>
+Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: Wolfram Sang <wsa@the-dreams.de>
+Cc: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Cc: linux-renesas-soc@vger.kernel.org
+Stable-dep-of: 0a8f173d9dad ("PCI: rcar-host: Drop PMSR spinlock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-rcar-host.c | 76 ++++++++++++++++++--------------
+ 1 file changed, 45 insertions(+), 31 deletions(-)
+
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -67,6 +67,42 @@ struct rcar_pcie_host {
+ int (*phy_init_fn)(struct rcar_pcie_host *host);
+ };
+
++static DEFINE_SPINLOCK(pmsr_lock);
++
++static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
++{
++ unsigned long flags;
++ u32 pmsr, val;
++ int ret = 0;
++
++ spin_lock_irqsave(&pmsr_lock, flags);
++
++ if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
++ ret = -EINVAL;
++ goto unlock_exit;
++ }
++
++ pmsr = readl(pcie_base + PMSR);
++
++ /*
++ * Test if the PCIe controller received PM_ENTER_L1 DLLP and
++ * the PCIe controller is not in L1 link state. If true, apply
++ * fix, which will put the controller into L1 link state, from
++ * which it can return to L0s/L0 on its own.
++ */
++ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
++ writel(L1IATN, pcie_base + PMCTLR);
++ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
++ val & L1FAEG, 10, 1000);
++ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
++ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
++ }
++
++unlock_exit:
++ spin_unlock_irqrestore(&pmsr_lock, flags);
++ return ret;
++}
++
+ static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
+ {
+ return container_of(msi, struct rcar_pcie_host, msi);
+@@ -87,6 +123,14 @@ static int rcar_pcie_config_access(struc
+ {
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int dev, func, reg, index;
++ int ret;
++
++ /* Wake the bus up in case it is in L1 state. */
++ ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
++ if (ret) {
++ PCI_SET_ERROR_RESPONSE(data);
++ return PCIBIOS_SET_FAILED;
++ }
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+@@ -1054,40 +1098,10 @@ static struct platform_driver rcar_pcie_
+ };
+
+ #ifdef CONFIG_ARM
+-static DEFINE_SPINLOCK(pmsr_lock);
+ static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+ {
+- unsigned long flags;
+- u32 pmsr, val;
+- int ret = 0;
+-
+- spin_lock_irqsave(&pmsr_lock, flags);
+-
+- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+- ret = 1;
+- goto unlock_exit;
+- }
+-
+- pmsr = readl(pcie_base + PMSR);
+-
+- /*
+- * Test if the PCIe controller received PM_ENTER_L1 DLLP and
+- * the PCIe controller is not in L1 link state. If true, apply
+- * fix, which will put the controller into L1 link state, from
+- * which it can return to L0s/L0 on its own.
+- */
+- if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+- writel(L1IATN, pcie_base + PMCTLR);
+- ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+- val & L1FAEG, 10, 1000);
+- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+- writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+- }
+-
+-unlock_exit:
+- spin_unlock_irqrestore(&pmsr_lock, flags);
+- return ret;
++ return !!rcar_pcie_wakeup(pcie_dev, pcie_base);
+ }
+
+ static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
--- /dev/null
+From stable+bounces-188378-greg=kroah.com@vger.kernel.org Tue Oct 21 18:44:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 12:43:52 -0400
+Subject: PCI: rcar-host: Convert struct rcar_msi mask_lock into raw spinlock
+To: stable@vger.kernel.org
+Cc: Marek Vasut <marek.vasut+renesas@mailbox.org>, Duy Nguyen <duy.nguyen.rh@renesas.com>, Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>, Manivannan Sadhasivam <mani@kernel.org>, Bjorn Helgaas <bhelgaas@google.com>, Geert Uytterhoeven <geert+renesas@glider.be>, Marc Zyngier <maz@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021164352.2381251-1-sashal@kernel.org>
+
+From: Marek Vasut <marek.vasut+renesas@mailbox.org>
+
+[ Upstream commit 5ed35b4d490d8735021cce9b715b62a418310864 ]
+
+The rcar_msi_irq_unmask() function may be called from a PCI driver
+request_threaded_irq() function. This triggers kernel/irq/manage.c
+__setup_irq() which locks raw spinlock &desc->lock descriptor lock
+and with that descriptor lock held, calls rcar_msi_irq_unmask().
+
+Since the &desc->lock descriptor lock is a raw spinlock, and the rcar_msi
+.mask_lock is not a raw spinlock, this setup triggers 'BUG: Invalid wait
+context' with CONFIG_PROVE_RAW_LOCK_NESTING=y.
+
+Use scoped_guard() to simplify the locking.
+
+Fixes: 83ed8d4fa656 ("PCI: rcar: Convert to MSI domains")
+Reported-by: Duy Nguyen <duy.nguyen.rh@renesas.com>
+Reported-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Signed-off-by: Marek Vasut <marek.vasut+renesas@mailbox.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250909162707.13927-2-marek.vasut+renesas@mailbox.org
+[ replaced scoped_guard() with explicit raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() calls ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-rcar-host.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -38,7 +38,7 @@ struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct mutex map_lock;
+- spinlock_t mask_lock;
++ raw_spinlock_t mask_lock;
+ int irq1;
+ int irq2;
+ };
+@@ -559,11 +559,11 @@ static void rcar_msi_irq_mask(struct irq
+ unsigned long flags;
+ u32 value;
+
+- spin_lock_irqsave(&msi->mask_lock, flags);
++ raw_spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+- spin_unlock_irqrestore(&msi->mask_lock, flags);
++ raw_spin_unlock_irqrestore(&msi->mask_lock, flags);
+ }
+
+ static void rcar_msi_irq_unmask(struct irq_data *d)
+@@ -573,11 +573,11 @@ static void rcar_msi_irq_unmask(struct i
+ unsigned long flags;
+ u32 value;
+
+- spin_lock_irqsave(&msi->mask_lock, flags);
++ raw_spin_lock_irqsave(&msi->mask_lock, flags);
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+- spin_unlock_irqrestore(&msi->mask_lock, flags);
++ raw_spin_unlock_irqrestore(&msi->mask_lock, flags);
+ }
+
+ static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
+@@ -693,7 +693,7 @@ static int rcar_pcie_enable_msi(struct r
+ int err;
+
+ mutex_init(&msi->map_lock);
+- spin_lock_init(&msi->mask_lock);
++ raw_spin_lock_init(&msi->mask_lock);
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
--- /dev/null
+From stable+bounces-188411-greg=kroah.com@vger.kernel.org Tue Oct 21 20:46:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 14:46:28 -0400
+Subject: PCI: rcar-host: Drop PMSR spinlock
+To: stable@vger.kernel.org
+Cc: Marek Vasut <marek.vasut+renesas@mailbox.org>, Duy Nguyen <duy.nguyen.rh@renesas.com>, Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>, Manivannan Sadhasivam <mani@kernel.org>, Geert Uytterhoeven <geert+renesas@glider.be>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021184628.2530506-2-sashal@kernel.org>
+
+From: Marek Vasut <marek.vasut+renesas@mailbox.org>
+
+[ Upstream commit 0a8f173d9dad13930d5888505dc4c4fd6a1d4262 ]
+
+The pmsr_lock spinlock used to be necessary to synchronize access to the
+PMSR register, because that access could have been triggered from either
+config space access in rcar_pcie_config_access() or an exception handler
+rcar_pcie_aarch32_abort_handler().
+
+The rcar_pcie_aarch32_abort_handler() case is no longer applicable since
+commit 6e36203bc14c ("PCI: rcar: Use PCI_SET_ERROR_RESPONSE after read
+which triggered an exception"), which performs more accurate, controlled
+invocation of the exception, and a fixup.
+
+This leaves rcar_pcie_config_access() as the only call site from which
+rcar_pcie_wakeup() is called. The rcar_pcie_config_access() can only be
+called from the controller struct pci_ops .read and .write callbacks,
+and those are serialized in drivers/pci/access.c using raw spinlock
+'pci_lock' . It should be noted that CONFIG_PCI_LOCKLESS_CONFIG is never
+set on this platform.
+
+Since the 'pci_lock' is a raw spinlock , and the 'pmsr_lock' is not a
+raw spinlock, this constellation triggers 'BUG: Invalid wait context'
+with CONFIG_PROVE_RAW_LOCK_NESTING=y .
+
+Remove the pmsr_lock to fix the locking.
+
+Fixes: a115b1bd3af0 ("PCI: rcar: Add L1 link state fix into data abort hook")
+Reported-by: Duy Nguyen <duy.nguyen.rh@renesas.com>
+Reported-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Signed-off-by: Marek Vasut <marek.vasut+renesas@mailbox.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250909162707.13927-1-marek.vasut+renesas@mailbox.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/pcie-rcar-host.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -67,20 +67,13 @@ struct rcar_pcie_host {
+ int (*phy_init_fn)(struct rcar_pcie_host *host);
+ };
+
+-static DEFINE_SPINLOCK(pmsr_lock);
+-
+ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+ {
+- unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+- spin_lock_irqsave(&pmsr_lock, flags);
+-
+- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+- ret = -EINVAL;
+- goto unlock_exit;
+- }
++ if (!pcie_base || pm_runtime_suspended(pcie_dev))
++ return -EINVAL;
+
+ pmsr = readl(pcie_base + PMSR);
+
+@@ -98,8 +91,6 @@ static int rcar_pcie_wakeup(struct devic
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+-unlock_exit:
+- spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+ }
+
--- /dev/null
+From stable+bounces-188189-greg=kroah.com@vger.kernel.org Mon Oct 20 18:22:48 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:21:28 -0400
+Subject: PCI: tegra194: Handle errors in BPMP response
+To: stable@vger.kernel.org
+Cc: Vidya Sagar <vidyas@nvidia.com>, Niklas Cassel <cassel@kernel.org>, Manivannan Sadhasivam <mani@kernel.org>, Bjorn Helgaas <bhelgaas@google.com>, Jon Hunter <jonathanh@nvidia.com>, Thierry Reding <treding@nvidia.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020162128.1836626-1-sashal@kernel.org>
+
+From: Vidya Sagar <vidyas@nvidia.com>
+
+[ Upstream commit f8c9ad46b00453a8c075453f3745f8d263f44834 ]
+
+The return value from tegra_bpmp_transfer() indicates the success or
+failure of the IPC transaction with BPMP. If the transaction succeeded, we
+also need to check the actual command's result code.
+
+If we don't have error handling for tegra_bpmp_transfer(), we will set the
+pcie->ep_state to EP_STATE_ENABLED even when the tegra_bpmp_transfer()
+command fails. Thus, the pcie->ep_state will get out of sync with reality,
+and any further PERST# assert + deassert will be a no-op and will not
+trigger the hardware initialization sequence.
+
+This is because pex_ep_event_pex_rst_deassert() checks the current
+pcie->ep_state, and does nothing if the current state is already
+EP_STATE_ENABLED.
+
+Thus, it is important to have error handling for tegra_bpmp_transfer(),
+such that the pcie->ep_state can not get out of sync with reality, so that
+we will try to initialize the hardware not only during the first PERST#
+assert + deassert, but also during any succeeding PERST# assert + deassert.
+
+One example where this fix is needed is when using a rock5b as host.
+During the initial PERST# assert + deassert (triggered by the bootloader on
+the rock5b) pex_ep_event_pex_rst_deassert() will get called, but for some
+unknown reason, the tegra_bpmp_transfer() call to initialize the PHY fails.
+Once Linux has been loaded on the rock5b, the PCIe driver will once again
+assert + deassert PERST#. However, without tegra_bpmp_transfer() error
+handling, this second PERST# assert + deassert will not trigger the
+hardware initialization sequence.
+
+With tegra_bpmp_transfer() error handling, the second PERST# assert +
+deassert will once again trigger the hardware to be initialized and this
+time the tegra_bpmp_transfer() succeeds.
+
+Fixes: c57247f940e8 ("PCI: tegra: Add support for PCIe endpoint mode in Tegra194")
+Signed-off-by: Vidya Sagar <vidyas@nvidia.com>
+[cassel: improve commit log]
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250922140822.519796-8-cassel@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/dwc/pcie-tegra194.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1168,6 +1168,7 @@ static int tegra_pcie_bpmp_set_ctrl_stat
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
++ int err;
+
+ /* Controller-5 doesn't need to have its state set by BPMP-FW */
+ if (pcie->cid == 5)
+@@ -1187,7 +1188,13 @@ static int tegra_pcie_bpmp_set_ctrl_stat
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+- return tegra_bpmp_transfer(pcie->bpmp, &msg);
++ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++ if (err)
++ return err;
++ if (msg.rx.ret)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+@@ -1196,6 +1203,7 @@ static int tegra_pcie_bpmp_set_pll_state
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
++ int err;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+@@ -1215,7 +1223,13 @@ static int tegra_pcie_bpmp_set_pll_state
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+- return tegra_bpmp_transfer(pcie->bpmp, &msg);
++ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
++ if (err)
++ return err;
++ if (msg.rx.ret)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
--- /dev/null
+From stable+bounces-188421-greg=kroah.com@vger.kernel.org Tue Oct 21 21:43:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 15:43:23 -0400
+Subject: PCI: tegra194: Reset BARs when running in PCIe endpoint mode
+To: stable@vger.kernel.org
+Cc: Niklas Cassel <cassel@kernel.org>, Manivannan Sadhasivam <mani@kernel.org>, Bjorn Helgaas <bhelgaas@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021194323.2906923-1-sashal@kernel.org>
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit 42f9c66a6d0cc45758dab77233c5460e1cf003df ]
+
+Tegra already defines all BARs except BAR0 as BAR_RESERVED. This is
+sufficient for pci-epf-test to not allocate backing memory and to not call
+set_bar() for those BARs. However, marking a BAR as BAR_RESERVED does not
+mean that the BAR gets disabled.
+
+The host side driver, pci_endpoint_test, simply does an ioremap for all
+enabled BARs and will run tests against all enabled BARs, so it will run
+tests against the BARs marked as BAR_RESERVED.
+
+After running the BAR tests (which will write to all enabled BARs), the
+inbound address translation is broken. This is because the tegra controller
+exposes the ATU Port Logic Structure in BAR4, so when BAR4 is written, the
+inbound address translation settings get overwritten.
+
+To avoid this, implement the dw_pcie_ep_ops .init() callback and start off
+by disabling all BARs (pci-epf-test will later enable/configure BARs that
+are not defined as BAR_RESERVED).
+
+This matches the behavior of other PCIe endpoint drivers: dra7xx, imx6,
+layerscape-ep, artpec6, dw-rockchip, qcom-ep, rcar-gen4, and uniphier-ep.
+
+With this, the PCI endpoint kselftest test case CONSECUTIVE_BAR_TEST (which
+was specifically made to detect address translation issues) passes.
+
+Fixes: c57247f940e8 ("PCI: tegra: Add support for PCIe endpoint mode in Tegra194")
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250922140822.519796-7-cassel@kernel.org
+[ changed dw_pcie_ep_ops .init to .ep_init and exported dw_pcie_ep_reset_bar ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/controller/dwc/pcie-designware-ep.c | 1 +
+ drivers/pci/controller/dwc/pcie-tegra194.c | 10 ++++++++++
+ 2 files changed, 11 insertions(+)
+
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -84,6 +84,7 @@ void dw_pcie_ep_reset_bar(struct dw_pcie
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+ }
++EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
+
+ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -1839,6 +1839,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst
+ return IRQ_HANDLED;
+ }
+
++static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ enum pci_barno bar;
++
++ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
++ dw_pcie_ep_reset_bar(pci, bar);
++};
++
+ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+ {
+ /* Tegra194 supports only INTA */
+@@ -1912,6 +1921,7 @@ tegra_pcie_ep_get_features(struct dw_pci
+ }
+
+ static const struct dw_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = tegra_pcie_ep_init,
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+ };
--- /dev/null
+From stable+bounces-188100-greg=kroah.com@vger.kernel.org Mon Oct 20 15:13:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:04:35 -0400
+Subject: PM: runtime: Add new devm functions
+To: stable@vger.kernel.org
+Cc: "Bence Csókás" <csokas.bence@prolan.hu>, "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251020130436.1764668-1-sashal@kernel.org>
+
+From: Bence Csókás <csokas.bence@prolan.hu>
+
+[ Upstream commit 73db799bf5efc5a04654bb3ff6c9bf63a0dfa473 ]
+
+Add `devm_pm_runtime_set_active_enabled()` and
+`devm_pm_runtime_get_noresume()` for simplifying
+common cases in drivers.
+
+Signed-off-by: Bence Csókás <csokas.bence@prolan.hu>
+Link: https://patch.msgid.link/20250327195928.680771-3-csokas.bence@prolan.hu
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 0792c1984a45 ("iio: imu: inv_icm42600: Simplify pm_runtime setup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/power/runtime.c | 44 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/pm_runtime.h | 4 +++
+ 2 files changed, 48 insertions(+)
+
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1477,6 +1477,32 @@ void pm_runtime_enable(struct device *de
+ }
+ EXPORT_SYMBOL_GPL(pm_runtime_enable);
+
++static void pm_runtime_set_suspended_action(void *data)
++{
++ pm_runtime_set_suspended(data);
++}
++
++/**
++ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_set_active_enabled(struct device *dev)
++{
++ int err;
++
++ err = pm_runtime_set_active(dev);
++ if (err)
++ return err;
++
++ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
++ if (err)
++ return err;
++
++ return devm_pm_runtime_enable(dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
++
+ static void pm_runtime_disable_action(void *data)
+ {
+ pm_runtime_dont_use_autosuspend(data);
+@@ -1499,6 +1525,24 @@ int devm_pm_runtime_enable(struct device
+ }
+ EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+
++static void pm_runtime_put_noidle_action(void *data)
++{
++ pm_runtime_put_noidle(data);
++}
++
++/**
++ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_get_noresume(struct device *dev)
++{
++ pm_runtime_get_noresume(dev);
++
++ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
++
+ /**
+ * pm_runtime_forbid - Block runtime PM of a device.
+ * @dev: Device to handle.
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -75,7 +75,9 @@ extern void pm_runtime_new_link(struct d
+ extern void pm_runtime_drop_link(struct device_link *link);
+ extern void pm_runtime_release_supplier(struct device_link *link);
+
++int devm_pm_runtime_set_active_enabled(struct device *dev);
+ extern int devm_pm_runtime_enable(struct device *dev);
++int devm_pm_runtime_get_noresume(struct device *dev);
+
+ /**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+@@ -272,7 +274,9 @@ static inline void __pm_runtime_disable(
+ static inline void pm_runtime_allow(struct device *dev) {}
+ static inline void pm_runtime_forbid(struct device *dev) {}
+
++static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
+ static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
++static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
+
+ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
+ static inline void pm_runtime_get_noresume(struct device *dev) {}
--- /dev/null
+From 9daa5a8795865f9a3c93d8d1066785b07ded6073 Mon Sep 17 00:00:00 2001
+From: Vineeth Vijayan <vneethv@linux.ibm.com>
+Date: Wed, 1 Oct 2025 15:38:17 +0200
+Subject: s390/cio: Update purge function to unregister the unused subchannels
+
+From: Vineeth Vijayan <vneethv@linux.ibm.com>
+
+commit 9daa5a8795865f9a3c93d8d1066785b07ded6073 upstream.
+
+Starting with 'commit 2297791c92d0 ("s390/cio: dont unregister
+subchannel from child-drivers")', cio no longer unregisters
+subchannels when the attached device is invalid or unavailable.
+
+As an unintended side-effect, the cio_ignore purge function no longer
+removes subchannels for devices on the cio_ignore list if no CCW device
+is attached. This situation occurs when a CCW device is non-operational
+or unavailable.
+
+To ensure the same outcome of the purge function as when the
+current cio_ignore list had been active during boot, update the purge
+function to remove I/O subchannels without working CCW devices if the
+associated device number is found on the cio_ignore list.
+
+Fixes: 2297791c92d0 ("s390/cio: dont unregister subchannel from child-drivers")
+Suggested-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Reviewed-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Vineeth Vijayan <vneethv@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/cio/device.c | 39 +++++++++++++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 14 deletions(-)
+
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1323,23 +1323,34 @@ void ccw_device_schedule_recovery(void)
+ spin_unlock_irqrestore(&recovery_lock, flags);
+ }
+
+-static int purge_fn(struct device *dev, void *data)
++static int purge_fn(struct subchannel *sch, void *data)
+ {
+- struct ccw_device *cdev = to_ccwdev(dev);
+- struct ccw_dev_id *id = &cdev->private->dev_id;
+- struct subchannel *sch = to_subchannel(cdev->dev.parent);
+-
+- spin_lock_irq(cdev->ccwlock);
+- if (is_blacklisted(id->ssid, id->devno) &&
+- (cdev->private->state == DEV_STATE_OFFLINE) &&
+- (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+- id->devno);
++ struct ccw_device *cdev;
++
++ spin_lock_irq(sch->lock);
++ if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
++ goto unlock;
++
++ if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
++ goto unlock;
++
++ cdev = sch_get_cdev(sch);
++ if (cdev) {
++ if (cdev->private->state != DEV_STATE_OFFLINE)
++ goto unlock;
++
++ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
++ goto unlock;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+- css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
+ }
+- spin_unlock_irq(cdev->ccwlock);
++
++ css_sched_sch_todo(sch, SCH_TODO_UNREG);
++ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
++ sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");
++
++unlock:
++ spin_unlock_irq(sch->lock);
+ /* Abort loop in case of pending signal. */
+ if (signal_pending(current))
+ return -EINTR;
+@@ -1355,7 +1366,7 @@ static int purge_fn(struct device *dev,
+ int ccw_purge_blacklisted(void)
+ {
+ CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+- bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
++ for_each_subchannel_staged(purge_fn, NULL, NULL);
+ return 0;
+ }
+
most-usb-fix-use-after-free-in-hdm_disconnect.patch
most-usb-hdm_probe-fix-calling-put_device-before-device-initialization.patch
serial-8250_exar-add-support-for-advantech-2-port-card-with-device-id-0x0018.patch
+arm64-cputype-add-neoverse-v3ae-definitions.patch
+arm64-errata-apply-workarounds-for-neoverse-v3ae.patch
+s390-cio-update-purge-function-to-unregister-the-unused-subchannels.patch
+xfs-rename-the-old_crc-variable-in-xlog_recover_process.patch
+xfs-fix-log-crc-mismatches-between-i386-and-other-architectures.patch
+nfsd-rework-encoding-and-decoding-of-nfsd4_deviceid.patch
+nfsd-minor-cleanup-in-layoutcommit-processing.patch
+nfsd-fix-last-write-offset-handling-in-layoutcommit.patch
+iio-imu-inv_icm42600-use-instead-of-memset.patch
+iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch
+pm-runtime-add-new-devm-functions.patch
+iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch
+padata-reset-next-cpu-when-reorder-sequence-wraps-around.patch
+fuse-allocate-ff-release_args-only-if-release-is-needed.patch
+fuse-fix-livelock-in-synchronous-file-put-from-fuseblk-workers.patch
+pci-j721e-enable-acspcie-refclk-if-ti-syscon-acspcie-proxy-ctrl-exists.patch
+pci-j721e-fix-programming-sequence-of-strap-settings.patch
+wifi-ath11k-hal-srng-don-t-deinitialize-and-re-initialize-again.patch
+pci-rcar-host-convert-struct-rcar_msi-mask_lock-into-raw-spinlock.patch
+drm-amdgpu-use-atomic-functions-with-memory-barriers-for-vm-fault-info.patch
+vfs-don-t-leak-disconnected-dentries-on-umount.patch
+nfsd-define-a-proc_layoutcommit-for-the-flexfiles-layout-type.patch
+f2fs-fix-wrong-block-mapping-for-multi-devices.patch
+pci-tegra194-handle-errors-in-bpmp-response.patch
+pci-rcar-finish-transition-to-l1-state-in-rcar_pcie_config_access.patch
+pci-rcar-host-drop-pmsr-spinlock.patch
+pci-tegra194-reset-bars-when-running-in-pcie-endpoint-mode.patch
+devcoredump-fix-circular-locking-dependency-with-devcd-mutex.patch
+xfs-always-warn-about-deprecated-mount-options.patch
+arch_topology-fix-incorrect-error-check-in-topology_parse_cpu_capacity.patch
+usb-gadget-store-endpoint-pointer-in-usb_request.patch
+usb-gadget-introduce-free_usb_request-helper.patch
+usb-gadget-f_ncm-refactor-bind-path-to-use-__free.patch
+usb-gadget-f_acm-refactor-bind-path-to-use-__free.patch
--- /dev/null
+From stable+bounces-188185-greg=kroah.com@vger.kernel.org Mon Oct 20 18:19:51 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:19:42 -0400
+Subject: usb: gadget: f_acm: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020161942.1835894-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 47b2116e54b4a854600341487e8b55249e926324 ]
+
+After a bind/unbind cycle, the acm->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020
+Call trace:
+ usb_ep_free_request+0x2c/0xec
+ gs_free_req+0x30/0x44
+ acm_bind+0x1b8/0x1f4
+ usb_add_function+0xcc/0x1f0
+ configfs_composite_bind+0x468/0x588
+ gadget_bind_driver+0x104/0x270
+ really_probe+0x190/0x374
+ __driver_probe_device+0xa0/0x12c
+ driver_probe_device+0x3c/0x218
+ __device_attach_driver+0x14c/0x188
+ bus_for_each_drv+0x10c/0x168
+ __device_attach+0xfc/0x198
+ device_initial_probe+0x14/0x24
+ bus_probe_device+0x94/0x11c
+ device_add+0x268/0x48c
+ usb_add_gadget+0x198/0x28c
+ dwc3_gadget_init+0x700/0x858
+ __dwc3_set_mode+0x3cc/0x664
+ process_scheduled_works+0x1d8/0x488
+ worker_thread+0x244/0x334
+ kthread+0x114/0x1bc
+ ret_from_fork+0x10/0x20
+
+Fixes: 1f1ba11b6494 ("usb gadget: issue notifications from ACM function")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-4-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-4-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_acm.c | 42 ++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 23 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_acm.c
++++ b/drivers/usb/gadget/function/f_acm.c
+@@ -11,12 +11,15 @@
+
+ /* #define VERBOSE_DEBUG */
+
++#include <linux/cleanup.h>
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+
++#include <linux/usb/gadget.h>
++
+ #include "u_serial.h"
+
+
+@@ -612,6 +615,7 @@ acm_bind(struct usb_configuration *c, st
+ struct usb_string *us;
+ int status;
+ struct usb_ep *ep;
++ struct usb_request *request __free(free_usb_request) = NULL;
+
+ /* REVISIT might want instance-specific strings to help
+ * distinguish instances ...
+@@ -629,7 +633,7 @@ acm_bind(struct usb_configuration *c, st
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ acm->ctrl_id = status;
+ acm_iad_descriptor.bFirstInterface = status;
+
+@@ -638,40 +642,38 @@ acm_bind(struct usb_configuration *c, st
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ acm->data_id = status;
+
+ acm_data_interface_desc.bInterfaceNumber = status;
+ acm_union_desc.bSlaveInterface0 = status;
+ acm_call_mgmt_descriptor.bDataInterface = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->port.in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->port.out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ acm->notify = ep;
+
+ /* allocate notification */
+- acm->notify_req = gs_alloc_req(ep,
+- sizeof(struct usb_cdc_notification) + 2,
+- GFP_KERNEL);
+- if (!acm->notify_req)
+- goto fail;
++ request = gs_alloc_req(ep,
++ sizeof(struct usb_cdc_notification) + 2,
++ GFP_KERNEL);
++ if (!request)
++ return -ENODEV;
+
+- acm->notify_req->complete = acm_cdc_notify_complete;
+- acm->notify_req->context = acm;
++ request->complete = acm_cdc_notify_complete;
++ request->context = acm;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+@@ -688,7 +690,9 @@ acm_bind(struct usb_configuration *c, st
+ status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
+ acm_ss_function, acm_ss_function);
+ if (status)
+- goto fail;
++ return status;
++
++ acm->notify_req = no_free_ptr(request);
+
+ dev_dbg(&cdev->gadget->dev,
+ "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+@@ -698,14 +702,6 @@ acm_bind(struct usb_configuration *c, st
+ acm->port.in->name, acm->port.out->name,
+ acm->notify->name);
+ return 0;
+-
+-fail:
+- if (acm->notify_req)
+- gs_free_req(acm->notify, acm->notify_req);
+-
+- ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+-
+- return status;
+ }
+
+ static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
--- /dev/null
+From stable+bounces-188188-greg=kroah.com@vger.kernel.org Mon Oct 20 18:22:36 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:20:33 -0400
+Subject: usb: gadget: f_ncm: Refactor bind path to use __free()
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, stable@kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020162033.1836288-3-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 75a5b8d4ddd4eb6b16cb0b475d14ff4ae64295ef ]
+
+After a bind/unbind cycle, the ncm->notify_req is left stale. If a
+subsequent bind fails, the unified error label attempts to free this
+stale request, leading to a NULL pointer dereference when accessing
+ep->ops->free_request.
+
+Refactor the error handling in the bind path to use the __free()
+automatic cleanup mechanism.
+
+Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020
+Call trace:
+ usb_ep_free_request+0x2c/0xec
+ ncm_bind+0x39c/0x3dc
+ usb_add_function+0xcc/0x1f0
+ configfs_composite_bind+0x468/0x588
+ gadget_bind_driver+0x104/0x270
+ really_probe+0x190/0x374
+ __driver_probe_device+0xa0/0x12c
+ driver_probe_device+0x3c/0x218
+ __device_attach_driver+0x14c/0x188
+ bus_for_each_drv+0x10c/0x168
+ __device_attach+0xfc/0x198
+ device_initial_probe+0x14/0x24
+ bus_probe_device+0x94/0x11c
+ device_add+0x268/0x48c
+ usb_add_gadget+0x198/0x28c
+ dwc3_gadget_init+0x700/0x858
+ __dwc3_set_mode+0x3cc/0x664
+ process_scheduled_works+0x1d8/0x488
+ worker_thread+0x244/0x334
+ kthread+0x114/0x1bc
+ ret_from_fork+0x10/0x20
+
+Fixes: 9f6ce4240a2b ("usb: gadget: f_ncm.c added")
+Cc: stable@kernel.org
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-3-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-3-4997bf277548@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_ncm.c | 78 +++++++++++++++---------------------
+ 1 file changed, 33 insertions(+), 45 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -11,6 +11,7 @@
+ * Copyright (C) 2008 Nokia Corporation
+ */
+
++#include <linux/cleanup.h>
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -19,6 +20,7 @@
+ #include <linux/crc32.h>
+
+ #include <linux/usb/cdc.h>
++#include <linux/usb/gadget.h>
+
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+@@ -1441,18 +1443,18 @@ static int ncm_bind(struct usb_configura
+ struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
+
++ struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
++ struct usb_request *request __free(free_usb_request) = NULL;
++
+ if (!can_support_ecm(cdev->gadget))
+ return -EINVAL;
+
+ ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
+ if (cdev->use_os_string) {
+- f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+- GFP_KERNEL);
+- if (!f->os_desc_table)
++ os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
++ if (!os_desc_table)
+ return -ENOMEM;
+- f->os_desc_n = 1;
+- f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
+ }
+
+ mutex_lock(&ncm_opts->lock);
+@@ -1462,16 +1464,15 @@ static int ncm_bind(struct usb_configura
+ mutex_unlock(&ncm_opts->lock);
+
+ if (status)
+- goto fail;
++ return status;
+
+ ncm_opts->bound = true;
+
+ us = usb_gstrings_attach(cdev, ncm_strings,
+ ARRAY_SIZE(ncm_string_defs));
+- if (IS_ERR(us)) {
+- status = PTR_ERR(us);
+- goto fail;
+- }
++ if (IS_ERR(us))
++ return PTR_ERR(us);
++
+ ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
+ ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
+ ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
+@@ -1481,55 +1482,47 @@ static int ncm_bind(struct usb_configura
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ncm->ctrl_id = status;
+ ncm_iad_desc.bFirstInterface = status;
+
+ ncm_control_intf.bInterfaceNumber = status;
+ ncm_union_desc.bMasterInterface0 = status;
+
+- if (cdev->use_os_string)
+- f->os_desc_table[0].if_id =
+- ncm_iad_desc.bFirstInterface;
+-
+ status = usb_interface_id(c, f);
+ if (status < 0)
+- goto fail;
++ return status;
+ ncm->data_id = status;
+
+ ncm_data_nop_intf.bInterfaceNumber = status;
+ ncm_data_intf.bInterfaceNumber = status;
+ ncm_union_desc.bSlaveInterface0 = status;
+
+- status = -ENODEV;
+-
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->port.in_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->port.out_ep = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
+ if (!ep)
+- goto fail;
++ return -ENODEV;
+ ncm->notify = ep;
+
+- status = -ENOMEM;
+-
+ /* allocate notification request and buffer */
+- ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+- if (!ncm->notify_req)
+- goto fail;
+- ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+- if (!ncm->notify_req->buf)
+- goto fail;
+- ncm->notify_req->context = ncm;
+- ncm->notify_req->complete = ncm_notify_complete;
++ request = usb_ep_alloc_request(ep, GFP_KERNEL);
++ if (!request)
++ return -ENOMEM;
++ request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
++ if (!request->buf)
++ return -ENOMEM;
++ request->context = ncm;
++ request->complete = ncm_notify_complete;
+
+ /*
+ * support all relevant hardware speeds... we expect that when
+@@ -1549,7 +1542,7 @@ static int ncm_bind(struct usb_configura
+ status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
+ ncm_ss_function, ncm_ss_function);
+ if (status)
+- goto fail;
++ return status;
+
+ /*
+ * NOTE: all that is done without knowing or caring about
+@@ -1563,25 +1556,20 @@ static int ncm_bind(struct usb_configura
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ ncm->task_timer.function = ncm_tx_timeout;
+
++ if (cdev->use_os_string) {
++ os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
++ os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface;
++ f->os_desc_table = no_free_ptr(os_desc_table);
++ f->os_desc_n = 1;
++ }
++ ncm->notify_req = no_free_ptr(request);
++
+ DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ ncm->port.in_ep->name, ncm->port.out_ep->name,
+ ncm->notify->name);
+ return 0;
+-
+-fail:
+- kfree(f->os_desc_table);
+- f->os_desc_n = 0;
+-
+- if (ncm->notify_req) {
+- kfree(ncm->notify_req->buf);
+- usb_ep_free_request(ncm->notify, ncm->notify_req);
+- }
+-
+- ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+-
+- return status;
+ }
+
+ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
--- /dev/null
+From stable+bounces-188186-greg=kroah.com@vger.kernel.org Mon Oct 20 18:22:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:20:32 -0400
+Subject: usb: gadget: Introduce free_usb_request helper
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020162033.1836288-2-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit 201c53c687f2b55a7cc6d9f4000af4797860174b ]
+
+Introduce the free_usb_request() function that frees both the request's
+buffer and the request itself.
+
+This function serves as the cleanup callback for DEFINE_FREE() to enable
+automatic, scope-based cleanup for usb_request pointers.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-2-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-2-4997bf277548@google.com
+Stable-dep-of: 75a5b8d4ddd4 ("usb: gadget: f_ncm: Refactor bind path to use __free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/usb/gadget.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -17,6 +17,7 @@
+ #ifndef __LINUX_USB_GADGET_H
+ #define __LINUX_USB_GADGET_H
+
++#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+@@ -292,6 +293,28 @@ static inline void usb_ep_fifo_flush(str
+
+ /*-------------------------------------------------------------------------*/
+
++/**
++ * free_usb_request - frees a usb_request object and its buffer
++ * @req: the request being freed
++ *
++ * This helper function frees both the request's buffer and the request object
++ * itself by calling usb_ep_free_request(). Its signature is designed to be used
++ * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request
++ * pointers.
++ */
++static inline void free_usb_request(struct usb_request *req)
++{
++ if (!req)
++ return;
++
++ kfree(req->buf);
++ usb_ep_free_request(req->ep, req);
++}
++
++DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T))
++
++/*-------------------------------------------------------------------------*/
++
+ struct usb_dcd_config_params {
+ __u8 bU1devExitLat; /* U1 Device exit Latency */
+ #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */
--- /dev/null
+From stable+bounces-188187-greg=kroah.com@vger.kernel.org Mon Oct 20 18:20:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:20:31 -0400
+Subject: usb: gadget: Store endpoint pointer in usb_request
+To: stable@vger.kernel.org
+Cc: Kuen-Han Tsai <khtsai@google.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020162033.1836288-1-sashal@kernel.org>
+
+From: Kuen-Han Tsai <khtsai@google.com>
+
+[ Upstream commit bfb1d99d969fe3b892db30848aeebfa19d21f57f ]
+
+Gadget function drivers often have goto-based error handling in their
+bind paths, which can be bug-prone. Refactoring these paths to use
+__free() scope-based cleanup is desirable, but currently blocked.
+
+The blocker is that usb_ep_free_request(ep, req) requires two
+parameters, while the __free() mechanism can only pass a pointer to the
+request itself.
+
+Store an endpoint pointer in the struct usb_request. The pointer is
+populated centrally in usb_ep_alloc_request() on every successful
+allocation, making the request object self-contained.
+
+Signed-off-by: Kuen-Han Tsai <khtsai@google.com>
+Link: https://lore.kernel.org/r/20250916-ready-v1-1-4997bf277548@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://lore.kernel.org/r/20250916-ready-v1-1-4997bf277548@google.com
+Stable-dep-of: 75a5b8d4ddd4 ("usb: gadget: f_ncm: Refactor bind path to use __free()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/core.c | 3 +++
+ include/linux/usb/gadget.h | 2 ++
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -177,6 +177,9 @@ struct usb_request *usb_ep_alloc_request
+
+ req = ep->ops->alloc_request(ep, gfp_flags);
+
++ if (req)
++ req->ep = ep;
++
+ trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+ return req;
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -33,6 +33,7 @@ struct usb_ep;
+
+ /**
+ * struct usb_request - describes one i/o request
++ * @ep: The associated endpoint set by usb_ep_alloc_request().
+ * @buf: Buffer used for data. Always provide this; some controllers
+ * only use PIO, or don't use DMA for some endpoints.
+ * @dma: DMA address corresponding to 'buf'. If you don't set this
+@@ -98,6 +99,7 @@ struct usb_ep;
+ */
+
+ struct usb_request {
++ struct usb_ep *ep;
+ void *buf;
+ unsigned length;
+ dma_addr_t dma;
--- /dev/null
+From stable+bounces-188276-greg=kroah.com@vger.kernel.org Tue Oct 21 02:42:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 20:42:20 -0400
+Subject: vfs: Don't leak disconnected dentries on umount
+To: stable@vger.kernel.org
+Cc: Jan Kara <jack@suse.cz>, syzbot+1d79ebe5383fc016cf07@syzkaller.appspotmail.com, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021004220.1957034-1-sashal@kernel.org>
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 56094ad3eaa21e6621396cc33811d8f72847a834 ]
+
+When user calls open_by_handle_at() on some inode that is not cached, we
+will create disconnected dentry for it. If such dentry is a directory,
+exportfs_decode_fh_raw() will then try to connect this dentry to the
+dentry tree through reconnect_path(). It may happen for various reasons
+(such as corrupted fs or race with rename) that the call to
+lookup_one_unlocked() in reconnect_one() will fail to find the dentry we
+are trying to reconnect and instead create a new dentry under the
+parent. Now this dentry will not be marked as disconnected although the
+parent still may well be disconnected (at least in case this
+inconsistency happened because the fs is corrupted and .. doesn't point
+to the real parent directory). This creates inconsistency in
+disconnected flags but AFAICS it was mostly harmless. At least until
+commit f1ee616214cb ("VFS: don't keep disconnected dentries on d_anon")
+which removed adding of most disconnected dentries to sb->s_anon list.
+Thus after this commit cleanup of disconnected dentries implicitly
+relies on the fact that dput() will immediately reclaim such dentries.
+However when some leaf dentry isn't marked as disconnected, as in the
+scenario described above, the reclaim doesn't happen and the dentries
+are "leaked". Memory reclaim can eventually reclaim them but otherwise
+they stay in memory and if umount comes first, we hit infamous "Busy
+inodes after unmount" bug. Make sure all dentries created under a
+disconnected parent are marked as disconnected as well.
+
+Reported-by: syzbot+1d79ebe5383fc016cf07@syzkaller.appspotmail.com
+Fixes: f1ee616214cb ("VFS: don't keep disconnected dentries on d_anon")
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+[ relocated DCACHE_DISCONNECTED propagation from d_alloc_parallel() to d_alloc() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dcache.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1836,6 +1836,8 @@ struct dentry *d_alloc(struct dentry * p
+ __dget_dlock(parent);
+ dentry->d_parent = parent;
+ list_add(&dentry->d_child, &parent->d_subdirs);
++ if (parent->d_flags & DCACHE_DISCONNECTED)
++ dentry->d_flags |= DCACHE_DISCONNECTED;
+ spin_unlock(&parent->d_lock);
+
+ return dentry;
--- /dev/null
+From stable+bounces-188181-greg=kroah.com@vger.kernel.org Mon Oct 20 18:17:59 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:17:40 -0400
+Subject: wifi: ath11k: HAL SRNG: don't deinitialize and re-initialize again
+To: stable@vger.kernel.org
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>, Baochen Qiang <baochen.qiang@oss.qualcomm.com>, Jeff Johnson <jeff.johnson@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020161740.1835172-1-sashal@kernel.org>
+
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+
+[ Upstream commit 32be3ca4cf78b309dfe7ba52fe2d7cc3c23c5634 ]
+
+Don't deinitialize and reinitialize the HAL helpers. The dma memory is
+deallocated and there is high possibility that we'll not be able to get
+the same memory allocated from dma when there is high memory pressure.
+
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03926.13-QCAHSPSWPL_V2_SILICONZ_CE-2.52297.6
+
+Fixes: d5c65159f289 ("ath11k: driver for Qualcomm IEEE 802.11ax devices")
+Cc: stable@vger.kernel.org
+Cc: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Reviewed-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Link: https://patch.msgid.link/20250722053121.1145001-1-usama.anjum@collabora.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath11k/core.c | 6 +-----
+ drivers/net/wireless/ath/ath11k/hal.c | 16 ++++++++++++++++
+ drivers/net/wireless/ath/ath11k/hal.h | 1 +
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -942,14 +942,10 @@ static int ath11k_core_reconfigure_on_cr
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+- ath11k_hal_srng_deinit(ab);
++ ath11k_hal_srng_clear(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+- ret = ath11k_hal_srng_init(ab);
+- if (ret)
+- return ret;
+-
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1313,6 +1313,22 @@ void ath11k_hal_srng_deinit(struct ath11
+ }
+ EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+
++void ath11k_hal_srng_clear(struct ath11k_base *ab)
++{
++ /* No need to memset rdp and wrp memory since each individual
++ * segment would get cleared in ath11k_hal_srng_src_hw_init()
++ * and ath11k_hal_srng_dst_hw_init().
++ */
++ memset(ab->hal.srng_list, 0,
++ sizeof(ab->hal.srng_list));
++ memset(ab->hal.shadow_reg_addr, 0,
++ sizeof(ab->hal.shadow_reg_addr));
++ ab->hal.avail_blk_resource = 0;
++ ab->hal.current_blk_index = 0;
++ ab->hal.num_shadow_reg_configured = 0;
++}
++EXPORT_SYMBOL(ath11k_hal_srng_clear);
++
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+ {
+ struct hal_srng *srng;
+--- a/drivers/net/wireless/ath/ath11k/hal.h
++++ b/drivers/net/wireless/ath/ath11k/hal.h
+@@ -952,6 +952,7 @@ int ath11k_hal_srng_setup(struct ath11k_
+ struct hal_srng_params *params);
+ int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+ void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
++void ath11k_hal_srng_clear(struct ath11k_base *ab);
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+ void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
--- /dev/null
+From stable+bounces-189896-greg=kroah.com@vger.kernel.org Mon Oct 27 00:49:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Oct 2025 19:49:25 -0400
+Subject: xfs: always warn about deprecated mount options
+To: stable@vger.kernel.org
+Cc: "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Carlos Maiolino <cmaiolino@redhat.com>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251026234925.287711-1-sashal@kernel.org>
+
+From: "Darrick J. Wong" <djwong@kernel.org>
+
+[ Upstream commit 630785bfbe12c3ee3ebccd8b530a98d632b7e39d ]
+
+The deprecation of the 'attr2' mount option in 6.18 wasn't entirely
+successful because nobody noticed that the kernel never printed a
+warning about attr2 being set in fstab if the only xfs filesystem is the
+root fs; the initramfs mounts the root fs with no mount options; and the
+init scripts only conveyed the fstab options by remounting the root fs.
+
+Fix this by making it complain all the time.
+
+Cc: stable@vger.kernel.org # v5.13
+Fixes: 92cf7d36384b99 ("xfs: Skip repetitive warnings about mount options")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+[ Update existing xfs_fs_warn_deprecated() callers ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_super.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1178,16 +1178,25 @@ suffix_kstrtoint(
+ static inline void
+ xfs_fs_warn_deprecated(
+ struct fs_context *fc,
+- struct fs_parameter *param,
+- uint64_t flag,
+- bool value)
++ struct fs_parameter *param)
+ {
+- /* Don't print the warning if reconfiguring and current mount point
+- * already had the flag set
++ /*
++ * Always warn about someone passing in a deprecated mount option.
++ * Previously we wouldn't print the warning if we were reconfiguring
++ * and current mount point already had the flag set, but that was not
++ * the right thing to do.
++ *
++ * Many distributions mount the root filesystem with no options in the
++ * initramfs and rely on mount -a to remount the root fs with the
++ * options in fstab. However, the old behavior meant that there would
++ * never be a warning about deprecated mount options for the root fs in
++ * /etc/fstab. On a single-fs system, that means no warning at all.
++ *
++ * Compounding this problem are distribution scripts that copy
++ * /proc/mounts to fstab, which means that we can't remove mount
++ * options unless we're 100% sure they have only ever been advertised
++ * in /proc/mounts in response to explicitly provided mount options.
+ */
+- if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
+- !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
+- return;
+ xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
+ }
+
+@@ -1326,19 +1335,19 @@ xfs_fs_parse_param(
+ #endif
+ /* Following mount options will be removed in September 2025 */
+ case Opt_ikeep:
+- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
++ xfs_fs_warn_deprecated(fc, param);
+ parsing_mp->m_features |= XFS_FEAT_IKEEP;
+ return 0;
+ case Opt_noikeep:
+- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
++ xfs_fs_warn_deprecated(fc, param);
+ parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
+ return 0;
+ case Opt_attr2:
+- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
++ xfs_fs_warn_deprecated(fc, param);
+ parsing_mp->m_features |= XFS_FEAT_ATTR2;
+ return 0;
+ case Opt_noattr2:
+- xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
++ xfs_fs_warn_deprecated(fc, param);
+ parsing_mp->m_features |= XFS_FEAT_NOATTR2;
+ return 0;
+ default:
--- /dev/null
+From stable+bounces-188059-greg=kroah.com@vger.kernel.org Mon Oct 20 14:50:47 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:50:24 -0400
+Subject: xfs: fix log CRC mismatches between i386 and other architectures
+To: stable@vger.kernel.org
+Cc: Christoph Hellwig <hch@lst.de>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125024.1758100-2-sashal@kernel.org>
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit e747883c7d7306acb4d683038d881528fbfbe749 ]
+
+When mounting file systems with a log that was dirtied on i386 on
+other architectures or vice versa, log recovery is unhappy:
+
+[ 11.068052] XFS (vdb): Torn write (CRC failure) detected at log block 0x2. Truncating head block from 0xc.
+
+This is because the CRCs generated by i386 and other architectures
+always diff. The reason for that is that sizeof(struct xlog_rec_header)
+returns different values for i386 vs the rest (324 vs 328), because the
+struct is not sizeof(uint64_t) aligned, and i386 has odd struct size
+alignment rules.
+
+This issue goes back to commit 13cdc853c519 ("Add log versioning, and new
+super block field for the log stripe") in the xfs-import tree, which
+adds log v2 support and the h_size field that causes the unaligned size.
+At that time it only mattered for the crude debug only log header
+checksum, but with commit 0e446be44806 ("xfs: add CRC checks to the log")
+it became a real issue for v5 file system, because now there is a proper
+CRC, and regular builds actually expect it to match.
+
+Fix this by allowing checksums with and without the padding.
+
+Fixes: 0e446be44806 ("xfs: add CRC checks to the log")
+Cc: <stable@vger.kernel.org> # v3.8
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+[ Adjust context and filenames ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_log_format.h | 30 +++++++++++++++++++++++++++++-
+ fs/xfs/xfs_log.c | 8 ++++----
+ fs/xfs/xfs_log_priv.h | 4 ++--
+ fs/xfs/xfs_log_recover.c | 19 +++++++++++++++++--
+ fs/xfs/xfs_ondisk.h | 2 ++
+ 5 files changed, 54 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -167,12 +167,40 @@ typedef struct xlog_rec_header {
+ __be32 h_prev_block; /* block number to previous LR : 4 */
+ __be32 h_num_logops; /* number of log operations in this LR : 4 */
+ __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+- /* new fields */
++
++ /* fields added by the Linux port: */
+ __be32 h_fmt; /* format of log record : 4 */
+ uuid_t h_fs_uuid; /* uuid of FS : 16 */
++
++ /* fields added for log v2: */
+ __be32 h_size; /* iclog size : 4 */
++
++ /*
++ * When h_size added for log v2 support, it caused structure to have
++ * a different size on i386 vs all other architectures because the
++ * sum of the size of the members is not aligned by that of the largest
++ * __be64-sized member, and i386 has really odd struct alignment rules.
++ *
++ * Due to the way the log headers are placed out on-disk that alone is
++ * not a problem because the xlog_rec_header always sits alone in a
++ * BBSIZEs area, and the rest of that area is padded with zeroes.
++ * But xlog_cksum used to calculate the checksum based on the structure
++ * size, and thus gives different checksums for i386 vs the rest.
++ * We now do two checksum validation passes for both sizes to allow
++ * moving v5 file systems with unclean logs between i386 and other
++ * (little-endian) architectures.
++ */
++ __u32 h_pad0;
+ } xlog_rec_header_t;
+
++#ifdef __i386__
++#define XLOG_REC_SIZE offsetofend(struct xlog_rec_header, h_size)
++#define XLOG_REC_SIZE_OTHER sizeof(struct xlog_rec_header)
++#else
++#define XLOG_REC_SIZE sizeof(struct xlog_rec_header)
++#define XLOG_REC_SIZE_OTHER offsetofend(struct xlog_rec_header, h_size)
++#endif /* __i386__ */
++
+ typedef struct xlog_rec_ext_header {
+ __be32 xh_cycle; /* write cycle of log : 4 */
+ __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1761,13 +1761,13 @@ xlog_cksum(
+ struct xlog *log,
+ struct xlog_rec_header *rhead,
+ char *dp,
+- int size)
++ unsigned int hdrsize,
++ unsigned int size)
+ {
+ uint32_t crc;
+
+ /* first generate the crc for the record header ... */
+- crc = xfs_start_cksum_update((char *)rhead,
+- sizeof(struct xlog_rec_header),
++ crc = xfs_start_cksum_update((char *)rhead, hdrsize,
+ offsetof(struct xlog_rec_header, h_crc));
+
+ /* ... then for additional cycle data for v2 logs ... */
+@@ -2013,7 +2013,7 @@ xlog_sync(
+
+ /* calculcate the checksum */
+ iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
+- iclog->ic_datap, size);
++ iclog->ic_datap, XLOG_REC_SIZE, size);
+ /*
+ * Intentionally corrupt the log record CRC based on the error injection
+ * frequency, if defined. This facilitates testing log recovery in the
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -491,8 +491,8 @@ xlog_recover_finish(
+ extern void
+ xlog_recover_cancel(struct xlog *);
+
+-extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
+- char *dp, int size);
++__le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
++ char *dp, unsigned int hdrsize, unsigned int size);
+
+ extern kmem_zone_t *xfs_log_ticket_zone;
+ struct xlog_ticket *
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2864,9 +2864,24 @@ xlog_recover_process(
+ int pass,
+ struct list_head *buffer_list)
+ {
+- __le32 expected_crc = rhead->h_crc, crc;
++ __le32 expected_crc = rhead->h_crc, crc, other_crc;
+
+- crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
++ crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
++ be32_to_cpu(rhead->h_len));
++
++ /*
++ * Look at the end of the struct xlog_rec_header definition in
++ * xfs_log_format.h for the glory details.
++ */
++ if (expected_crc && crc != expected_crc) {
++ other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
++ be32_to_cpu(rhead->h_len));
++ if (other_crc == expected_crc) {
++ xfs_notice_once(log->l_mp,
++ "Fixing up incorrect CRC due to padding.");
++ crc = other_crc;
++ }
++ }
+
+ /*
+ * Nothing else to do if this is a CRC verification pass. Just return
+--- a/fs/xfs/xfs_ondisk.h
++++ b/fs/xfs/xfs_ondisk.h
+@@ -132,6 +132,8 @@ xfs_check_ondisk_structs(void)
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_header, 328);
++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_ext_header, 260);
+
+ /*
+ * The v5 superblock format extended several v4 header structures with
--- /dev/null
+From stable+bounces-188058-greg=kroah.com@vger.kernel.org Mon Oct 20 14:50:47 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:50:23 -0400
+Subject: xfs: rename the old_crc variable in xlog_recover_process
+To: stable@vger.kernel.org
+Cc: Christoph Hellwig <hch@lst.de>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125024.1758100-1-sashal@kernel.org>
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 0b737f4ac1d3ec093347241df74bbf5f54a7e16c ]
+
+old_crc is a very misleading name. Rename it to expected_crc as that
+describes the usage much better.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Stable-dep-of: e747883c7d73 ("xfs: fix log CRC mismatches between i386 and other architectures")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_log_recover.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2864,20 +2864,19 @@ xlog_recover_process(
+ int pass,
+ struct list_head *buffer_list)
+ {
+- __le32 old_crc = rhead->h_crc;
+- __le32 crc;
++ __le32 expected_crc = rhead->h_crc, crc;
+
+ crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
+
+ /*
+ * Nothing else to do if this is a CRC verification pass. Just return
+ * if this a record with a non-zero crc. Unfortunately, mkfs always
+- * sets old_crc to 0 so we must consider this valid even on v5 supers.
+- * Otherwise, return EFSBADCRC on failure so the callers up the stack
+- * know precisely what failed.
++ * sets expected_crc to 0 so we must consider this valid even on v5
++ * supers. Otherwise, return EFSBADCRC on failure so the callers up the
++ * stack know precisely what failed.
+ */
+ if (pass == XLOG_RECOVER_CRCPASS) {
+- if (old_crc && crc != old_crc)
++ if (expected_crc && crc != expected_crc)
+ return -EFSBADCRC;
+ return 0;
+ }
+@@ -2888,11 +2887,11 @@ xlog_recover_process(
+ * zero CRC check prevents warnings from being emitted when upgrading
+ * the kernel from one that does not add CRCs by default.
+ */
+- if (crc != old_crc) {
+- if (old_crc || xfs_has_crc(log->l_mp)) {
++ if (crc != expected_crc) {
++ if (expected_crc || xfs_has_crc(log->l_mp)) {
+ xfs_alert(log->l_mp,
+ "log record CRC mismatch: found 0x%x, expected 0x%x.",
+- le32_to_cpu(old_crc),
++ le32_to_cpu(expected_crc),
+ le32_to_cpu(crc));
+ xfs_hex_dump(dp, 32);
+ }