--- /dev/null
+From 3bbf004c4808e2c3241e5c1ad6cc102f38a03c39 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 19 Sep 2025 15:58:28 +0100
+Subject: arm64: cputype: Add Neoverse-V3AE definitions
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 3bbf004c4808e2c3241e5c1ad6cc102f38a03c39 upstream.
+
+Add cputype definitions for Neoverse-V3AE. These will be used for errata
+detection in subsequent patches.
+
+These values can be found in the Neoverse-V3AE TRM:
+
+ https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+... in section A.6.1 ("MIDR_EL1, Main ID Register").
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -93,6 +93,7 @@
+ #define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+ #define ARM_CPU_PART_CORTEX_A720 0xD81
+ #define ARM_CPU_PART_CORTEX_X4 0xD82
++#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83
+ #define ARM_CPU_PART_NEOVERSE_V3 0xD84
+ #define ARM_CPU_PART_CORTEX_X925 0xD85
+ #define ARM_CPU_PART_CORTEX_A725 0xD87
+@@ -180,6 +181,7 @@
+ #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+ #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
+ #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
++#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
+ #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+ #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+ #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
--- /dev/null
+From 0c33aa1804d101c11ba1992504f17a42233f0e11 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 19 Sep 2025 15:58:29 +0100
+Subject: arm64: errata: Apply workarounds for Neoverse-V3AE
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 0c33aa1804d101c11ba1992504f17a42233f0e11 upstream.
+
+Neoverse-V3AE is also affected by erratum #3312417, as described in its
+Software Developer Errata Notice (SDEN) document:
+
+ Neoverse V3AE (MP172) SDEN v9.0, erratum 3312417
+ https://developer.arm.com/documentation/SDEN-2615521/9-0/
+
+Enable the workaround for Neoverse-V3AE, and document this.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/arch/arm64/silicon-errata.rst | 2 ++
+ arch/arm64/Kconfig | 1 +
+ arch/arm64/kernel/cpu_errata.c | 1 +
+ 3 files changed, 4 insertions(+)
+
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -187,6 +187,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| ARM | Neoverse-V3AE | #3312417 | ARM64_ERRATUM_3194386 |
+++----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-500 | #841119,826419 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+ | ARM | MMU-600 | #1076982,1209401| N/A |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1094,6 +1094,7 @@ config ARM64_ERRATUM_3194386
+ * ARM Neoverse-V1 erratum 3324341
+ * ARM Neoverse V2 erratum 3324336
+ * ARM Neoverse-V3 erratum 3312417
++ * ARM Neoverse-V3AE erratum 3312417
+
+ On affected cores "MSR SSBS, #0" instructions may not affect
+ subsequent speculative instructions, which may permit unexepected
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -471,6 +471,7 @@ static const struct midr_range erratum_s
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
+ {}
+ };
+ #endif
--- /dev/null
+From c0e473a0d226479e8e925d5ba93f751d8df628e9 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Wed, 23 Apr 2025 12:53:42 -0700
+Subject: block: fix race between set_blocksize and read paths
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit c0e473a0d226479e8e925d5ba93f751d8df628e9 upstream.
+
+With the new large sector size support, it's now the case that
+set_blocksize can change i_blksize and the folio order in a manner that
+conflicts with a concurrent reader and causes a kernel crash.
+
+Specifically, let's say that udev-worker calls libblkid to detect the
+labels on a block device. The read call can create an order-0 folio to
+read the first 4096 bytes from the disk. But then udev is preempted.
+
+Next, someone tries to mount an 8k-sectorsize filesystem from the same
+block device. The filesystem calls set_blksize, which sets i_blksize to
+8192 and the minimum folio order to 1.
+
+Now udev resumes, still holding the order-0 folio it allocated. It then
+tries to schedule a read bio and do_mpage_readahead tries to create
+bufferheads for the folio. Unfortunately, blocks_per_folio == 0 because
+the page size is 4096 but the blocksize is 8192 so no bufferheads are
+attached and the bh walk never sets bdev. We then submit the bio with a
+NULL block device and crash.
+
+Therefore, truncate the page cache after flushing but before updating
+i_blksize. However, that's not enough -- we also need to lock out file
+IO and page faults during the update. Take both the i_rwsem and the
+invalidate_lock in exclusive mode for invalidations, and in shared mode
+for read/write operations.
+
+I don't know if this is the correct fix, but xfs/259 found it.
+
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Link: https://lore.kernel.org/r/174543795699.4139148.2086129139322431423.stgit@frogsfrogsfrogs
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ use bdev->bd_inode instead ]
+Signed-off-by: Mahmoud Adam <mngyadam@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bdev.c | 17 +++++++++++++++++
+ block/blk-zoned.c | 5 ++++-
+ block/fops.c | 16 ++++++++++++++++
+ block/ioctl.c | 6 ++++++
+ 4 files changed, 43 insertions(+), 1 deletion(-)
+
+--- a/block/bdev.c
++++ b/block/bdev.c
+@@ -147,9 +147,26 @@ int set_blocksize(struct block_device *b
+
+ /* Don't change the size if it is same as current */
+ if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
++ /*
++ * Flush and truncate the pagecache before we reconfigure the
++ * mapping geometry because folio sizes are variable now. If a
++ * reader has already allocated a folio whose size is smaller
++ * than the new min_order but invokes readahead after the new
++ * min_order becomes visible, readahead will think there are
++ * "zero" blocks per folio and crash. Take the inode and
++ * invalidation locks to avoid racing with
++ * read/write/fallocate.
++ */
++ inode_lock(bdev->bd_inode);
++ filemap_invalidate_lock(bdev->bd_inode->i_mapping);
++
+ sync_blockdev(bdev);
++ kill_bdev(bdev);
++
+ bdev->bd_inode->i_blkbits = blksize_bits(size);
+ kill_bdev(bdev);
++ filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
++ inode_unlock(bdev->bd_inode);
+ }
+ return 0;
+ }
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -401,6 +401,7 @@ int blkdev_zone_mgmt_ioctl(struct block_
+ op = REQ_OP_ZONE_RESET;
+
+ /* Invalidate the page cache, including dirty pages. */
++ inode_lock(bdev->bd_inode);
+ filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+ if (ret)
+@@ -423,8 +424,10 @@ int blkdev_zone_mgmt_ioctl(struct block_
+ GFP_KERNEL);
+
+ fail:
+- if (cmd == BLKRESETZONE)
++ if (cmd == BLKRESETZONE) {
+ filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
++ inode_unlock(bdev->bd_inode);
++ }
+
+ return ret;
+ }
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -681,7 +681,14 @@ static ssize_t blkdev_write_iter(struct
+ ret = direct_write_fallback(iocb, from, ret,
+ blkdev_buffered_write(iocb, from));
+ } else {
++ /*
++ * Take i_rwsem and invalidate_lock to avoid racing with
++ * set_blocksize changing i_blkbits/folio order and punching
++ * out the pagecache.
++ */
++ inode_lock_shared(bd_inode);
+ ret = blkdev_buffered_write(iocb, from);
++ inode_unlock_shared(bd_inode);
+ }
+
+ if (ret > 0)
+@@ -693,6 +700,7 @@ static ssize_t blkdev_write_iter(struct
+ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+ struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
++ struct inode *bd_inode = bdev->bd_inode;
+ loff_t size = bdev_nr_bytes(bdev);
+ loff_t pos = iocb->ki_pos;
+ size_t shorted = 0;
+@@ -728,7 +736,13 @@ static ssize_t blkdev_read_iter(struct k
+ goto reexpand;
+ }
+
++ /*
++ * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
++ * changing i_blkbits/folio order and punching out the pagecache.
++ */
++ inode_lock_shared(bd_inode);
+ ret = filemap_read(iocb, to, ret);
++ inode_unlock_shared(bd_inode);
+
+ reexpand:
+ if (unlikely(shorted))
+@@ -771,6 +785,7 @@ static long blkdev_fallocate(struct file
+ if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
+
++ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
+ /*
+@@ -811,6 +826,7 @@ static long blkdev_fallocate(struct file
+
+ fail:
+ filemap_invalidate_unlock(inode->i_mapping);
++ inode_unlock(inode);
+ return error;
+ }
+
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -114,6 +114,7 @@ static int blk_ioctl_discard(struct bloc
+ end > bdev_nr_bytes(bdev))
+ return -EINVAL;
+
++ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+ err = truncate_bdev_range(bdev, mode, start, end - 1);
+ if (err)
+@@ -121,6 +122,7 @@ static int blk_ioctl_discard(struct bloc
+ err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+ fail:
+ filemap_invalidate_unlock(inode->i_mapping);
++ inode_unlock(inode);
+ return err;
+ }
+
+@@ -146,12 +148,14 @@ static int blk_ioctl_secure_erase(struct
+ end > bdev_nr_bytes(bdev))
+ return -EINVAL;
+
++ inode_lock(bdev->bd_inode);
+ filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ err = truncate_bdev_range(bdev, mode, start, end - 1);
+ if (!err)
+ err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
+ GFP_KERNEL);
+ filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
++ inode_unlock(bdev->bd_inode);
+ return err;
+ }
+
+@@ -184,6 +188,7 @@ static int blk_ioctl_zeroout(struct bloc
+ return -EINVAL;
+
+ /* Invalidate the page cache, including dirty pages */
++ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+ err = truncate_bdev_range(bdev, mode, start, end);
+ if (err)
+@@ -194,6 +199,7 @@ static int blk_ioctl_zeroout(struct bloc
+
+ fail:
+ filemap_invalidate_unlock(inode->i_mapping);
++ inode_unlock(inode);
+ return err;
+ }
+
--- /dev/null
+From stable+bounces-188379-greg=kroah.com@vger.kernel.org Tue Oct 21 18:44:55 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 12:44:18 -0400
+Subject: ext4: avoid potential buffer over-read in parse_apply_sb_mount_options()
+To: stable@vger.kernel.org
+Cc: Theodore Ts'o <tytso@mit.edu>, Jan Kara <jack@suse.cz>, "Darrick J. Wong" <djwong@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021164418.2381659-1-sashal@kernel.org>
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+[ Upstream commit 8ecb790ea8c3fc69e77bace57f14cf0d7c177bd8 ]
+
+Unlike other strings in the ext4 superblock, we rely on tune2fs to
+make sure s_mount_opts is NUL terminated. Harden
+parse_apply_sb_mount_options() by treating s_mount_opts as a potential
+__nonstring.
+
+Cc: stable@vger.kernel.org
+Fixes: 8b67f04ab9de ("ext4: Add mount options in superblock")
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Message-ID: <20250916-tune2fs-v2-1-d594dc7486f0@mit.edu>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+[ added sizeof() third argument to strscpy_pad() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/super.c | 17 +++++------------
+ 1 file changed, 5 insertions(+), 12 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2506,7 +2506,7 @@ static int parse_apply_sb_mount_options(
+ struct ext4_fs_context *m_ctx)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- char *s_mount_opts = NULL;
++ char s_mount_opts[65];
+ struct ext4_fs_context *s_ctx = NULL;
+ struct fs_context *fc = NULL;
+ int ret = -ENOMEM;
+@@ -2514,15 +2514,11 @@ static int parse_apply_sb_mount_options(
+ if (!sbi->s_es->s_mount_opts[0])
+ return 0;
+
+- s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+- sizeof(sbi->s_es->s_mount_opts),
+- GFP_KERNEL);
+- if (!s_mount_opts)
+- return ret;
++ strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts, sizeof(s_mount_opts));
+
+ fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
+ if (!fc)
+- goto out_free;
++ return -ENOMEM;
+
+ s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
+ if (!s_ctx)
+@@ -2554,11 +2550,8 @@ parse_failed:
+ ret = 0;
+
+ out_free:
+- if (fc) {
+- ext4_fc_free(fc);
+- kfree(fc);
+- }
+- kfree(s_mount_opts);
++ ext4_fc_free(fc);
++ kfree(fc);
+ return ret;
+ }
+
--- /dev/null
+From stable+bounces-188179-greg=kroah.com@vger.kernel.org Mon Oct 20 18:16:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:16:05 -0400
+Subject: fs: quota: create dedicated workqueue for quota_release_work
+To: stable@vger.kernel.org
+Cc: Shashank A P <shashank.ap@samsung.com>, Jan Kara <jack@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020161605.1834667-2-sashal@kernel.org>
+
+From: Shashank A P <shashank.ap@samsung.com>
+
+[ Upstream commit 72b7ceca857f38a8ca7c5629feffc63769638974 ]
+
+There is a kernel panic due to WARN_ONCE when panic_on_warn is set.
+
+This issue occurs when writeback is triggered due to sync call for an
+opened file(ie, writeback reason is WB_REASON_SYNC). When f2fs balance
+is needed at sync path, flush for quota_release_work is triggered.
+By default quota_release_work is queued to "events_unbound" queue which
+does not have WQ_MEM_RECLAIM flag. During f2fs balance "writeback"
+workqueue tries to flush quota_release_work causing kernel panic due to
+MEM_RECLAIM flag mismatch errors.
+
+This patch creates dedicated workqueue with WQ_MEM_RECLAIM flag
+for work quota_release_work.
+
+------------[ cut here ]------------
+WARNING: CPU: 4 PID: 14867 at kernel/workqueue.c:3721 check_flush_dependency+0x13c/0x148
+Call trace:
+ check_flush_dependency+0x13c/0x148
+ __flush_work+0xd0/0x398
+ flush_delayed_work+0x44/0x5c
+ dquot_writeback_dquots+0x54/0x318
+ f2fs_do_quota_sync+0xb8/0x1a8
+ f2fs_write_checkpoint+0x3cc/0x99c
+ f2fs_gc+0x190/0x750
+ f2fs_balance_fs+0x110/0x168
+ f2fs_write_single_data_page+0x474/0x7dc
+ f2fs_write_data_pages+0x7d0/0xd0c
+ do_writepages+0xe0/0x2f4
+ __writeback_single_inode+0x44/0x4ac
+ writeback_sb_inodes+0x30c/0x538
+ wb_writeback+0xf4/0x440
+ wb_workfn+0x128/0x5d4
+ process_scheduled_works+0x1c4/0x45c
+ worker_thread+0x32c/0x3e8
+ kthread+0x11c/0x1b0
+ ret_from_fork+0x10/0x20
+Kernel panic - not syncing: kernel: panic_on_warn set ...
+
+Fixes: ac6f420291b3 ("quota: flush quota_release_work upon quota writeback")
+CC: stable@vger.kernel.org
+Signed-off-by: Shashank A P <shashank.ap@samsung.com>
+Link: https://patch.msgid.link/20250901092905.2115-1-shashank.ap@samsung.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/quota/dquot.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -163,6 +163,9 @@ static struct quota_module_name module_n
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
+
++/* workqueue for work quota_release_work*/
++static struct workqueue_struct *quota_unbound_wq;
++
+ void register_quota_format(struct quota_format_type *fmt)
+ {
+ spin_lock(&dq_list_lock);
+@@ -891,7 +894,7 @@ void dqput(struct dquot *dquot)
+ put_releasing_dquots(dquot);
+ atomic_dec(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+-	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
++	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
+ }
+ EXPORT_SYMBOL(dqput);
+
+@@ -3046,6 +3049,11 @@ static int __init dquot_init(void)
+ if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
+ panic("Cannot register dquot shrinker");
+
++ quota_unbound_wq = alloc_workqueue("quota_events_unbound",
++ WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
++ if (!quota_unbound_wq)
++ panic("Cannot create quota_unbound_wq\n");
++
+ return 0;
+ }
+ fs_initcall(dquot_init);
--- /dev/null
+From 42520df65bf67189541a425f7d36b0b3e7bd7844 Mon Sep 17 00:00:00 2001
+From: Viacheslav Dubeyko <slava@dubeyko.com>
+Date: Fri, 19 Sep 2025 12:12:44 -0700
+Subject: hfsplus: fix slab-out-of-bounds read in hfsplus_strcasecmp()
+
+From: Viacheslav Dubeyko <slava@dubeyko.com>
+
+commit 42520df65bf67189541a425f7d36b0b3e7bd7844 upstream.
+
+The hfsplus_strcasecmp() logic can trigger the issue:
+
+[ 117.317703][ T9855] ==================================================================
+[ 117.318353][ T9855] BUG: KASAN: slab-out-of-bounds in hfsplus_strcasecmp+0x1bc/0x490
+[ 117.318991][ T9855] Read of size 2 at addr ffff88802160f40c by task repro/9855
+[ 117.319577][ T9855]
+[ 117.319773][ T9855] CPU: 0 UID: 0 PID: 9855 Comm: repro Not tainted 6.17.0-rc6 #33 PREEMPT(full)
+[ 117.319780][ T9855] Hardware name: QEMU Ubuntu 24.04 PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+[ 117.319783][ T9855] Call Trace:
+[ 117.319785][ T9855] <TASK>
+[ 117.319788][ T9855] dump_stack_lvl+0x1c1/0x2a0
+[ 117.319795][ T9855] ? __virt_addr_valid+0x1c8/0x5c0
+[ 117.319803][ T9855] ? __pfx_dump_stack_lvl+0x10/0x10
+[ 117.319808][ T9855] ? rcu_is_watching+0x15/0xb0
+[ 117.319816][ T9855] ? lock_release+0x4b/0x3e0
+[ 117.319821][ T9855] ? __kasan_check_byte+0x12/0x40
+[ 117.319828][ T9855] ? __virt_addr_valid+0x1c8/0x5c0
+[ 117.319835][ T9855] ? __virt_addr_valid+0x4a5/0x5c0
+[ 117.319842][ T9855] print_report+0x17e/0x7e0
+[ 117.319848][ T9855] ? __virt_addr_valid+0x1c8/0x5c0
+[ 117.319855][ T9855] ? __virt_addr_valid+0x4a5/0x5c0
+[ 117.319862][ T9855] ? __phys_addr+0xd3/0x180
+[ 117.319869][ T9855] ? hfsplus_strcasecmp+0x1bc/0x490
+[ 117.319876][ T9855] kasan_report+0x147/0x180
+[ 117.319882][ T9855] ? hfsplus_strcasecmp+0x1bc/0x490
+[ 117.319891][ T9855] hfsplus_strcasecmp+0x1bc/0x490
+[ 117.319900][ T9855] ? __pfx_hfsplus_cat_case_cmp_key+0x10/0x10
+[ 117.319906][ T9855] hfs_find_rec_by_key+0xa9/0x1e0
+[ 117.319913][ T9855] __hfsplus_brec_find+0x18e/0x470
+[ 117.319920][ T9855] ? __pfx_hfsplus_bnode_find+0x10/0x10
+[ 117.319926][ T9855] ? __pfx_hfs_find_rec_by_key+0x10/0x10
+[ 117.319933][ T9855] ? __pfx___hfsplus_brec_find+0x10/0x10
+[ 117.319942][ T9855] hfsplus_brec_find+0x28f/0x510
+[ 117.319949][ T9855] ? __pfx_hfs_find_rec_by_key+0x10/0x10
+[ 117.319956][ T9855] ? __pfx_hfsplus_brec_find+0x10/0x10
+[ 117.319963][ T9855] ? __kmalloc_noprof+0x2a9/0x510
+[ 117.319969][ T9855] ? hfsplus_find_init+0x8c/0x1d0
+[ 117.319976][ T9855] hfsplus_brec_read+0x2b/0x120
+[ 117.319983][ T9855] hfsplus_lookup+0x2aa/0x890
+[ 117.319990][ T9855] ? __pfx_hfsplus_lookup+0x10/0x10
+[ 117.320003][ T9855] ? d_alloc_parallel+0x2f0/0x15e0
+[ 117.320008][ T9855] ? __lock_acquire+0xaec/0xd80
+[ 117.320013][ T9855] ? __pfx_d_alloc_parallel+0x10/0x10
+[ 117.320019][ T9855] ? __raw_spin_lock_init+0x45/0x100
+[ 117.320026][ T9855] ? __init_waitqueue_head+0xa9/0x150
+[ 117.320034][ T9855] __lookup_slow+0x297/0x3d0
+[ 117.320039][ T9855] ? __pfx___lookup_slow+0x10/0x10
+[ 117.320045][ T9855] ? down_read+0x1ad/0x2e0
+[ 117.320055][ T9855] lookup_slow+0x53/0x70
+[ 117.320065][ T9855] walk_component+0x2f0/0x430
+[ 117.320073][ T9855] path_lookupat+0x169/0x440
+[ 117.320081][ T9855] filename_lookup+0x212/0x590
+[ 117.320089][ T9855] ? __pfx_filename_lookup+0x10/0x10
+[ 117.320098][ T9855] ? strncpy_from_user+0x150/0x290
+[ 117.320105][ T9855] ? getname_flags+0x1e5/0x540
+[ 117.320112][ T9855] user_path_at+0x3a/0x60
+[ 117.320117][ T9855] __x64_sys_umount+0xee/0x160
+[ 117.320123][ T9855] ? __pfx___x64_sys_umount+0x10/0x10
+[ 117.320129][ T9855] ? do_syscall_64+0xb7/0x3a0
+[ 117.320135][ T9855] ? entry_SYSCALL_64_after_hwframe+0x77/0x7f
+[ 117.320141][ T9855] ? entry_SYSCALL_64_after_hwframe+0x77/0x7f
+[ 117.320145][ T9855] do_syscall_64+0xf3/0x3a0
+[ 117.320150][ T9855] ? exc_page_fault+0x9f/0xf0
+[ 117.320154][ T9855] entry_SYSCALL_64_after_hwframe+0x77/0x7f
+[ 117.320158][ T9855] RIP: 0033:0x7f7dd7908b07
+[ 117.320163][ T9855] Code: 23 0d 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 31 f6 e9 09 00 00 00 66 0f 1f 84 00 00 08
+[ 117.320167][ T9855] RSP: 002b:00007ffd5ebd9698 EFLAGS: 00000202 ORIG_RAX: 00000000000000a6
+[ 117.320172][ T9855] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7dd7908b07
+[ 117.320176][ T9855] RDX: 0000000000000009 RSI: 0000000000000009 RDI: 00007ffd5ebd9740
+[ 117.320179][ T9855] RBP: 00007ffd5ebda780 R08: 0000000000000005 R09: 00007ffd5ebd9530
+[ 117.320181][ T9855] R10: 00007f7dd799bfc0 R11: 0000000000000202 R12: 000055e2008b32d0
+[ 117.320184][ T9855] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+[ 117.320189][ T9855] </TASK>
+[ 117.320190][ T9855]
+[ 117.351311][ T9855] Allocated by task 9855:
+[ 117.351683][ T9855] kasan_save_track+0x3e/0x80
+[ 117.352093][ T9855] __kasan_kmalloc+0x8d/0xa0
+[ 117.352490][ T9855] __kmalloc_noprof+0x288/0x510
+[ 117.352914][ T9855] hfsplus_find_init+0x8c/0x1d0
+[ 117.353342][ T9855] hfsplus_lookup+0x19c/0x890
+[ 117.353747][ T9855] __lookup_slow+0x297/0x3d0
+[ 117.354148][ T9855] lookup_slow+0x53/0x70
+[ 117.354514][ T9855] walk_component+0x2f0/0x430
+[ 117.354921][ T9855] path_lookupat+0x169/0x440
+[ 117.355325][ T9855] filename_lookup+0x212/0x590
+[ 117.355740][ T9855] user_path_at+0x3a/0x60
+[ 117.356115][ T9855] __x64_sys_umount+0xee/0x160
+[ 117.356529][ T9855] do_syscall_64+0xf3/0x3a0
+[ 117.356920][ T9855] entry_SYSCALL_64_after_hwframe+0x77/0x7f
+[ 117.357429][ T9855]
+[ 117.357636][ T9855] The buggy address belongs to the object at ffff88802160f000
+[ 117.357636][ T9855] which belongs to the cache kmalloc-2k of size 2048
+[ 117.358827][ T9855] The buggy address is located 0 bytes to the right of
+[ 117.358827][ T9855] allocated 1036-byte region [ffff88802160f000, ffff88802160f40c)
+[ 117.360061][ T9855]
+[ 117.360266][ T9855] The buggy address belongs to the physical page:
+[ 117.360813][ T9855] page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x21608
+[ 117.361562][ T9855] head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
+[ 117.362285][ T9855] flags: 0xfff00000000040(head|node=0|zone=1|lastcpupid=0x7ff)
+[ 117.362929][ T9855] page_type: f5(slab)
+[ 117.363282][ T9855] raw: 00fff00000000040 ffff88801a842f00 ffffea0000932000 dead000000000002
+[ 117.364015][ T9855] raw: 0000000000000000 0000000080080008 00000000f5000000 0000000000000000
+[ 117.364750][ T9855] head: 00fff00000000040 ffff88801a842f00 ffffea0000932000 dead000000000002
+[ 117.365491][ T9855] head: 0000000000000000 0000000080080008 00000000f5000000 0000000000000000
+[ 117.366232][ T9855] head: 00fff00000000003 ffffea0000858201 00000000ffffffff 00000000ffffffff
+[ 117.366968][ T9855] head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
+[ 117.367711][ T9855] page dumped because: kasan: bad access detected
+[ 117.368259][ T9855] page_owner tracks the page as allocated
+[ 117.368745][ T9855] page last allocated via order 3, migratetype Unmovable, gfp_mask 0xd20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN1
+[ 117.370541][ T9855] post_alloc_hook+0x240/0x2a0
+[ 117.370954][ T9855] get_page_from_freelist+0x2101/0x21e0
+[ 117.371435][ T9855] __alloc_frozen_pages_noprof+0x274/0x380
+[ 117.371935][ T9855] alloc_pages_mpol+0x241/0x4b0
+[ 117.372360][ T9855] allocate_slab+0x8d/0x380
+[ 117.372752][ T9855] ___slab_alloc+0xbe3/0x1400
+[ 117.373159][ T9855] __kmalloc_cache_noprof+0x296/0x3d0
+[ 117.373621][ T9855] nexthop_net_init+0x75/0x100
+[ 117.374038][ T9855] ops_init+0x35c/0x5c0
+[ 117.374400][ T9855] setup_net+0x10c/0x320
+[ 117.374768][ T9855] copy_net_ns+0x31b/0x4d0
+[ 117.375156][ T9855] create_new_namespaces+0x3f3/0x720
+[ 117.375613][ T9855] unshare_nsproxy_namespaces+0x11c/0x170
+[ 117.376094][ T9855] ksys_unshare+0x4ca/0x8d0
+[ 117.376477][ T9855] __x64_sys_unshare+0x38/0x50
+[ 117.376879][ T9855] do_syscall_64+0xf3/0x3a0
+[ 117.377265][ T9855] page last free pid 9110 tgid 9110 stack trace:
+[ 117.377795][ T9855] __free_frozen_pages+0xbeb/0xd50
+[ 117.378229][ T9855] __put_partials+0x152/0x1a0
+[ 117.378625][ T9855] put_cpu_partial+0x17c/0x250
+[ 117.379026][ T9855] __slab_free+0x2d4/0x3c0
+[ 117.379404][ T9855] qlist_free_all+0x97/0x140
+[ 117.379790][ T9855] kasan_quarantine_reduce+0x148/0x160
+[ 117.380250][ T9855] __kasan_slab_alloc+0x22/0x80
+[ 117.380662][ T9855] __kmalloc_noprof+0x232/0x510
+[ 117.381074][ T9855] tomoyo_supervisor+0xc0a/0x1360
+[ 117.381498][ T9855] tomoyo_env_perm+0x149/0x1e0
+[ 117.381903][ T9855] tomoyo_find_next_domain+0x15ad/0x1b90
+[ 117.382378][ T9855] tomoyo_bprm_check_security+0x11c/0x180
+[ 117.382859][ T9855] security_bprm_check+0x89/0x280
+[ 117.383289][ T9855] bprm_execve+0x8f1/0x14a0
+[ 117.383673][ T9855] do_execveat_common+0x528/0x6b0
+[ 117.384103][ T9855] __x64_sys_execve+0x94/0xb0
+[ 117.384500][ T9855]
+[ 117.384706][ T9855] Memory state around the buggy address:
+[ 117.385179][ T9855] ffff88802160f300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+[ 117.385854][ T9855] ffff88802160f380: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+[ 117.386534][ T9855] >ffff88802160f400: 00 04 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 117.387204][ T9855] ^
+[ 117.387566][ T9855] ffff88802160f480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 117.388243][ T9855] ffff88802160f500: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 117.388918][ T9855] ==================================================================
+
+The issue takes place if the length field of struct hfsplus_unistr
+is bigger than HFSPLUS_MAX_STRLEN. The patch simply checks
+the length of comparing strings. And if the strings' length
+is bigger than HFSPLUS_MAX_STRLEN, then it is corrected
+to this value.
+
+v2
+The string length correction has been added for hfsplus_strcmp().
+
+Reported-by: Jiaming Zhang <r772577952@gmail.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+cc: Yangtao Li <frank.li@vivo.com>
+cc: linux-fsdevel@vger.kernel.org
+cc: syzkaller@googlegroups.com
+Link: https://lore.kernel.org/r/20250919191243.1370388-1-slava@dubeyko.com
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/unicode.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/fs/hfsplus/unicode.c
++++ b/fs/hfsplus/unicode.c
+@@ -40,6 +40,18 @@ int hfsplus_strcasecmp(const struct hfsp
+ p1 = s1->unicode;
+ p2 = s2->unicode;
+
++ if (len1 > HFSPLUS_MAX_STRLEN) {
++ len1 = HFSPLUS_MAX_STRLEN;
++ pr_err("invalid length %u has been corrected to %d\n",
++ be16_to_cpu(s1->length), len1);
++ }
++
++ if (len2 > HFSPLUS_MAX_STRLEN) {
++ len2 = HFSPLUS_MAX_STRLEN;
++ pr_err("invalid length %u has been corrected to %d\n",
++ be16_to_cpu(s2->length), len2);
++ }
++
+ while (1) {
+ c1 = c2 = 0;
+
+@@ -74,6 +86,18 @@ int hfsplus_strcmp(const struct hfsplus_
+ p1 = s1->unicode;
+ p2 = s2->unicode;
+
++ if (len1 > HFSPLUS_MAX_STRLEN) {
++ len1 = HFSPLUS_MAX_STRLEN;
++ pr_err("invalid length %u has been corrected to %d\n",
++ be16_to_cpu(s1->length), len1);
++ }
++
++ if (len2 > HFSPLUS_MAX_STRLEN) {
++ len2 = HFSPLUS_MAX_STRLEN;
++ pr_err("invalid length %u has been corrected to %d\n",
++ be16_to_cpu(s2->length), len2);
++ }
++
+ for (len = min(len1, len2); len > 0; len--) {
+ c1 = be16_to_cpu(*p1);
+ c2 = be16_to_cpu(*p2);
--- /dev/null
+From stable+bounces-188110-greg=kroah.com@vger.kernel.org Mon Oct 20 15:09:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:09:00 -0400
+Subject: iio: imu: inv_icm42600: Avoid configuring if already pm_runtime suspended
+To: stable@vger.kernel.org
+Cc: Sean Nyekjaer <sean@geanix.com>, Stable@vger.kernel.org, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020130900.1766996-2-sashal@kernel.org>
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+[ Upstream commit 466f7a2fef2a4e426f809f79845a1ec1aeb558f4 ]
+
+Do as in suspend, skip resume configuration steps if the device is already
+pm_runtime suspended. This avoids reconfiguring a device that is already
+in the correct low-power state and ensures that pm_runtime handles the
+power state transitions properly.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-3-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ adjusted context due to missing APEX/WoM features in older kernel version ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -687,17 +687,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_p
+ static int inv_icm42600_suspend(struct device *dev)
+ {
+ struct inv_icm42600_state *st = dev_get_drvdata(dev);
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&st->lock);
+
+ st->suspended.gyro = st->conf.gyro.mode;
+ st->suspended.accel = st->conf.accel.mode;
+ st->suspended.temp = st->conf.temp_en;
+- if (pm_runtime_suspended(dev)) {
+- ret = 0;
++ if (pm_runtime_suspended(dev))
+ goto out_unlock;
+- }
+
+ /* disable FIFO data streaming */
+ if (st->fifo.on) {
+@@ -729,10 +727,13 @@ static int inv_icm42600_resume(struct de
+ struct inv_icm42600_state *st = dev_get_drvdata(dev);
+ struct inv_sensors_timestamp *gyro_ts = iio_priv(st->indio_gyro);
+ struct inv_sensors_timestamp *accel_ts = iio_priv(st->indio_accel);
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&st->lock);
+
++ if (pm_runtime_suspended(dev))
++ goto out_unlock;
++
+ ret = inv_icm42600_enable_regulator_vddio(st);
+ if (ret)
+ goto out_unlock;
--- /dev/null
+From stable+bounces-188109-greg=kroah.com@vger.kernel.org Mon Oct 20 15:10:03 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:08:59 -0400
+Subject: iio: imu: inv_icm42600: reorganize DMA aligned buffers in structure
+To: stable@vger.kernel.org
+Cc: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020130900.1766996-1-sashal@kernel.org>
+
+From: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>
+
+[ Upstream commit 0c122c280e78150b0c666fb69db0000cdd1d7e0a ]
+
+Move all DMA aligned buffers together at the end of the structure.
+
+1. Timestamp anonymous structure is not used with DMA so it doesn't
+belong after __aligned(IIO_DMA_MINALIGN).
+2. struct inv_icm42600_fifo contains its own __aligned(IIO_DMA_MINALIGN)
+within it at the end so it should not be after __aligned(IIO_DMA_MINALIGN)
+in the outer struct either.
+3. Normally 1 would have been considered a bug, but because of the extra
+alignment from 2, it actually was OK, but we shouldn't be relying on such
+quirks.
+
+Signed-off-by: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>
+Link: https://patch.msgid.link/20250630-losd-3-inv-icm42600-add-wom-support-v6-1-5bb0c84800d9@tdk.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Stable-dep-of: 466f7a2fef2a ("iio: imu: inv_icm42600: Avoid configuring if already pm_runtime suspended")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+@@ -126,9 +126,9 @@ struct inv_icm42600_suspended {
+ * @suspended: suspended sensors configuration.
+ * @indio_gyro: gyroscope IIO device.
+ * @indio_accel: accelerometer IIO device.
+- * @buffer: data transfer buffer aligned for DMA.
+- * @fifo: FIFO management structure.
+ * @timestamp: interrupt timestamps.
++ * @fifo: FIFO management structure.
++ * @buffer: data transfer buffer aligned for DMA.
+ */
+ struct inv_icm42600_state {
+ struct mutex lock;
+@@ -142,12 +142,12 @@ struct inv_icm42600_state {
+ struct inv_icm42600_suspended suspended;
+ struct iio_dev *indio_gyro;
+ struct iio_dev *indio_accel;
+- u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
+- struct inv_icm42600_fifo fifo;
+ struct {
+ s64 gyro;
+ s64 accel;
+ } timestamp;
++ struct inv_icm42600_fifo fifo;
++ u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
+ };
+
+ /* Virtual register addresses: @bank on MSB (4 upper bits), @address on LSB */
--- /dev/null
+From stable+bounces-188097-greg=kroah.com@vger.kernel.org Mon Oct 20 15:03:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:03:03 -0400
+Subject: iio: imu: inv_icm42600: Simplify pm_runtime setup
+To: stable@vger.kernel.org
+Cc: Sean Nyekjaer <sean@geanix.com>, Stable@vger.kernel.org, Jonathan Cameron <Jonathan.Cameron@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020130303.1764135-2-sashal@kernel.org>
+
+From: Sean Nyekjaer <sean@geanix.com>
+
+[ Upstream commit 0792c1984a45ccd7a296d6b8cb78088bc99a212e ]
+
+Rework the power management in inv_icm42600_core_probe() to use
+devm_pm_runtime_set_active_enabled(), which simplifies the runtime PM
+setup by handling activation and enabling in one step.
+Remove the separate inv_icm42600_disable_pm callback, as it's no longer
+needed with the devm-managed approach.
+Using devm_pm_runtime_enable() also fixes the missing disable of
+autosuspend.
+Update inv_icm42600_disable_vddio_reg() to only disable the regulator if
+the device is not suspended i.e. powered-down, preventing unbalanced
+disables.
+Also remove redundant error msg on regulator_disable(), the regulator
+framework already emits an error message when regulator_disable() fails.
+
+This simplifies the PM setup and avoids manipulating the usage counter
+unnecessarily.
+
+Fixes: 31c24c1e93c3 ("iio: imu: inv_icm42600: add core of new inv_icm42600 driver")
+Signed-off-by: Sean Nyekjaer <sean@geanix.com>
+Link: https://patch.msgid.link/20250901-icm42pmreg-v3-1-ef1336246960@geanix.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 24 ++++++-----------------
+ 1 file changed, 7 insertions(+), 17 deletions(-)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+@@ -567,20 +567,12 @@ static void inv_icm42600_disable_vdd_reg
+ static void inv_icm42600_disable_vddio_reg(void *_data)
+ {
+ struct inv_icm42600_state *st = _data;
+- const struct device *dev = regmap_get_device(st->map);
+- int ret;
+-
+- ret = regulator_disable(st->vddio_supply);
+- if (ret)
+- dev_err(dev, "failed to disable vddio error %d\n", ret);
+-}
++ struct device *dev = regmap_get_device(st->map);
+
+-static void inv_icm42600_disable_pm(void *_data)
+-{
+- struct device *dev = _data;
++ if (pm_runtime_status_suspended(dev))
++ return;
+
+- pm_runtime_put_sync(dev);
+- pm_runtime_disable(dev);
++ regulator_disable(st->vddio_supply);
+ }
+
+ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+@@ -677,16 +669,14 @@ int inv_icm42600_core_probe(struct regma
+ return ret;
+
+ /* setup runtime power management */
+- ret = pm_runtime_set_active(dev);
++ ret = devm_pm_runtime_set_active_enabled(dev);
+ if (ret)
+ return ret;
+- pm_runtime_get_noresume(dev);
+- pm_runtime_enable(dev);
++
+ pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+- pm_runtime_put(dev);
+
+- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
++ return ret;
+ }
+ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
+
--- /dev/null
+From stable+bounces-188225-greg=kroah.com@vger.kernel.org Mon Oct 20 20:10:38 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 14:10:26 -0400
+Subject: ixgbevf: Add support for Intel(R) E610 device
+To: stable@vger.kernel.org
+Cc: Piotr Kwapulinski <piotr.kwapulinski@intel.com>, Przemek Kitszel <przemyslaw.kitszel@intel.com>, Simon Horman <horms@kernel.org>, Rafal Romanowski <rafal.romanowski@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020181028.1864198-2-sashal@kernel.org>
+
+From: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+
+[ Upstream commit 4c44b450c69b676955c2790dcf467c1f969d80f1 ]
+
+Add support for Intel(R) E610 Series of network devices. The E610
+is based on X550 but adds firmware managed link, enhanced security
+capabilities and support for updated server manageability
+
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: a7075f501bd3 ("ixgbevf: fix mailbox API compatibility by negotiating supported features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbevf/defines.h | 5 ++++-
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 6 +++++-
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 12 ++++++++++--
+ drivers/net/ethernet/intel/ixgbevf/vf.c | 12 +++++++++++-
+ drivers/net/ethernet/intel/ixgbevf/vf.h | 4 +++-
+ 5 files changed, 33 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
++++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+ #ifndef _IXGBEVF_DEFINES_H_
+ #define _IXGBEVF_DEFINES_H_
+@@ -16,6 +16,9 @@
+ #define IXGBE_DEV_ID_X550_VF_HV 0x1564
+ #define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
++#define IXGBE_DEV_ID_E610_VF 0x57AD
++#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
++
+ #define IXGBE_VF_IRQ_CLEAR_MASK 7
+ #define IXGBE_VF_MAX_TX_QUEUES 8
+ #define IXGBE_VF_MAX_RX_QUEUES 8
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+ #ifndef _IXGBEVF_H_
+ #define _IXGBEVF_H_
+@@ -418,6 +418,8 @@ enum ixgbevf_boards {
+ board_X550EM_x_vf,
+ board_X550EM_x_vf_hv,
+ board_x550em_a_vf,
++ board_e610_vf,
++ board_e610_vf_hv,
+ };
+
+ enum ixgbevf_xcast_modes {
+@@ -434,11 +436,13 @@ extern const struct ixgbevf_info ixgbevf
+ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
+ extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
++extern const struct ixgbevf_info ixgbevf_e610_vf_info;
+
+ extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
++extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
+ extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
+ /* needed by ethtool.c */
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+ /******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[
+ "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
+
+ static char ixgbevf_copyright[] =
+- "Copyright (c) 2009 - 2018 Intel Corporation.";
++ "Copyright (c) 2009 - 2024 Intel Corporation.";
+
+ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbev
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
+ [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
++ [board_e610_vf] = &ixgbevf_e610_vf_info,
++ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
+ };
+
+ /* ixgbevf_pci_tbl - PCI Device ID Table
+@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbev
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
++ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
++ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
+ /* required last entry */
+ {0, }
+ };
+@@ -4694,6 +4699,9 @@ static int ixgbevf_probe(struct pci_dev
+ case ixgbe_mac_X540_vf:
+ dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
+ break;
++ case ixgbe_mac_e610_vf:
++ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
++ break;
+ case ixgbe_mac_82599_vf:
+ default:
+ dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+ #include "vf.h"
+ #include "ixgbevf.h"
+@@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em
+ .mac = ixgbe_mac_x550em_a_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+ };
++
++const struct ixgbevf_info ixgbevf_e610_vf_info = {
++ .mac = ixgbe_mac_e610_vf,
++ .mac_ops = &ixgbevf_mac_ops,
++};
++
++const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
++ .mac = ixgbe_mac_e610_vf,
++ .mac_ops = &ixgbevf_hv_mac_ops,
++};
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
+@@ -1,5 +1,5 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/* Copyright(c) 1999 - 2018 Intel Corporation. */
++/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+ #ifndef __IXGBE_VF_H__
+ #define __IXGBE_VF_H__
+@@ -54,6 +54,8 @@ enum ixgbe_mac_type {
+ ixgbe_mac_X550_vf,
+ ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_x550em_a_vf,
++ ixgbe_mac_e610,
++ ixgbe_mac_e610_vf,
+ ixgbe_num_macs
+ };
+
--- /dev/null
+From stable+bounces-188226-greg=kroah.com@vger.kernel.org Mon Oct 20 20:10:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 14:10:27 -0400
+Subject: ixgbevf: fix getting link speed data for E610 devices
+To: stable@vger.kernel.org
+Cc: Jedrzej Jagielski <jedrzej.jagielski@intel.com>, Andrzej Wilczynski <andrzejx.wilczynski@intel.com>, Przemek Kitszel <przemyslaw.kitszel@intel.com>, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Rafal Romanowski <rafal.romanowski@intel.com>, Jacob Keller <jacob.e.keller@intel.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020181028.1864198-3-sashal@kernel.org>
+
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+
+[ Upstream commit 53f0eb62b4d23d40686f2dd51776b8220f2887bb ]
+
+E610 adapters no longer use the VFLINKS register to read PF's link
+speed and linkup state. As a result VF driver cannot get actual link
+state and it incorrectly reports 10G which is the default option.
+It leads to a situation where even 1G adapters print 10G as actual
+link speed. The same happens when PF driver set speed different than 10G.
+
+Add new mailbox operation to let the VF driver request a PF driver
+to provide actual link data. Update the mailbox api to v1.6.
+
+Incorporate both ways of getting link status within the legacy
+ixgbe_check_mac_link_vf() function.
+
+Fixes: 4c44b450c69b ("ixgbevf: Add support for Intel(R) E610 device")
+Co-developed-by: Andrzej Wilczynski <andrzejx.wilczynski@intel.com>
+Signed-off-by: Andrzej Wilczynski <andrzejx.wilczynski@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20251009-jk-iwl-net-2025-10-01-v3-2-ef32a425b92a@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: a7075f501bd3 ("ixgbevf: fix mailbox API compatibility by negotiating supported features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbevf/defines.h | 1
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6
+ drivers/net/ethernet/intel/ixgbevf/mbx.h | 4
+ drivers/net/ethernet/intel/ixgbevf/vf.c | 137 +++++++++++++++++-----
+ 4 files changed, 116 insertions(+), 32 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
++++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
+@@ -28,6 +28,7 @@
+
+ /* Link speed */
+ typedef u32 ixgbe_link_speed;
++#define IXGBE_LINK_SPEED_UNKNOWN 0
+ #define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+ #define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+ #define IXGBE_LINK_SPEED_100_FULL 0x0008
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -2279,6 +2279,7 @@ static void ixgbevf_negotiate_api(struct
+ {
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const int api[] = {
++ ixgbe_mbox_api_16,
+ ixgbe_mbox_api_15,
+ ixgbe_mbox_api_14,
+ ixgbe_mbox_api_13,
+@@ -2298,7 +2299,8 @@ static void ixgbevf_negotiate_api(struct
+ idx++;
+ }
+
+- if (hw->api_version >= ixgbe_mbox_api_15) {
++ /* Following is not supported by API 1.6, it is specific for 1.5 */
++ if (hw->api_version == ixgbe_mbox_api_15) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+@@ -2655,6 +2657,7 @@ static void ixgbevf_set_num_queues(struc
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
++ case ixgbe_mbox_api_16:
+ if (adapter->xdp_prog &&
+ hw->mac.max_tx_queues == rss)
+ rss = rss > 3 ? 2 : 1;
+@@ -4649,6 +4652,7 @@ static int ixgbevf_probe(struct pci_dev
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
++ case ixgbe_mbox_api_16:
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+ break;
+--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
++++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
+@@ -66,6 +66,7 @@ enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
++ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+ };
+@@ -102,6 +103,9 @@ enum ixgbe_pfvf_api_rev {
+
+ #define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
++/* mailbox API, version 1.6 VF requests */
++#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
++
+ /* length of permanent address message returned from PF */
+ #define IXGBE_VF_PERMADDR_MSG_LEN 4
+ /* word in permanent address message with the current multicast type */
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
+@@ -313,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe
+ * is not supported for this device type.
+ */
+ switch (hw->api_version) {
++ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_13:
+@@ -382,6 +383,7 @@ int ixgbevf_get_rss_key_locked(struct ix
+ * or if the operation is not supported for this device type.
+ */
+ switch (hw->api_version) {
++ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_13:
+@@ -552,6 +554,7 @@ static s32 ixgbevf_update_xcast_mode(str
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
++ case ixgbe_mbox_api_16:
+ break;
+ default:
+ return -EOPNOTSUPP;
+@@ -625,6 +628,48 @@ static s32 ixgbevf_hv_get_link_state_vf(
+ }
+
+ /**
++ * ixgbevf_get_pf_link_state - Get PF's link status
++ * @hw: pointer to the HW structure
++ * @speed: link speed
++ * @link_up: indicate if link is up/down
++ *
++ * Ask PF to provide link_up state and speed of the link.
++ *
++ * Return: IXGBE_ERR_MBX in the case of mailbox error,
++ * -EOPNOTSUPP if the op is not supported or 0 on success.
++ */
++static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
++ bool *link_up)
++{
++ u32 msgbuf[3] = {};
++ int err;
++
++ switch (hw->api_version) {
++ case ixgbe_mbox_api_16:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
++
++ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
++ ARRAY_SIZE(msgbuf));
++ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
++ err = IXGBE_ERR_MBX;
++ *speed = IXGBE_LINK_SPEED_UNKNOWN;
++ /* No need to set @link_up to false as it will be done by
++ * ixgbe_check_mac_link_vf().
++ */
++ } else {
++ *speed = msgbuf[1];
++ *link_up = msgbuf[2];
++ }
++
++ return err;
++}
++
++/**
+ * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+@@ -659,6 +704,58 @@ mbx_err:
+ }
+
+ /**
++ * ixgbe_read_vflinks - Read VFLINKS register
++ * @hw: pointer to the HW structure
++ * @speed: link speed
++ * @link_up: indicate if link is up/down
++ *
++ * Get linkup status and link speed from the VFLINKS register.
++ */
++static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
++ bool *link_up)
++{
++ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
++
++ /* if link status is down no point in checking to see if PF is up */
++ if (!(vflinks & IXGBE_LINKS_UP)) {
++ *link_up = false;
++ return;
++ }
++
++ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
++ * before the link status is correct
++ */
++ if (hw->mac.type == ixgbe_mac_82599_vf) {
++ for (int i = 0; i < 5; i++) {
++ udelay(100);
++ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
++
++ if (!(vflinks & IXGBE_LINKS_UP)) {
++ *link_up = false;
++ return;
++ }
++ }
++ }
++
++ /* We reached this point so there's link */
++ *link_up = true;
++
++ switch (vflinks & IXGBE_LINKS_SPEED_82599) {
++ case IXGBE_LINKS_SPEED_10G_82599:
++ *speed = IXGBE_LINK_SPEED_10GB_FULL;
++ break;
++ case IXGBE_LINKS_SPEED_1G_82599:
++ *speed = IXGBE_LINK_SPEED_1GB_FULL;
++ break;
++ case IXGBE_LINKS_SPEED_100_82599:
++ *speed = IXGBE_LINK_SPEED_100_FULL;
++ break;
++ default:
++ *speed = IXGBE_LINK_SPEED_UNKNOWN;
++ }
++}
++
++/**
+ * ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+@@ -705,7 +802,6 @@ static s32 ixgbevf_check_mac_link_vf(str
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+- u32 links_reg;
+ u32 in_msg = 0;
+
+ /* If we were hit with a reset drop the link */
+@@ -715,36 +811,14 @@ static s32 ixgbevf_check_mac_link_vf(str
+ if (!mac->get_link_status)
+ goto out;
+
+- /* if link status is down no point in checking to see if pf is up */
+- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+- if (!(links_reg & IXGBE_LINKS_UP))
+- goto out;
+-
+- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+- * before the link status is correct
+- */
+- if (mac->type == ixgbe_mac_82599_vf) {
+- int i;
+-
+- for (i = 0; i < 5; i++) {
+- udelay(100);
+- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+-
+- if (!(links_reg & IXGBE_LINKS_UP))
+- goto out;
+- }
+- }
+-
+- switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+- case IXGBE_LINKS_SPEED_10G_82599:
+- *speed = IXGBE_LINK_SPEED_10GB_FULL;
+- break;
+- case IXGBE_LINKS_SPEED_1G_82599:
+- *speed = IXGBE_LINK_SPEED_1GB_FULL;
+- break;
+- case IXGBE_LINKS_SPEED_100_82599:
+- *speed = IXGBE_LINK_SPEED_100_FULL;
+- break;
++ if (hw->mac.type == ixgbe_mac_e610_vf) {
++ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
++ if (ret_val)
++ goto out;
++ } else {
++ ixgbe_read_vflinks(hw, speed, link_up);
++ if (*link_up == false)
++ goto out;
+ }
+
+ /* if the read failed it could just be a mailbox collision, best wait
+@@ -951,6 +1025,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
++ case ixgbe_mbox_api_16:
+ break;
+ default:
+ return 0;
--- /dev/null
+From stable+bounces-188227-greg=kroah.com@vger.kernel.org Mon Oct 20 20:10:48 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 14:10:28 -0400
+Subject: ixgbevf: fix mailbox API compatibility by negotiating supported features
+To: stable@vger.kernel.org
+Cc: Jedrzej Jagielski <jedrzej.jagielski@intel.com>, Jacob Keller <jacob.e.keller@intel.com>, Przemek Kitszel <przemyslaw.kitszel@intel.com>, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Rafal Romanowski <rafal.romanowski@intel.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020181028.1864198-4-sashal@kernel.org>
+
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+
+[ Upstream commit a7075f501bd33c93570af759b6f4302ef0175168 ]
+
+There was backward compatibility in the terms of mailbox API. Various
+drivers from various OSes supporting 10G adapters from Intel portfolio
+could easily negotiate mailbox API.
+
+This convention has been broken since introducing API 1.4.
+Commit 0062e7cc955e ("ixgbevf: add VF IPsec offload code") added support
+for IPSec which is specific only for the kernel ixgbe driver. None of the
+rest of the Intel 10G PF/VF drivers supports it. And actually lack of
+support was not included in the IPSec implementation - there were no such
+code paths. No possibility to negotiate support for the feature was
+introduced along with introduction of the feature itself.
+
+Commit 339f28964147 ("ixgbevf: Add support for new mailbox communication
+between PF and VF") increasing API version to 1.5 did the same - it
+introduced code supported specifically by the PF ESX driver. It altered API
+version for the VF driver in the same time not touching the version
+defined for the PF ixgbe driver. It led to additional discrepancies,
+as the code provided within API 1.6 cannot be supported for Linux ixgbe
+driver as it causes crashes.
+
+The issue was noticed some time ago and mitigated by Jake within the commit
+d0725312adf5 ("ixgbevf: stop attempting IPSEC offload on Mailbox API 1.5").
+As a result we have regression for IPsec support and after increasing API
+to version 1.6 ixgbevf driver stopped to support ESX MBX.
+
+To fix this mess add new mailbox op asking PF driver about supported
+features. Basing on a response determine whether to set support for IPSec
+and ESX-specific enhanced mailbox.
+
+New mailbox op, for compatibility purposes, must be added within new API
+revision, as API version of OOT PF & VF drivers is already increased to
+1.6 and doesn't incorporate features negotiate op.
+
+Features negotiation mechanism gives possibility to be extended with new
+features when needed in the future.
+
+Reported-by: Jacob Keller <jacob.e.keller@intel.com>
+Closes: https://lore.kernel.org/intel-wired-lan/20241101-jk-ixgbevf-mailbox-v1-5-fixes-v1-0-f556dc9a66ed@intel.com/
+Fixes: 0062e7cc955e ("ixgbevf: add VF IPsec offload code")
+Fixes: 339f28964147 ("ixgbevf: Add support for new mailbox communication between PF and VF")
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20251009-jk-iwl-net-2025-10-01-v3-4-ef32a425b92a@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbevf/ipsec.c | 10 ++++
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 7 +++
+ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 32 ++++++++++++++-
+ drivers/net/ethernet/intel/ixgbevf/mbx.h | 4 +
+ drivers/net/ethernet/intel/ixgbevf/vf.c | 45 +++++++++++++++++++++-
+ drivers/net/ethernet/intel/ixgbevf/vf.h | 1
+ 6 files changed, 96 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+@@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct x
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++ return -EOPNOTSUPP;
++
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
+ return -EINVAL;
+@@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++ return;
++
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+
+@@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct i
+ size_t size;
+
+ switch (adapter->hw.api_version) {
++ case ixgbe_mbox_api_17:
++ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
++ return;
++ break;
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+@@ -366,6 +366,13 @@ struct ixgbevf_adapter {
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
++ u32 pf_features;
++#define IXGBEVF_PF_SUP_IPSEC BIT(0)
++#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
++
++#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
++ IXGBEVF_PF_SUP_ESX_MBX)
++
+ struct ixgbevf_hw_stats stats;
+
+ unsigned long state;
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -2275,10 +2275,35 @@ static void ixgbevf_init_last_counter_st
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+ }
+
++/**
++ * ixgbevf_set_features - Set features supported by PF
++ * @adapter: pointer to the adapter struct
++ *
++ * Negotiate with PF supported features and then set pf_features accordingly.
++ */
++static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
++{
++ u32 *pf_features = &adapter->pf_features;
++ struct ixgbe_hw *hw = &adapter->hw;
++ int err;
++
++ err = hw->mac.ops.negotiate_features(hw, pf_features);
++ if (err && err != -EOPNOTSUPP)
++ netdev_dbg(adapter->netdev,
++ "PF feature negotiation failed.\n");
++
++ /* Address also pre API 1.7 cases */
++ if (hw->api_version == ixgbe_mbox_api_14)
++ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
++ else if (hw->api_version == ixgbe_mbox_api_15)
++ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
++}
++
+ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+ {
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const int api[] = {
++ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
+ ixgbe_mbox_api_15,
+ ixgbe_mbox_api_14,
+@@ -2299,8 +2324,9 @@ static void ixgbevf_negotiate_api(struct
+ idx++;
+ }
+
+- /* Following is not supported by API 1.6, it is specific for 1.5 */
+- if (hw->api_version == ixgbe_mbox_api_15) {
++ ixgbevf_set_features(adapter);
++
++ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+@@ -2658,6 +2684,7 @@ static void ixgbevf_set_num_queues(struc
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
++ case ixgbe_mbox_api_17:
+ if (adapter->xdp_prog &&
+ hw->mac.max_tx_queues == rss)
+ rss = rss > 3 ? 2 : 1;
+@@ -4653,6 +4680,7 @@ static int ixgbevf_probe(struct pci_dev
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
++ case ixgbe_mbox_api_17:
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN);
+ break;
+--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
++++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
+@@ -67,6 +67,7 @@ enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
++ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+ };
+@@ -106,6 +107,9 @@ enum ixgbe_pfvf_api_rev {
+ /* mailbox API, version 1.6 VF requests */
+ #define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
++/* mailbox API, version 1.7 VF requests */
++#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF*/
++
+ /* length of permanent address message returned from PF */
+ #define IXGBE_VF_PERMADDR_MSG_LEN 4
+ /* word in permanent address message with the current multicast type */
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
+@@ -313,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe
+ * is not supported for this device type.
+ */
+ switch (hw->api_version) {
++ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_14:
+@@ -383,6 +384,7 @@ int ixgbevf_get_rss_key_locked(struct ix
+ * or if the operation is not supported for this device type.
+ */
+ switch (hw->api_version) {
++ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_14:
+@@ -555,6 +557,7 @@ static s32 ixgbevf_update_xcast_mode(str
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
++ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+@@ -646,6 +649,7 @@ static int ixgbevf_get_pf_link_state(str
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_16:
++ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+@@ -670,6 +674,42 @@ static int ixgbevf_get_pf_link_state(str
+ }
+
+ /**
++ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
++ * @hw: pointer to the HW structure
++ * @pf_features: bitmask of features supported by PF
++ *
++ * Return: IXGBE_ERR_MBX in the case of mailbox error,
++ * -EOPNOTSUPP if the op is not supported or 0 on success.
++ */
++static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
++{
++ u32 msgbuf[2] = {};
++ int err;
++
++ switch (hw->api_version) {
++ case ixgbe_mbox_api_17:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
++ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
++
++ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
++ ARRAY_SIZE(msgbuf));
++
++ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
++ err = IXGBE_ERR_MBX;
++ *pf_features = 0x0;
++ } else {
++ *pf_features = msgbuf[1];
++ }
++
++ return err;
++}
++
++/**
+ * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+@@ -799,6 +839,7 @@ static s32 ixgbevf_check_mac_link_vf(str
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+ {
++ struct ixgbevf_adapter *adapter = hw->back;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+@@ -825,7 +866,7 @@ static s32 ixgbevf_check_mac_link_vf(str
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1)) {
+- if (hw->api_version >= ixgbe_mbox_api_15)
++ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
+ mac->get_link_status = false;
+ goto out;
+ }
+@@ -1026,6 +1067,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
++ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return 0;
+@@ -1080,6 +1122,7 @@ static const struct ixgbe_mac_operations
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
++ .negotiate_features = ixgbevf_negotiate_features_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_update_xcast_mode,
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
+@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
++ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
--- /dev/null
+From stable+bounces-188263-greg=kroah.com@vger.kernel.org Mon Oct 20 23:50:20 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 17:50:14 -0400
+Subject: NFSD: Define a proc_layoutcommit for the FlexFiles layout type
+To: stable@vger.kernel.org
+Cc: Chuck Lever <chuck.lever@oracle.com>, Robert Morris <rtm@csail.mit.edu>, Thomas Haynes <loghyr@hammerspace.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020215014.1927401-1-sashal@kernel.org>
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 4b47a8601b71ad98833b447d465592d847b4dc77 ]
+
+Avoid a crash if a pNFS client should happen to send a LAYOUTCOMMIT
+operation on a FlexFiles layout.
+
+Reported-by: Robert Morris <rtm@csail.mit.edu>
+Closes: https://lore.kernel.org/linux-nfs/152f99b2-ba35-4dec-93a9-4690e625dccd@oracle.com/T/#t
+Cc: Thomas Haynes <loghyr@hammerspace.com>
+Cc: stable@vger.kernel.org
+Fixes: 9b9960a0ca47 ("nfsd: Add a super simple flex file server")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ removed struct svc_rqst parameter from nfsd4_ff_proc_layoutcommit ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/flexfilelayout.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/nfsd/flexfilelayout.c
++++ b/fs/nfsd/flexfilelayout.c
+@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super
+ return 0;
+ }
+
++static __be32
++nfsd4_ff_proc_layoutcommit(struct inode *inode,
++ struct nfsd4_layoutcommit *lcp)
++{
++ return nfs_ok;
++}
++
+ const struct nfsd4_layout_ops ff_layout_ops = {
+ .notify_types =
+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_
+ .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_ff_proc_layoutget,
+ .encode_layoutget = nfsd4_ff_encode_layoutget,
++ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
+ };
--- /dev/null
+From stable+bounces-188074-greg=kroah.com@vger.kernel.org Mon Oct 20 14:54:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:54:07 -0400
+Subject: NFSD: Fix last write offset handling in layoutcommit
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Konstantin Evtushenko <koevtushenko@yandex.com>, Christoph Hellwig <hch@lst.de>, Jeff Layton <jlayton@kernel.org>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125407.1760605-3-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit d68886bae76a4b9b3484d23e5b7df086f940fa38 ]
+
+The data type of loca_last_write_offset is newoffset4 and is switched
+on a boolean value, no_newoffset, that indicates if a previous write
+occurred or not. If no_newoffset is FALSE, an offset is not given.
+This means that client does not try to update the file size. Thus,
+server should not try to calculate new file size and check if it fits
+into the segment range. See RFC 8881, section 12.5.4.2.
+
+Sometimes the current incorrect logic may cause clients to hang when
+trying to sync an inode. If layoutcommit fails, the client marks the
+inode as dirty again.
+
+Fixes: 9cf514ccfacb ("nfsd: implement pNFS operations")
+Cc: stable@vger.kernel.org
+Co-developed-by: Konstantin Evtushenko <koevtushenko@yandex.com>
+Signed-off-by: Konstantin Evtushenko <koevtushenko@yandex.com>
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ removed rqstp parameter from proc_layoutcommit ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/blocklayout.c | 5 ++---
+ fs/nfsd/nfs4proc.c | 30 +++++++++++++++---------------
+ 2 files changed, 17 insertions(+), 18 deletions(-)
+
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -117,7 +117,6 @@ static __be32
+ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
+ struct iomap *iomaps, int nr_iomaps)
+ {
+- loff_t new_size = lcp->lc_last_wr + 1;
+ struct iattr iattr = { .ia_valid = 0 };
+ int error;
+
+@@ -127,9 +126,9 @@ nfsd4_block_commit_blocks(struct inode *
+ iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
+ iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
+
+- if (new_size > i_size_read(inode)) {
++ if (lcp->lc_size_chg) {
+ iattr.ia_valid |= ATTR_SIZE;
+- iattr.ia_size = new_size;
++ iattr.ia_size = lcp->lc_newsize;
+ }
+
+ error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2308,7 +2308,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
+ struct svc_fh *current_fh = &cstate->current_fh;
+ const struct nfsd4_layout_ops *ops;
+- loff_t new_size = lcp->lc_last_wr + 1;
+ struct inode *inode;
+ struct nfs4_layout_stateid *ls;
+ __be32 nfserr;
+@@ -2324,13 +2323,21 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ goto out;
+ inode = d_inode(current_fh->fh_dentry);
+
+- nfserr = nfserr_inval;
+- if (new_size <= seg->offset)
+- goto out;
+- if (new_size > seg->offset + seg->length)
+- goto out;
+- if (!lcp->lc_newoffset && new_size > i_size_read(inode))
+- goto out;
++ lcp->lc_size_chg = false;
++ if (lcp->lc_newoffset) {
++ loff_t new_size = lcp->lc_last_wr + 1;
++
++ nfserr = nfserr_inval;
++ if (new_size <= seg->offset)
++ goto out;
++ if (new_size > seg->offset + seg->length)
++ goto out;
++
++ if (new_size > i_size_read(inode)) {
++ lcp->lc_size_chg = true;
++ lcp->lc_newsize = new_size;
++ }
++ }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
+@@ -2346,13 +2353,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ /* LAYOUTCOMMIT does not require any serialization */
+ mutex_unlock(&ls->ls_mutex);
+
+- if (new_size > i_size_read(inode)) {
+- lcp->lc_size_chg = 1;
+- lcp->lc_newsize = new_size;
+- } else {
+- lcp->lc_size_chg = 0;
+- }
+-
+ nfserr = ops->proc_layoutcommit(inode, lcp);
+ nfs4_put_stid(&ls->ls_stid);
+ out:
--- /dev/null
+From stable+bounces-188073-greg=kroah.com@vger.kernel.org Mon Oct 20 14:54:43 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:54:06 -0400
+Subject: NFSD: Minor cleanup in layoutcommit processing
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Christoph Hellwig <hch@lst.de>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125407.1760605-2-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit 274365a51d88658fb51cca637ba579034e90a799 ]
+
+Remove dprintk in nfsd4_layoutcommit. These are not needed
+in day to day usage, and the information is also available
+in Wireshark when capturing NFS traffic.
+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: d68886bae76a ("NFSD: Fix last write offset handling in layoutcommit")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4proc.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2325,18 +2325,12 @@ nfsd4_layoutcommit(struct svc_rqst *rqst
+ inode = d_inode(current_fh->fh_dentry);
+
+ nfserr = nfserr_inval;
+- if (new_size <= seg->offset) {
+- dprintk("pnfsd: last write before layout segment\n");
++ if (new_size <= seg->offset)
+ goto out;
+- }
+- if (new_size > seg->offset + seg->length) {
+- dprintk("pnfsd: last write beyond layout segment\n");
++ if (new_size > seg->offset + seg->length)
+ goto out;
+- }
+- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
+- dprintk("pnfsd: layoutcommit beyond EOF\n");
++ if (!lcp->lc_newoffset && new_size > i_size_read(inode))
+ goto out;
+- }
+
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
+ false, lcp->lc_layout_type,
--- /dev/null
+From stable+bounces-188072-greg=kroah.com@vger.kernel.org Mon Oct 20 14:54:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:54:05 -0400
+Subject: NFSD: Rework encoding and decoding of nfsd4_deviceid
+To: stable@vger.kernel.org
+Cc: Sergey Bashirov <sergeybashirov@gmail.com>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125407.1760605-1-sashal@kernel.org>
+
+From: Sergey Bashirov <sergeybashirov@gmail.com>
+
+[ Upstream commit 832738e4b325b742940761e10487403f9aad13e8 ]
+
+Compilers may optimize the layout of C structures, so we should not rely
+on sizeof struct and memcpy to encode and decode XDR structures. The byte
+order of the fields should also be taken into account.
+
+This patch adds the correct functions to handle the deviceid4 structure
+and removes the pad field, which is currently not used by NFSD, from the
+runtime state. The server's byte order is preserved because the deviceid4
+blob on the wire is only used as a cookie by the client.
+
+Signed-off-by: Sergey Bashirov <sergeybashirov@gmail.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Stable-dep-of: d68886bae76a ("NFSD: Fix last write offset handling in layoutcommit")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/blocklayoutxdr.c | 7 ++-----
+ fs/nfsd/flexfilelayoutxdr.c | 3 +--
+ fs/nfsd/nfs4layouts.c | 1 -
+ fs/nfsd/nfs4xdr.c | 14 +-------------
+ fs/nfsd/xdr4.h | 36 +++++++++++++++++++++++++++++++++++-
+ 5 files changed, 39 insertions(+), 22 deletions(-)
+
+--- a/fs/nfsd/blocklayoutxdr.c
++++ b/fs/nfsd/blocklayoutxdr.c
+@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* we always return a single extent */
+
+- p = xdr_encode_opaque_fixed(p, &b->vol_id,
+- sizeof(struct nfsd4_deviceid));
++ p = svcxdr_encode_deviceid4(p, &b->vol_id);
+ p = xdr_encode_hyper(p, b->foff);
+ p = xdr_encode_hyper(p, b->len);
+ p = xdr_encode_hyper(p, b->soff);
+@@ -145,9 +144,7 @@ nfsd4_block_decode_layoutupdate(__be32 *
+ for (i = 0; i < nr_iomaps; i++) {
+ struct pnfs_block_extent bex;
+
+- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
+- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+-
++ p = svcxdr_decode_deviceid4(p, &bex.vol_id);
+ p = xdr_decode_hyper(p, &bex.foff);
+ if (bex.foff & (block_size - 1)) {
+ dprintk("%s: unaligned offset 0x%llx\n",
+--- a/fs/nfsd/flexfilelayoutxdr.c
++++ b/fs/nfsd/flexfilelayoutxdr.c
+@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_str
+ *p++ = cpu_to_be32(1); /* single mirror */
+ *p++ = cpu_to_be32(1); /* single data server */
+
+- p = xdr_encode_opaque_fixed(p, &fl->deviceid,
+- sizeof(struct nfsd4_deviceid));
++ p = svcxdr_encode_deviceid4(p, &fl->deviceid);
+
+ *p++ = cpu_to_be32(1); /* efficiency */
+
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid
+
+ id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
+ id->generation = device_generation;
+- id->pad = 0;
+ return 0;
+ }
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -566,18 +566,6 @@ nfsd4_decode_state_owner4(struct nfsd4_c
+ }
+
+ #ifdef CONFIG_NFSD_PNFS
+-static __be32
+-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
+- struct nfsd4_deviceid *devid)
+-{
+- __be32 *p;
+-
+- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
+- if (!p)
+- return nfserr_bad_xdr;
+- memcpy(devid, p, sizeof(*devid));
+- return nfs_ok;
+-}
+
+ static __be32
+ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
+@@ -1733,7 +1721,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_
+ __be32 status;
+
+ memset(gdev, 0, sizeof(*gdev));
+- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
++ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
+ if (status)
+ return status;
+ if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -459,9 +459,43 @@ struct nfsd4_reclaim_complete {
+ struct nfsd4_deviceid {
+ u64 fsid_idx;
+ u32 generation;
+- u32 pad;
+ };
+
++static inline __be32 *
++svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
++{
++ __be64 *q = (__be64 *)p;
++
++ *q = (__force __be64)devid->fsid_idx;
++ p += 2;
++ *p++ = (__force __be32)devid->generation;
++ *p++ = xdr_zero;
++ return p;
++}
++
++static inline __be32 *
++svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
++{
++ __be64 *q = (__be64 *)p;
++
++ devid->fsid_idx = (__force u64)(*q);
++ p += 2;
++ devid->generation = (__force u32)(*p++);
++ p++; /* NFSD does not use the remaining octets */
++ return p;
++}
++
++static inline __be32
++nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
++{
++ __be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
++
++ if (unlikely(!p))
++ return nfserr_bad_xdr;
++ svcxdr_decode_deviceid4(p, devid);
++ return nfs_ok;
++}
++
+ struct nfsd4_layout_seg {
+ u32 iomode;
+ u64 offset;
--- /dev/null
+From fb881cd7604536b17a1927fb0533f9a6982ffcc5 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Sat, 3 May 2025 14:33:14 +0900
+Subject: nilfs2: fix deadlock warnings caused by lock dependency in init_nilfs()
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit fb881cd7604536b17a1927fb0533f9a6982ffcc5 upstream.
+
+After commit c0e473a0d226 ("block: fix race between set_blocksize and read
+paths") was merged, set_blocksize() called by sb_set_blocksize() now locks
+the inode of the backing device file. As a result of this change, syzbot
+started reporting deadlock warnings due to a circular dependency involving
+the semaphore "ns_sem" of the nilfs object, the inode lock of the backing
+device file, and the locks that this inode lock is transitively dependent
+on.
+
+This is caused by a new lock dependency added by the above change, since
+init_nilfs() calls sb_set_blocksize() in the lock section of "ns_sem".
+However, these warnings are false positives because init_nilfs() is called
+in the early stage of the mount operation and the filesystem has not yet
+started.
+
+The reason why "ns_sem" is locked in init_nilfs() was to avoid a race
+condition in nilfs_fill_super() caused by sharing a nilfs object among
+multiple filesystem instances (super block structures) in the early
+implementation. However, nilfs objects and super block structures have
+long ago become one-to-one, and there is no longer any need to use the
+semaphore there.
+
+So, fix this issue by removing the use of the semaphore "ns_sem" in
+init_nilfs().
+
+Link: https://lkml.kernel.org/r/20250503053327.12294-1-konishi.ryusuke@gmail.com
+Fixes: c0e473a0d226 ("block: fix race between set_blocksize and read paths")
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+00f7f5b884b117ee6773@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=00f7f5b884b117ee6773
+Tested-by: syzbot+00f7f5b884b117ee6773@syzkaller.appspotmail.com
+Reported-by: syzbot+f30591e72bfc24d4715b@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=f30591e72bfc24d4715b
+Tested-by: syzbot+f30591e72bfc24d4715b@syzkaller.appspotmail.com
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mahmoud Adam <mngyadam@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/the_nilfs.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -680,8 +680,6 @@ int init_nilfs(struct the_nilfs *nilfs,
+ int blocksize;
+ int err;
+
+- down_write(&nilfs->ns_sem);
+-
+ blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
+ if (!blocksize) {
+ nilfs_err(sb, "unable to set blocksize");
+@@ -757,7 +755,6 @@ int init_nilfs(struct the_nilfs *nilfs,
+ set_nilfs_init(nilfs);
+ err = 0;
+ out:
+- up_write(&nilfs->ns_sem);
+ return err;
+
+ failed_sbh:
--- /dev/null
+From stable+bounces-188141-greg=kroah.com@vger.kernel.org Mon Oct 20 17:41:17 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 11:37:54 -0400
+Subject: padata: Reset next CPU when reorder sequence wraps around
+To: stable@vger.kernel.org
+Cc: Xiao Liang <shaw.leon@gmail.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020153754.1820697-1-sashal@kernel.org>
+
+From: Xiao Liang <shaw.leon@gmail.com>
+
+[ Upstream commit 501302d5cee0d8e8ec2c4a5919c37e0df9abc99b ]
+
+When seq_nr wraps around, the next reorder job with seq 0 is hashed to
+the first CPU in padata_do_serial(). Correspondingly, need reset pd->cpu
+to the first one when pd->processed wraps around. Otherwise, if the
+number of used CPUs is not a power of 2, padata_find_next() will be
+checking a wrong list, hence deadlock.
+
+Fixes: 6fc4dbcf0276 ("padata: Replace delayed timer with immediate workqueue in padata_reorder")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Xiao Liang <shaw.leon@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ relocated fix from padata_reorder() function to padata_find_next() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/padata.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -290,7 +290,11 @@ static struct padata_priv *padata_find_n
+ if (remove_object) {
+ list_del_init(&padata->list);
+ ++pd->processed;
+- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++ /* When sequence wraps around, reset to the first CPU. */
++ if (unlikely(pd->processed == 0))
++ pd->cpu = cpumask_first(pd->cpumask.pcpu);
++ else
++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+ }
+
+ spin_unlock(&reorder->lock);
--- /dev/null
+From stable+bounces-188224-greg=kroah.com@vger.kernel.org Mon Oct 20 20:10:38 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 14:10:25 -0400
+Subject: PCI: Add PCI_VDEVICE_SUB helper macro
+To: stable@vger.kernel.org
+Cc: Piotr Kwapulinski <piotr.kwapulinski@intel.com>, Przemek Kitszel <przemyslaw.kitszel@intel.com>, Bjorn Helgaas <bhelgaas@google.com>, Rafal Romanowski <rafal.romanowski@intel.com>, Tony Nguyen <anthony.l.nguyen@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020181028.1864198-1-sashal@kernel.org>
+
+From: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+
+[ Upstream commit 208fff3f567e2a3c3e7e4788845e90245c3891b4 ]
+
+PCI_VDEVICE_SUB generates the pci_device_id struct layout for
+the specific PCI device/subdevice. Private data may follow the
+output.
+
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: a7075f501bd3 ("ixgbevf: fix mailbox API compatibility by negotiating supported features")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/pci.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1035,6 +1035,20 @@ static inline struct pci_driver *to_pci_
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+
+ /**
++ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
++ * @vend: the vendor name
++ * @dev: the 16 bit PCI Device ID
++ * @subvend: the 16 bit PCI Subvendor ID
++ * @subdev: the 16 bit PCI Subdevice ID
++ *
++ * Generate the pci_device_id struct layout for the specific PCI
++ * device/subdevice. Private data may follow the output.
++ */
++#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
++ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
++ .subvendor = (subvend), .subdevice = (subdev), 0, 0
++
++/**
+ * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
+ * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
+ * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
--- /dev/null
+From stable+bounces-188091-greg=kroah.com@vger.kernel.org Mon Oct 20 15:02:41 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:59:35 -0400
+Subject: phy: cadence: cdns-dphy: Fix PLL lock and O_CMN_READY polling
+To: stable@vger.kernel.org
+Cc: Devarsh Thakkar <devarsht@ti.com>, Harikrishna Shenoy <h-shenoy@ti.com>, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125935.1762853-2-sashal@kernel.org>
+
+From: Devarsh Thakkar <devarsht@ti.com>
+
+[ Upstream commit 284fb19a3ffb1083c3ad9c00d29749d09dddb99c ]
+
+PLL lockup and O_CMN_READY assertion can only happen after common state
+machine gets enabled by programming DPHY_CMN_SSM register, but driver was
+polling them before the common state machine was enabled which is
+incorrect. This is as per the DPHY initialization sequence as mentioned in
+J721E TRM [1] at section "12.7.2.4.1.2.1 Start-up Sequence Timing Diagram".
+It shows O_CMN_READY polling at the end after common configuration pin
+setup where the common configuration pin setup step enables state machine
+as referenced in "Table 12-1533. Common Configuration-Related Setup
+mentions state machine"
+
+To fix this :
+- Add new function callbacks for polling on PLL lock and O_CMN_READY
+ assertion.
+- As state machine and clocks get enabled in power_on callback only, move
+ the clock related programming part from configure callback to power_on
+callback and poll for the PLL lockup and O_CMN_READY assertion after state
+machine gets enabled.
+- The configure callback only saves the PLL configuration received from the
+ client driver which will be applied later on in power_on callback.
+- Add checks to ensure configure is called before power_on and state
+ machine is in disabled state before power_on callback is called.
+- Disable state machine in power_off so that client driver can re-configure
+ the PLL by following up a power_off, configure, power_on sequence.
+
+[1]: https://www.ti.com/lit/zip/spruil1
+
+Cc: stable@vger.kernel.org
+Fixes: 7a343c8bf4b5 ("phy: Add Cadence D-PHY support")
+Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
+Tested-by: Harikrishna Shenoy <h-shenoy@ti.com>
+Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Link: https://lore.kernel.org/r/20250704125915.1224738-2-devarsht@ti.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/cadence/cdns-dphy.c | 124 +++++++++++++++++++++++++++++-----------
+ 1 file changed, 92 insertions(+), 32 deletions(-)
+
+--- a/drivers/phy/cadence/cdns-dphy.c
++++ b/drivers/phy/cadence/cdns-dphy.c
+@@ -100,6 +100,8 @@ struct cdns_dphy_ops {
+ void (*set_pll_cfg)(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg);
+ unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
++ int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
++ int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
+ };
+
+ struct cdns_dphy {
+@@ -109,6 +111,8 @@ struct cdns_dphy {
+ struct clk *pll_ref_clk;
+ const struct cdns_dphy_ops *ops;
+ struct phy *phy;
++ bool is_configured;
++ bool is_powered;
+ };
+
+ /* Order of bands is important since the index is the band number. */
+@@ -195,6 +199,16 @@ static unsigned long cdns_dphy_get_wakeu
+ return dphy->ops->get_wakeup_time_ns(dphy);
+ }
+
++static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
++{
++ return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
++}
++
++static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
++{
++ return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
++}
++
+ static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+ {
+ /* Default wakeup time is 800 ns (in a simulated environment). */
+@@ -236,7 +250,6 @@ static unsigned long cdns_dphy_j721e_get
+ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+ {
+- u32 status;
+
+ /*
+ * set the PWM and PLL Byteclk divider settings to recommended values
+@@ -253,13 +266,6 @@ static void cdns_dphy_j721e_set_pll_cfg(
+
+ writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
+ dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
+-
+- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+- (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
+-
+- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+- (status & DPHY_TX_WIZ_O_CMN_READY), 0,
+- POLL_TIMEOUT_US);
+ }
+
+ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+@@ -267,6 +273,23 @@ static void cdns_dphy_j721e_set_psm_div(
+ writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
+ }
+
++static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
++{
++ u32 status;
++
++ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
++ status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
++}
++
++static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
++{
++ u32 status;
++
++ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
++ status & DPHY_TX_WIZ_O_CMN_READY, 0,
++ POLL_TIMEOUT_US);
++}
++
+ /*
+ * This is the reference implementation of DPHY hooks. Specific integration of
+ * this IP may have to re-implement some of them depending on how they decided
+@@ -282,6 +305,8 @@ static const struct cdns_dphy_ops j721e_
+ .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
+ .set_psm_div = cdns_dphy_j721e_set_psm_div,
++ .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
++ .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
+ };
+
+ static int cdns_dphy_config_from_opts(struct phy *phy,
+@@ -339,21 +364,36 @@ static int cdns_dphy_validate(struct phy
+ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+ {
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+- struct cdns_dphy_cfg cfg = { 0 };
+- int ret, band_ctrl;
+- unsigned int reg;
++ int ret;
+
+- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+- if (ret)
+- return ret;
++ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
++ if (!ret)
++ dphy->is_configured = true;
++
++ return ret;
++}
++
++static int cdns_dphy_power_on(struct phy *phy)
++{
++ struct cdns_dphy *dphy = phy_get_drvdata(phy);
++ int ret;
++ u32 reg;
++
++ if (!dphy->is_configured || dphy->is_powered)
++ return -EINVAL;
++
++ clk_prepare_enable(dphy->psm_clk);
++ clk_prepare_enable(dphy->pll_ref_clk);
+
+ /*
+ * Configure the internal PSM clk divider so that the DPHY has a
+ * 1MHz clk (or something close).
+ */
+ ret = cdns_dphy_setup_psm(dphy);
+- if (ret)
+- return ret;
++ if (ret) {
++ dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
++ goto err_power_on;
++ }
+
+ /*
+ * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
+@@ -368,40 +408,60 @@ static int cdns_dphy_configure(struct ph
+ * Configure the DPHY PLL that will be used to generate the TX byte
+ * clk.
+ */
+- cdns_dphy_set_pll_cfg(dphy, &cfg);
++ cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
+
+- band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+- if (band_ctrl < 0)
+- return band_ctrl;
++ ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
++ if (ret < 0) {
++ dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
++ goto err_power_on;
++ }
+
+- reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+- FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
++ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
++ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
+- return 0;
+-}
+-
+-static int cdns_dphy_power_on(struct phy *phy)
+-{
+- struct cdns_dphy *dphy = phy_get_drvdata(phy);
+-
+- clk_prepare_enable(dphy->psm_clk);
+- clk_prepare_enable(dphy->pll_ref_clk);
+-
+ /* Start TX state machine. */
+ writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
+
++ ret = cdns_dphy_wait_for_pll_lock(dphy);
++ if (ret) {
++ dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
++ goto err_power_on;
++ }
++
++ ret = cdns_dphy_wait_for_cmn_ready(dphy);
++ if (ret) {
++ dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
++ ret);
++ goto err_power_on;
++ }
++
++ dphy->is_powered = true;
++
+ return 0;
++
++err_power_on:
++ clk_disable_unprepare(dphy->pll_ref_clk);
++ clk_disable_unprepare(dphy->psm_clk);
++
++ return ret;
+ }
+
+ static int cdns_dphy_power_off(struct phy *phy)
+ {
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
++ u32 reg;
+
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
++ /* Stop TX state machine. */
++ reg = readl(dphy->regs + DPHY_CMN_SSM);
++ writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
++
++ dphy->is_powered = false;
++
+ return 0;
+ }
+
--- /dev/null
+From stable+bounces-188382-greg=kroah.com@vger.kernel.org Tue Oct 21 18:51:06 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 12:50:53 -0400
+Subject: phy: cadence: cdns-dphy: Update calibration wait time for startup state machine
+To: stable@vger.kernel.org
+Cc: Devarsh Thakkar <devarsht@ti.com>, Harikrishna Shenoy <h-shenoy@ti.com>, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251021165053.2388405-3-sashal@kernel.org>
+
+From: Devarsh Thakkar <devarsht@ti.com>
+
+[ Upstream commit 2c27aaee934a1b5229152fe33a14f1fdf50da143 ]
+
+Do read-modify-write so that we re-use the characterized reset value as
+specified in TRM [1] to program calibration wait time which defines number
+of cycles to wait for after startup state machine is in bandgap enable
+state.
+
+This fixes PLL lock timeout error faced while using RPi DSI Panel on TI's
+AM62L and J721E SoC since earlier calibration wait time was getting
+overwritten to zero value thus failing the PLL to lockup and causing
+timeout.
+
+[1] AM62P TRM (Section 14.8.6.3.2.1.1 DPHY_TX_DPHYTX_CMN0_CMN_DIG_TBIT2):
+Link: https://www.ti.com/lit/pdf/spruj83
+
+Cc: stable@vger.kernel.org
+Fixes: 7a343c8bf4b5 ("phy: Add Cadence D-PHY support")
+Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
+Tested-by: Harikrishna Shenoy <h-shenoy@ti.com>
+Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Link: https://lore.kernel.org/r/20250704125915.1224738-3-devarsht@ti.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/cadence/cdns-dphy.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/phy/cadence/cdns-dphy.c
++++ b/drivers/phy/cadence/cdns-dphy.c
+@@ -30,6 +30,7 @@
+
+ #define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
+ #define DPHY_CMN_SSM_EN BIT(0)
++#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1)
+ #define DPHY_CMN_TX_MODE_EN BIT(9)
+
+ #define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
+@@ -421,7 +422,8 @@ static int cdns_dphy_power_on(struct phy
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
+ /* Start TX state machine. */
+- writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
++ reg = readl(dphy->regs + DPHY_CMN_SSM);
++ writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
+
+ ret = cdns_dphy_wait_for_pll_lock(dphy);
--- /dev/null
+From stable+bounces-188090-greg=kroah.com@vger.kernel.org Mon Oct 20 15:07:15 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:59:34 -0400
+Subject: phy: cdns-dphy: Store hs_clk_rate and return it
+To: stable@vger.kernel.org
+Cc: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>, Aradhya Bhatia <aradhya.bhatia@linux.dev>, Parth Pancholi <parth.pancholi@toradex.com>, Jayesh Choudhary <j-choudhary@ti.com>, Vinod Koul <vkoul@kernel.org>, Devarsh Thakkar <devarsht@ti.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020125935.1762853-1-sashal@kernel.org>
+
+From: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+[ Upstream commit 689a54acb56858c85de8c7285db82b8ae6dbf683 ]
+
+The DPHY driver does not return the actual hs_clk_rate, so the DSI
+driver has no idea what clock was actually achieved. Set the realized
+hs_clk_rate to the opts struct, so that the DSI driver gets it back.
+
+Reviewed-by: Aradhya Bhatia <aradhya.bhatia@linux.dev>
+Tested-by: Parth Pancholi <parth.pancholi@toradex.com>
+Tested-by: Jayesh Choudhary <j-choudhary@ti.com>
+Acked-by: Vinod Koul <vkoul@kernel.org>
+Reviewed-by: Devarsh Thakkar <devarsht@ti.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+Link: https://lore.kernel.org/r/20250723-cdns-dphy-hs-clk-rate-fix-v1-1-d4539d44cbe7@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: 284fb19a3ffb ("phy: cadence: cdns-dphy: Fix PLL lock and O_CMN_READY polling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/cadence/cdns-dphy.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/phy/cadence/cdns-dphy.c
++++ b/drivers/phy/cadence/cdns-dphy.c
+@@ -79,6 +79,7 @@ struct cdns_dphy_cfg {
+ u8 pll_ipdiv;
+ u8 pll_opdiv;
+ u16 pll_fbdiv;
++ u32 hs_clk_rate;
+ unsigned int nlanes;
+ };
+
+@@ -154,6 +155,9 @@ static int cdns_dsi_get_dphy_pll_cfg(str
+ cfg->pll_ipdiv,
+ pll_ref_hz);
+
++ cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
++ 2 * cfg->pll_opdiv * cfg->pll_ipdiv);
++
+ return 0;
+ }
+
+@@ -297,6 +301,7 @@ static int cdns_dphy_config_from_opts(st
+ if (ret)
+ return ret;
+
++ opts->hs_clk_rate = cfg->hs_clk_rate;
+ opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
+
+ return 0;
--- /dev/null
+From stable+bounces-188096-greg=kroah.com@vger.kernel.org Mon Oct 20 15:06:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 09:03:02 -0400
+Subject: PM: runtime: Add new devm functions
+To: stable@vger.kernel.org
+Cc: "Bence Csókás" <csokas.bence@prolan.hu>, "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251020130303.1764135-1-sashal@kernel.org>
+
+From: Bence Csókás <csokas.bence@prolan.hu>
+
+[ Upstream commit 73db799bf5efc5a04654bb3ff6c9bf63a0dfa473 ]
+
+Add `devm_pm_runtime_set_active_enabled()` and
+`devm_pm_runtime_get_noresume()` for simplifying
+common cases in drivers.
+
+Signed-off-by: Bence Csókás <csokas.bence@prolan.hu>
+Link: https://patch.msgid.link/20250327195928.680771-3-csokas.bence@prolan.hu
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Stable-dep-of: 0792c1984a45 ("iio: imu: inv_icm42600: Simplify pm_runtime setup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/power/runtime.c | 44 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/pm_runtime.h | 4 +++
+ 2 files changed, 48 insertions(+)
+
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1552,6 +1552,32 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(pm_runtime_enable);
+
++static void pm_runtime_set_suspended_action(void *data)
++{
++ pm_runtime_set_suspended(data);
++}
++
++/**
++ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_set_active_enabled(struct device *dev)
++{
++ int err;
++
++ err = pm_runtime_set_active(dev);
++ if (err)
++ return err;
++
++ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
++ if (err)
++ return err;
++
++ return devm_pm_runtime_enable(dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
++
+ static void pm_runtime_disable_action(void *data)
+ {
+ pm_runtime_dont_use_autosuspend(data);
+@@ -1574,6 +1600,24 @@ int devm_pm_runtime_enable(struct device
+ }
+ EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+
++static void pm_runtime_put_noidle_action(void *data)
++{
++ pm_runtime_put_noidle(data);
++}
++
++/**
++ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
++ *
++ * @dev: Device to handle.
++ */
++int devm_pm_runtime_get_noresume(struct device *dev)
++{
++ pm_runtime_get_noresume(dev);
++
++ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
++}
++EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
++
+ /**
+ * pm_runtime_forbid - Block runtime PM of a device.
+ * @dev: Device to handle.
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -94,7 +94,9 @@ extern void pm_runtime_new_link(struct d
+ extern void pm_runtime_drop_link(struct device_link *link);
+ extern void pm_runtime_release_supplier(struct device_link *link);
+
++int devm_pm_runtime_set_active_enabled(struct device *dev);
+ extern int devm_pm_runtime_enable(struct device *dev);
++int devm_pm_runtime_get_noresume(struct device *dev);
+
+ /**
+ * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+@@ -278,7 +280,9 @@ static inline void __pm_runtime_disable(
+ static inline void pm_runtime_allow(struct device *dev) {}
+ static inline void pm_runtime_forbid(struct device *dev) {}
+
++static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
+ static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
++static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
+
+ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
+ static inline void pm_runtime_get_noresume(struct device *dev) {}
--- /dev/null
+From stable+bounces-188180-greg=kroah.com@vger.kernel.org Mon Oct 20 18:16:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 12:16:04 -0400
+Subject: quota: remove unneeded return value of register_quota_format
+To: stable@vger.kernel.org
+Cc: Kemeng Shi <shikemeng@huaweicloud.com>, Joseph Qi <joseph.qi@linux.alibaba.com>, Jan Kara <jack@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020161605.1834667-1-sashal@kernel.org>
+
+From: Kemeng Shi <shikemeng@huaweicloud.com>
+
+[ Upstream commit a838e5dca63d1dc701e63b2b1176943c57485c45 ]
+
+The register_quota_format always returns 0, simply remove unneeded return
+value.
+
+Link: https://patch.msgid.link/20240715130534.2112678-3-shikemeng@huaweicloud.com
+Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Stable-dep-of: 72b7ceca857f ("fs: quota: create dedicated workqueue for quota_release_work")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/super.c | 6 ++----
+ fs/quota/dquot.c | 3 +--
+ fs/quota/quota_v1.c | 3 ++-
+ fs/quota/quota_v2.c | 9 +++------
+ include/linux/quota.h | 2 +-
+ mm/shmem.c | 7 +------
+ 6 files changed, 10 insertions(+), 20 deletions(-)
+
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1571,15 +1571,13 @@ static int __init ocfs2_init(void)
+
+ ocfs2_set_locking_protocol();
+
+- status = register_quota_format(&ocfs2_quota_format);
+- if (status < 0)
+- goto out3;
++ register_quota_format(&ocfs2_quota_format);
++
+ status = register_filesystem(&ocfs2_fs_type);
+ if (!status)
+ return 0;
+
+ unregister_quota_format(&ocfs2_quota_format);
+-out3:
+ debugfs_remove(ocfs2_debugfs_root);
+ ocfs2_free_mem_caches();
+ out2:
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -163,13 +163,12 @@ static struct quota_module_name module_n
+ /* SLAB cache for dquot structures */
+ static struct kmem_cache *dquot_cachep;
+
+-int register_quota_format(struct quota_format_type *fmt)
++void register_quota_format(struct quota_format_type *fmt)
+ {
+ spin_lock(&dq_list_lock);
+ fmt->qf_next = quota_formats;
+ quota_formats = fmt;
+ spin_unlock(&dq_list_lock);
+- return 0;
+ }
+ EXPORT_SYMBOL(register_quota_format);
+
+--- a/fs/quota/quota_v1.c
++++ b/fs/quota/quota_v1.c
+@@ -229,7 +229,8 @@ static struct quota_format_type v1_quota
+
+ static int __init init_v1_quota_format(void)
+ {
+- return register_quota_format(&v1_quota_format);
++ register_quota_format(&v1_quota_format);
++ return 0;
+ }
+
+ static void __exit exit_v1_quota_format(void)
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -422,12 +422,9 @@ static struct quota_format_type v2r1_quo
+
+ static int __init init_v2_quota_format(void)
+ {
+- int ret;
+-
+- ret = register_quota_format(&v2r0_quota_format);
+- if (ret)
+- return ret;
+- return register_quota_format(&v2r1_quota_format);
++ register_quota_format(&v2r0_quota_format);
++ register_quota_format(&v2r1_quota_format);
++ return 0;
+ }
+
+ static void __exit exit_v2_quota_format(void)
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -526,7 +526,7 @@ struct quota_info {
+ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
+ };
+
+-int register_quota_format(struct quota_format_type *fmt);
++void register_quota_format(struct quota_format_type *fmt);
+ void unregister_quota_format(struct quota_format_type *fmt);
+
+ struct quota_module_name {
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -4617,11 +4617,7 @@ void __init shmem_init(void)
+ shmem_init_inodecache();
+
+ #ifdef CONFIG_TMPFS_QUOTA
+- error = register_quota_format(&shmem_quota_format);
+- if (error < 0) {
+- pr_err("Could not register quota format\n");
+- goto out3;
+- }
++ register_quota_format(&shmem_quota_format);
+ #endif
+
+ error = register_filesystem(&shmem_fs_type);
+@@ -4650,7 +4646,6 @@ out1:
+ out2:
+ #ifdef CONFIG_TMPFS_QUOTA
+ unregister_quota_format(&shmem_quota_format);
+-out3:
+ #endif
+ shmem_destroy_inodecache();
+ shm_mnt = ERR_PTR(error);
hid-hid-input-only-ignore-0-battery-events-for-digit.patch
hid-multitouch-fix-name-of-stylus-input-devices.patch
selftests-arg_parsing-ensure-data-is-flushed-to-disk.patch
+hfsplus-fix-slab-out-of-bounds-read-in-hfsplus_strcasecmp.patch
+arm64-cputype-add-neoverse-v3ae-definitions.patch
+arm64-errata-apply-workarounds-for-neoverse-v3ae.patch
+block-fix-race-between-set_blocksize-and-read-paths.patch
+nilfs2-fix-deadlock-warnings-caused-by-lock-dependency-in-init_nilfs.patch
+nfsd-rework-encoding-and-decoding-of-nfsd4_deviceid.patch
+nfsd-minor-cleanup-in-layoutcommit-processing.patch
+nfsd-fix-last-write-offset-handling-in-layoutcommit.patch
+xfs-rename-the-old_crc-variable-in-xlog_recover_process.patch
+xfs-fix-log-crc-mismatches-between-i386-and-other-architectures.patch
+pm-runtime-add-new-devm-functions.patch
+iio-imu-inv_icm42600-simplify-pm_runtime-setup.patch
+phy-cdns-dphy-store-hs_clk_rate-and-return-it.patch
+phy-cadence-cdns-dphy-fix-pll-lock-and-o_cmn_ready-polling.patch
+iio-imu-inv_icm42600-reorganize-dma-aligned-buffers-in-structure.patch
+iio-imu-inv_icm42600-avoid-configuring-if-already-pm_runtime-suspended.patch
+xfs-use-deferred-intent-items-for-reaping-crosslinked-blocks.patch
+padata-reset-next-cpu-when-reorder-sequence-wraps-around.patch
+quota-remove-unneeded-return-value-of-register_quota_format.patch
+fs-quota-create-dedicated-workqueue-for-quota_release_work.patch
+nfsd-define-a-proc_layoutcommit-for-the-flexfiles-layout-type.patch
+vfs-don-t-leak-disconnected-dentries-on-umount.patch
+ext4-avoid-potential-buffer-over-read-in-parse_apply_sb_mount_options.patch
+phy-cadence-cdns-dphy-update-calibration-wait-time-for-startup-state-machine.patch
+pci-add-pci_vdevice_sub-helper-macro.patch
+ixgbevf-add-support-for-intel-r-e610-device.patch
+ixgbevf-fix-getting-link-speed-data-for-e610-devices.patch
+ixgbevf-fix-mailbox-api-compatibility-by-negotiating-supported-features.patch
--- /dev/null
+From stable+bounces-188264-greg=kroah.com@vger.kernel.org Mon Oct 20 23:51:33 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 17:51:25 -0400
+Subject: vfs: Don't leak disconnected dentries on umount
+To: stable@vger.kernel.org
+Cc: Jan Kara <jack@suse.cz>, syzbot+1d79ebe5383fc016cf07@syzkaller.appspotmail.com, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020215125.1928136-1-sashal@kernel.org>
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit 56094ad3eaa21e6621396cc33811d8f72847a834 ]
+
+When user calls open_by_handle_at() on some inode that is not cached, we
+will create disconnected dentry for it. If such dentry is a directory,
+exportfs_decode_fh_raw() will then try to connect this dentry to the
+dentry tree through reconnect_path(). It may happen for various reasons
+(such as corrupted fs or race with rename) that the call to
+lookup_one_unlocked() in reconnect_one() will fail to find the dentry we
+are trying to reconnect and instead create a new dentry under the
+parent. Now this dentry will not be marked as disconnected although the
+parent still may well be disconnected (at least in case this
+inconsistency happened because the fs is corrupted and .. doesn't point
+to the real parent directory). This creates inconsistency in
+disconnected flags but AFAICS it was mostly harmless. At least until
+commit f1ee616214cb ("VFS: don't keep disconnected dentries on d_anon")
+which removed adding of most disconnected dentries to sb->s_anon list.
+Thus after this commit cleanup of disconnected dentries implicitly
+relies on the fact that dput() will immediately reclaim such dentries.
+However when some leaf dentry isn't marked as disconnected, as in the
+scenario described above, the reclaim doesn't happen and the dentries
+are "leaked". Memory reclaim can eventually reclaim them but otherwise
+they stay in memory and if umount comes first, we hit infamous "Busy
+inodes after unmount" bug. Make sure all dentries created under a
+disconnected parent are marked as disconnected as well.
+
+Reported-by: syzbot+1d79ebe5383fc016cf07@syzkaller.appspotmail.com
+Fixes: f1ee616214cb ("VFS: don't keep disconnected dentries on d_anon")
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+[ relocated DCACHE_DISCONNECTED propagation from d_alloc_parallel() to d_alloc() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dcache.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1861,6 +1861,8 @@ struct dentry *d_alloc(struct dentry * p
+ __dget_dlock(parent);
+ dentry->d_parent = parent;
+ list_add(&dentry->d_child, &parent->d_subdirs);
++ if (parent->d_flags & DCACHE_DISCONNECTED)
++ dentry->d_flags |= DCACHE_DISCONNECTED;
+ spin_unlock(&parent->d_lock);
+
+ return dentry;
--- /dev/null
+From stable+bounces-188054-greg=kroah.com@vger.kernel.org Mon Oct 20 14:47:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:47:23 -0400
+Subject: xfs: fix log CRC mismatches between i386 and other architectures
+To: stable@vger.kernel.org
+Cc: Christoph Hellwig <hch@lst.de>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020124723.1757183-2-sashal@kernel.org>
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit e747883c7d7306acb4d683038d881528fbfbe749 ]
+
+When mounting file systems with a log that was dirtied on i386 on
+other architectures or vice versa, log recovery is unhappy:
+
+[ 11.068052] XFS (vdb): Torn write (CRC failure) detected at log block 0x2. Truncating head block from 0xc.
+
+This is because the CRCs generated by i386 and other architectures
+always diff. The reason for that is that sizeof(struct xlog_rec_header)
+returns different values for i386 vs the rest (324 vs 328), because the
+struct is not sizeof(uint64_t) aligned, and i386 has odd struct size
+alignment rules.
+
+This issue goes back to commit 13cdc853c519 ("Add log versioning, and new
+super block field for the log stripe") in the xfs-import tree, which
+adds log v2 support and the h_size field that causes the unaligned size.
+At that time it only mattered for the crude debug only log header
+checksum, but with commit 0e446be44806 ("xfs: add CRC checks to the log")
+it became a real issue for v5 file system, because now there is a proper
+CRC, and regular builds actually expect it match.
+
+Fix this by allowing checksums with and without the padding.
+
+Fixes: 0e446be44806 ("xfs: add CRC checks to the log")
+Cc: <stable@vger.kernel.org> # v3.8
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_log_format.h | 30 +++++++++++++++++++++++++++++-
+ fs/xfs/xfs_log.c | 8 ++++----
+ fs/xfs/xfs_log_priv.h | 4 ++--
+ fs/xfs/xfs_log_recover.c | 19 +++++++++++++++++--
+ fs/xfs/xfs_ondisk.h | 2 ++
+ 5 files changed, 54 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -171,12 +171,40 @@ typedef struct xlog_rec_header {
+ __be32 h_prev_block; /* block number to previous LR : 4 */
+ __be32 h_num_logops; /* number of log operations in this LR : 4 */
+ __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+- /* new fields */
++
++ /* fields added by the Linux port: */
+ __be32 h_fmt; /* format of log record : 4 */
+ uuid_t h_fs_uuid; /* uuid of FS : 16 */
++
++ /* fields added for log v2: */
+ __be32 h_size; /* iclog size : 4 */
++
++ /*
++ * When h_size was added for log v2 support, it caused the structure to
++ * have a different size on i386 vs all other architectures because the
++ * sum of the sizes of the members is not aligned by that of the largest
++ * __be64-sized member, and i386 has really odd struct alignment rules.
++ *
++ * Due to the way the log headers are placed out on-disk that alone is
++ * not a problem because the xlog_rec_header always sits alone in a
++ * BBSIZE-sized area, and the rest of that area is padded with zeroes.
++ * But xlog_cksum used to calculate the checksum based on the structure
++ * size, and thus gives different checksums for i386 vs the rest.
++ * We now do two checksum validation passes for both sizes to allow
++ * moving v5 file systems with unclean logs between i386 and other
++ * (little-endian) architectures.
++ */
++ __u32 h_pad0;
+ } xlog_rec_header_t;
+
++#ifdef __i386__
++#define XLOG_REC_SIZE offsetofend(struct xlog_rec_header, h_size)
++#define XLOG_REC_SIZE_OTHER sizeof(struct xlog_rec_header)
++#else
++#define XLOG_REC_SIZE sizeof(struct xlog_rec_header)
++#define XLOG_REC_SIZE_OTHER offsetofend(struct xlog_rec_header, h_size)
++#endif /* __i386__ */
++
+ typedef struct xlog_rec_ext_header {
+ __be32 xh_cycle; /* write cycle of log : 4 */
+ __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -1807,13 +1807,13 @@ xlog_cksum(
+ struct xlog *log,
+ struct xlog_rec_header *rhead,
+ char *dp,
+- int size)
++ unsigned int hdrsize,
++ unsigned int size)
+ {
+ uint32_t crc;
+
+ /* first generate the crc for the record header ... */
+- crc = xfs_start_cksum_update((char *)rhead,
+- sizeof(struct xlog_rec_header),
++ crc = xfs_start_cksum_update((char *)rhead, hdrsize,
+ offsetof(struct xlog_rec_header, h_crc));
+
+ /* ... then for additional cycle data for v2 logs ... */
+@@ -2077,7 +2077,7 @@ xlog_sync(
+
+ /* calculcate the checksum */
+ iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
+- iclog->ic_datap, size);
++ iclog->ic_datap, XLOG_REC_SIZE, size);
+ /*
+ * Intentionally corrupt the log record CRC based on the error injection
+ * frequency, if defined. This facilitates testing log recovery in the
+--- a/fs/xfs/xfs_log_priv.h
++++ b/fs/xfs/xfs_log_priv.h
+@@ -503,8 +503,8 @@ xlog_recover_finish(
+ extern void
+ xlog_recover_cancel(struct xlog *);
+
+-extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
+- char *dp, int size);
++__le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
++ char *dp, unsigned int hdrsize, unsigned int size);
+
+ extern struct kmem_cache *xfs_log_ticket_cache;
+ struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2860,9 +2860,24 @@ xlog_recover_process(
+ int pass,
+ struct list_head *buffer_list)
+ {
+- __le32 expected_crc = rhead->h_crc, crc;
++ __le32 expected_crc = rhead->h_crc, crc, other_crc;
+
+- crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
++ crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE,
++ be32_to_cpu(rhead->h_len));
++
++ /*
++ * Look at the end of the struct xlog_rec_header definition in
++ * xfs_log_format.h for the gory details.
++ */
++ if (expected_crc && crc != expected_crc) {
++ other_crc = xlog_cksum(log, rhead, dp, XLOG_REC_SIZE_OTHER,
++ be32_to_cpu(rhead->h_len));
++ if (other_crc == expected_crc) {
++ xfs_notice_once(log->l_mp,
++ "Fixing up incorrect CRC due to padding.");
++ crc = other_crc;
++ }
++ }
+
+ /*
+ * Nothing else to do if this is a CRC verification pass. Just return
+--- a/fs/xfs/xfs_ondisk.h
++++ b/fs/xfs/xfs_ondisk.h
+@@ -143,6 +143,8 @@ xfs_check_ondisk_structs(void)
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rud_log_format, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_map_extent, 32);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_phys_extent, 16);
++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_header, 328);
++ XFS_CHECK_STRUCT_SIZE(struct xlog_rec_ext_header, 260);
+
+ XFS_CHECK_OFFSET(struct xfs_bui_log_format, bui_extents, 16);
+ XFS_CHECK_OFFSET(struct xfs_cui_log_format, cui_extents, 16);
--- /dev/null
+From stable+bounces-188053-greg=kroah.com@vger.kernel.org Mon Oct 20 14:47:33 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 08:47:22 -0400
+Subject: xfs: rename the old_crc variable in xlog_recover_process
+To: stable@vger.kernel.org
+Cc: Christoph Hellwig <hch@lst.de>, "Darrick J. Wong" <djwong@kernel.org>, Carlos Maiolino <cem@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020124723.1757183-1-sashal@kernel.org>
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 0b737f4ac1d3ec093347241df74bbf5f54a7e16c ]
+
+old_crc is a very misleading name. Rename it to expected_crc as that
+described the usage much better.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Stable-dep-of: e747883c7d73 ("xfs: fix log CRC mismatches between i386 and other architectures")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_log_recover.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2860,20 +2860,19 @@ xlog_recover_process(
+ int pass,
+ struct list_head *buffer_list)
+ {
+- __le32 old_crc = rhead->h_crc;
+- __le32 crc;
++ __le32 expected_crc = rhead->h_crc, crc;
+
+ crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
+
+ /*
+ * Nothing else to do if this is a CRC verification pass. Just return
+ * if this a record with a non-zero crc. Unfortunately, mkfs always
+- * sets old_crc to 0 so we must consider this valid even on v5 supers.
+- * Otherwise, return EFSBADCRC on failure so the callers up the stack
+- * know precisely what failed.
++ * sets expected_crc to 0 so we must consider this valid even on v5
++ * supers. Otherwise, return EFSBADCRC on failure so the callers up the
++ * stack know precisely what failed.
+ */
+ if (pass == XLOG_RECOVER_CRCPASS) {
+- if (old_crc && crc != old_crc)
++ if (expected_crc && crc != expected_crc)
+ return -EFSBADCRC;
+ return 0;
+ }
+@@ -2884,11 +2883,11 @@ xlog_recover_process(
+ * zero CRC check prevents warnings from being emitted when upgrading
+ * the kernel from one that does not add CRCs by default.
+ */
+- if (crc != old_crc) {
+- if (old_crc || xfs_has_crc(log->l_mp)) {
++ if (crc != expected_crc) {
++ if (expected_crc || xfs_has_crc(log->l_mp)) {
+ xfs_alert(log->l_mp,
+ "log record CRC mismatch: found 0x%x, expected 0x%x.",
+- le32_to_cpu(old_crc),
++ le32_to_cpu(expected_crc),
+ le32_to_cpu(crc));
+ xfs_hex_dump(dp, 32);
+ }
--- /dev/null
+From stable+bounces-188159-greg=kroah.com@vger.kernel.org Mon Oct 20 17:51:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Oct 2025 11:51:16 -0400
+Subject: xfs: use deferred intent items for reaping crosslinked blocks
+To: stable@vger.kernel.org
+Cc: "Darrick J. Wong" <djwong@kernel.org>, Christoph Hellwig <hch@lst.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251020155116.1825625-1-sashal@kernel.org>
+
+From: "Darrick J. Wong" <djwong@kernel.org>
+
+[ Upstream commit cd32a0c0dcdf634f2e0e71f41c272e19dece6264 ]
+
+When we're removing rmap records for crosslinked blocks, use deferred
+intent items so that we can try to free/unmap as many of the old data
+structure's blocks as we can in the same transaction as the commit.
+
+Cc: <stable@vger.kernel.org> # v6.6
+Fixes: 1c7ce115e52106 ("xfs: reap large AG metadata extents when possible")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+[ adjusted xfs_rmap_free_extent() and xfs_refcount_free_cow_extent() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/scrub/reap.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/fs/xfs/scrub/reap.c
++++ b/fs/xfs/scrub/reap.c
+@@ -20,6 +20,7 @@
+ #include "xfs_ialloc_btree.h"
+ #include "xfs_rmap.h"
+ #include "xfs_rmap_btree.h"
++#include "xfs_refcount.h"
+ #include "xfs_refcount_btree.h"
+ #include "xfs_extent_busy.h"
+ #include "xfs_ag.h"
+@@ -376,9 +377,21 @@ xreap_agextent_iter(
+ if (crosslinked) {
+ trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);
+
+- rs->force_roll = true;
+- return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
+- *aglenp, rs->oinfo);
++ if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
++ /*
++ * If we're unmapping CoW staging extents, remove the
++ * records from the refcountbt, which will remove the
++ * rmap record as well.
++ */
++ xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp);
++ rs->force_roll = true;
++ return 0;
++ }
++
++ xfs_rmap_free_extent(sc->tp, sc->sa.pag->pag_agno, agbno,
++ *aglenp, rs->oinfo->oi_owner);
++ rs->deferred++;
++ return 0;
+ }
+
+ trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp);