--- /dev/null
+From stable+bounces-232609-greg=kroah.com@vger.kernel.org Wed Apr 1 01:37:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 19:37:04 -0400
+Subject: dmaengine: fsl-edma: change to guard(mutex) within fsl_edma3_xlate()
+To: stable@vger.kernel.org
+Cc: Joy Zou <joy.zou@nxp.com>, Frank Li <Frank.Li@nxp.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331233706.3629169-1-sashal@kernel.org>
+
+From: Joy Zou <joy.zou@nxp.com>
+
+[ Upstream commit 90d21f6e57a898ef02810404dd3866acaf707ebf ]
+
+Introduce a scope guard to automatically unlock the mutex within
+fsl_edma3_xlate() to simplify the code.
+
+Prepare to add source ID checks in the future.
+
+Signed-off-by: Joy Zou <joy.zou@nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://lore.kernel.org/r/20240701070232.2519179-2-joy.zou@nxp.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Stable-dep-of: 2e7b5cf72e51 ("dmaengine: fsl-edma: fix channel parameter config for fixed channel requests")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/fsl-edma-main.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -142,7 +142,7 @@ static struct dma_chan *fsl_edma3_xlate(
+
+ b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
+
+- mutex_lock(&fsl_edma->fsl_edma_mutex);
++ guard(mutex)(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
+ device_node) {
+
+@@ -166,18 +166,15 @@ static struct dma_chan *fsl_edma3_xlate(
+ if (!b_chmux && i == dma_spec->args[0]) {
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+- mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ } else if (b_chmux && !fsl_chan->srcid) {
+ /* if controller support channel mux, choose a free channel */
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->srcid = dma_spec->args[0];
+- mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+- mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+ }
+
--- /dev/null
+From stable+bounces-232610-greg=kroah.com@vger.kernel.org Wed Apr 1 01:37:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 19:37:05 -0400
+Subject: dmaengine: fsl-edma: fix channel parameter config for fixed channel requests
+To: stable@vger.kernel.org
+Cc: Joy Zou <joy.zou@nxp.com>, Frank Li <Frank.Li@nxp.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331233706.3629169-2-sashal@kernel.org>
+
+From: Joy Zou <joy.zou@nxp.com>
+
+[ Upstream commit 2e7b5cf72e51c9cf9c8b75190189c757df31ddd9 ]
+
+Configure only the requested channel when a fixed channel is specified
+to avoid modifying other channels unintentionally.
+
+Fix parameter configuration when a fixed DMA channel is requested on
+i.MX9 AON domain and i.MX8QM/QXP/DXL platforms. When a client requests
+a fixed channel (e.g., channel 6), the driver traverses channels 0-5
+and may unintentionally modify their configuration if they are unused.
+
+This leads to issues such as setting the `is_multi_fifo` flag unexpectedly,
+causing memcpy tests to fail when using the dmatest tool.
+
+This only affects the edma memcpy test when the channel is fixed.
+
+Fixes: 72f5801a4e2b ("dmaengine: fsl-edma: integrate v3 support")
+Signed-off-by: Joy Zou <joy.zou@nxp.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20250917-b4-edma-chanconf-v1-1-886486e02e91@nxp.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/fsl-edma-main.c | 26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/drivers/dma/fsl-edma-main.c
++++ b/drivers/dma/fsl-edma-main.c
+@@ -152,10 +152,8 @@ static struct dma_chan *fsl_edma3_xlate(
+ fsl_chan = to_fsl_edma_chan(chan);
+ i = fsl_chan - fsl_edma->chans;
+
+- fsl_chan->priority = dma_spec->args[1];
+- fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
+- fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
+- fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
++ if (!b_chmux && i != dma_spec->args[0])
++ continue;
+
+ if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
+ continue;
+@@ -163,17 +161,15 @@ static struct dma_chan *fsl_edma3_xlate(
+ if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
+ continue;
+
+- if (!b_chmux && i == dma_spec->args[0]) {
+- chan = dma_get_slave_channel(chan);
+- chan->device->privatecnt++;
+- return chan;
+- } else if (b_chmux && !fsl_chan->srcid) {
+- /* if controller support channel mux, choose a free channel */
+- chan = dma_get_slave_channel(chan);
+- chan->device->privatecnt++;
+- fsl_chan->srcid = dma_spec->args[0];
+- return chan;
+- }
++ fsl_chan->srcid = dma_spec->args[0];
++ fsl_chan->priority = dma_spec->args[1];
++ fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
++ fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
++ fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
++
++ chan = dma_get_slave_channel(chan);
++ chan->device->privatecnt++;
++ return chan;
+ }
+ return NULL;
+ }
--- /dev/null
+From stable+bounces-233179-greg=kroah.com@vger.kernel.org Fri Apr 3 13:50:35 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Apr 2026 07:50:27 -0400
+Subject: ext4: handle wraparound when searching for blocks for indirect mapped blocks
+To: stable@vger.kernel.org
+Cc: Theodore Ts'o <tytso@mit.edu>, Jan Kara <jack@suse.cz>, Baokun Li <libaokun@linux.alibaba.com>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260403115027.2051682-1-sashal@kernel.org>
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+[ Upstream commit bb81702370fad22c06ca12b6e1648754dbc37e0f ]
+
+Commit 4865c768b563 ("ext4: always allocate blocks only from groups
+inode can use") restricts what blocks will be allocated for indirect
+block based files to block numbers that fit within 32-bit block
+numbers.
+
+However, when using a review bot running on the latest Gemini LLM to
+check this commit when backporting into an LTS based kernel, it raised
+this concern:
+
+ If ac->ac_g_ex.fe_group is >= ngroups (for instance, if the goal
+ group was populated via stream allocation from s_mb_last_groups),
+ then start will be >= ngroups.
+
+ Does this allow allocating blocks beyond the 32-bit limit for
+ indirect block mapped files? The commit message mentions that
+ ext4_mb_scan_groups_linear() takes care to not select unsupported
+ groups. However, its loop uses group = *start, and the very first
+ iteration will call ext4_mb_scan_group() with this unsupported
+ group because next_linear_group() is only called at the end of the
+ iteration.
+
+After reviewing the code paths involved and considering the LLM
+review, I determined that this can happen when there is a file system
+where some files/directories are extent-mapped and others are
+indirect-block mapped. To address this, add a safety clamp in
+ext4_mb_scan_groups().
+
+Fixes: 4865c768b563 ("ext4: always allocate blocks only from groups inode can use")
+Cc: Jan Kara <jack@suse.cz>
+Reviewed-by: Baokun Li <libaokun@linux.alibaba.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Link: https://patch.msgid.link/20260326045834.1175822-1-tytso@mit.edu
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/mballoc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2892,6 +2892,8 @@ repeat:
+ * from the goal value specified
+ */
+ group = ac->ac_g_ex.fe_group;
++ if (group >= ngroups)
++ group = 0;
+ ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
+ prefetch_grp = group;
+
--- /dev/null
+From stable+bounces-233077-greg=kroah.com@vger.kernel.org Thu Apr 2 19:19:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Apr 2026 13:13:31 -0400
+Subject: ext4: publish jinode after initialization
+To: stable@vger.kernel.org
+Cc: Li Chen <me@linux.beauty>, Jan Kara <jack@suse.cz>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260402171331.1527205-1-sashal@kernel.org>
+
+From: Li Chen <me@linux.beauty>
+
+[ Upstream commit 1aec30021edd410b986c156f195f3d23959a9d11 ]
+
+ext4_inode_attach_jinode() publishes ei->jinode to concurrent users.
+It used to set ei->jinode before jbd2_journal_init_jbd_inode(),
+allowing a reader to observe a non-NULL jinode with i_vfs_inode
+still unset.
+
+The fast commit flush path can then pass this jinode to
+jbd2_wait_inode_data(), which dereferences i_vfs_inode->i_mapping and
+may crash.
+
+Below is the crash I observe:
+```
+BUG: unable to handle page fault for address: 000000010beb47f4
+PGD 110e51067 P4D 110e51067 PUD 0
+Oops: Oops: 0000 [#1] SMP NOPTI
+CPU: 1 UID: 0 PID: 4850 Comm: fc_fsync_bench_ Not tainted 6.18.0-00764-g795a690c06a5 #1 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Arch Linux 1.17.0-2-2 04/01/2014
+RIP: 0010:xas_find_marked+0x3d/0x2e0
+Code: e0 03 48 83 f8 02 0f 84 f0 01 00 00 48 8b 47 08 48 89 c3 48 39 c6 0f 82 fd 01 00 00 48 85 c9 74 3d 48 83 f9 03 77 63 4c 8b 0f <49> 8b 71 08 48 c7 47 18 00 00 00 00 48 89 f1 83 e1 03 48 83 f9 02
+RSP: 0018:ffffbbee806e7bf0 EFLAGS: 00010246
+RAX: 000000000010beb4 RBX: 000000000010beb4 RCX: 0000000000000003
+RDX: 0000000000000001 RSI: 0000002000300000 RDI: ffffbbee806e7c10
+RBP: 0000000000000001 R08: 0000002000300000 R09: 000000010beb47ec
+R10: ffff9ea494590090 R11: 0000000000000000 R12: 0000002000300000
+R13: ffffbbee806e7c90 R14: ffff9ea494513788 R15: ffffbbee806e7c88
+FS: 00007fc2f9e3e6c0(0000) GS:ffff9ea6b1444000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000010beb47f4 CR3: 0000000119ac5000 CR4: 0000000000750ef0
+PKRU: 55555554
+Call Trace:
+<TASK>
+filemap_get_folios_tag+0x87/0x2a0
+__filemap_fdatawait_range+0x5f/0xd0
+? srso_alias_return_thunk+0x5/0xfbef5
+? __schedule+0x3e7/0x10c0
+? srso_alias_return_thunk+0x5/0xfbef5
+? srso_alias_return_thunk+0x5/0xfbef5
+? srso_alias_return_thunk+0x5/0xfbef5
+? preempt_count_sub+0x5f/0x80
+? srso_alias_return_thunk+0x5/0xfbef5
+? cap_safe_nice+0x37/0x70
+? srso_alias_return_thunk+0x5/0xfbef5
+? preempt_count_sub+0x5f/0x80
+? srso_alias_return_thunk+0x5/0xfbef5
+filemap_fdatawait_range_keep_errors+0x12/0x40
+ext4_fc_commit+0x697/0x8b0
+? ext4_file_write_iter+0x64b/0x950
+? srso_alias_return_thunk+0x5/0xfbef5
+? preempt_count_sub+0x5f/0x80
+? srso_alias_return_thunk+0x5/0xfbef5
+? vfs_write+0x356/0x480
+? srso_alias_return_thunk+0x5/0xfbef5
+? preempt_count_sub+0x5f/0x80
+ext4_sync_file+0xf7/0x370
+do_fsync+0x3b/0x80
+? syscall_trace_enter+0x108/0x1d0
+__x64_sys_fdatasync+0x16/0x20
+do_syscall_64+0x62/0x2c0
+entry_SYSCALL_64_after_hwframe+0x76/0x7e
+...
+```
+
+Fix this by initializing the jbd2_inode first.
+Use smp_wmb() and WRITE_ONCE() to publish ei->jinode after
+initialization. Readers use READ_ONCE() to fetch the pointer.
+
+Fixes: a361293f5fede ("jbd2: Fix oops in jbd2_journal_file_inode()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Li Chen <me@linux.beauty>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20260225082617.147957-1-me@linux.beauty
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ adapted READ_ONCE(ei->jinode) to use pos->jinode ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/fast_commit.c | 4 ++--
+ fs/ext4/inode.c | 15 +++++++++++----
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -997,7 +997,7 @@ static int ext4_fc_submit_inode_data_all
+ finish_wait(&ei->i_fc_wait, &wait);
+ }
+ spin_unlock(&sbi->s_fc_lock);
+- ret = jbd2_submit_inode_data(journal, ei->jinode);
++ ret = jbd2_submit_inode_data(journal, READ_ONCE(ei->jinode));
+ if (ret)
+ return ret;
+ spin_lock(&sbi->s_fc_lock);
+@@ -1022,7 +1022,7 @@ static int ext4_fc_wait_inode_data_all(j
+ continue;
+ spin_unlock(&sbi->s_fc_lock);
+
+- ret = jbd2_wait_inode_data(journal, pos->jinode);
++ ret = jbd2_wait_inode_data(journal, READ_ONCE(pos->jinode));
+ if (ret)
+ return ret;
+ spin_lock(&sbi->s_fc_lock);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -123,6 +123,8 @@ void ext4_inode_csum_set(struct inode *i
+ static inline int ext4_begin_ordered_truncate(struct inode *inode,
+ loff_t new_size)
+ {
++ struct jbd2_inode *jinode = READ_ONCE(EXT4_I(inode)->jinode);
++
+ trace_ext4_begin_ordered_truncate(inode, new_size);
+ /*
+ * If jinode is zero, then we never opened the file for
+@@ -130,10 +132,10 @@ static inline int ext4_begin_ordered_tru
+ * jbd2_journal_begin_ordered_truncate() since there's no
+ * outstanding writes we need to flush.
+ */
+- if (!EXT4_I(inode)->jinode)
++ if (!jinode)
+ return 0;
+ return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+- EXT4_I(inode)->jinode,
++ jinode,
+ new_size);
+ }
+
+@@ -4160,8 +4162,13 @@ int ext4_inode_attach_jinode(struct inod
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+- ei->jinode = jinode;
+- jbd2_journal_init_jbd_inode(ei->jinode, inode);
++ jbd2_journal_init_jbd_inode(jinode, inode);
++ /*
++ * Publish ->jinode only after it is fully initialized so that
++ * readers never observe a partially initialized jbd2_inode.
++ */
++ smp_wmb();
++ WRITE_ONCE(ei->jinode, jinode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
--- /dev/null
+From stable+bounces-226451-greg=kroah.com@vger.kernel.org Tue Mar 17 18:05:19 2026
+From: inv.git-commit@tdk.com
+Date: Tue, 17 Mar 2026 16:59:03 +0000
+Subject: iio: imu: inv_icm42600: fix odr switch when turning buffer off
+To: stable@vger.kernel.org
+Cc: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>, Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Message-ID: <20260317165903.745349-1-inv.git-commit@tdk.com>
+
+From: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>
+
+[ Upstream commit ffd32db8263d2d785a2c419486a450dc80693235 ]
+
+ODR switch is done in 2 steps when FIFO is on : change the ODR register
+value and acknowledge change when reading the FIFO ODR change flag.
+When we are switching odr and turning buffer off just afterward, we are
+losing the FIFO ODR change flag and ODR switch is blocked.
+
+Fix the issue by force applying any waiting ODR change when turning
+buffer off.
+
+Fixes: ec74ae9fd37c ("iio: imu: inv_icm42600: add accurate timestamping")
+Signed-off-by: Jean-Baptiste Maneyrol <jean-baptiste.maneyrol@tdk.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+@@ -378,6 +378,7 @@ out_unlock:
+ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
+ {
+ struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
++ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int sensor;
+ unsigned int *watermark;
+@@ -399,6 +400,8 @@ static int inv_icm42600_buffer_postdisab
+
+ mutex_lock(&st->lock);
+
++ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
++
+ ret = inv_icm42600_buffer_set_fifo_en(st, st->fifo.en & ~sensor);
+ if (ret)
+ goto out_unlock;
--- /dev/null
+From stable+bounces-231408-greg=kroah.com@vger.kernel.org Tue Mar 31 13:57:21 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 07:51:22 -0400
+Subject: mm/huge_memory: fix folio isn't locked in softleaf_to_folio()
+To: stable@vger.kernel.org
+Cc: Jinjiang Tu <tujinjiang@huawei.com>, "David Hildenbrand (Arm)" <david@kernel.org>, "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>, Barry Song <baohua@kernel.org>, Kefeng Wang <wangkefeng.wang@huawei.com>, Liam Howlett <liam.howlett@oracle.com>, Michal Hocko <mhocko@suse.com>, Mike Rapoport <rppt@kernel.org>, Nanyong Sun <sunnanyong@huawei.com>, Ryan Roberts <ryan.roberts@arm.com>, Suren Baghdasaryan <surenb@google.com>, Vlastimil Babka <vbabka@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331115122.2131507-1-sashal@kernel.org>
+
+From: Jinjiang Tu <tujinjiang@huawei.com>
+
+[ Upstream commit 4c5e7f0fcd592801c9cc18f29f80fbee84eb8669 ]
+
+On arm64 server, we found folio that get from migration entry isn't locked
+in softleaf_to_folio(). This issue triggers when mTHP splitting and
+zap_nonpresent_ptes() races, and the root cause is lack of memory barrier
+in softleaf_to_folio(). The race is as follows:
+
+ CPU0 CPU1
+
+deferred_split_scan() zap_nonpresent_ptes()
+ lock folio
+ split_folio()
+ unmap_folio()
+ change ptes to migration entries
+ __split_folio_to_order() softleaf_to_folio()
+ set flags(including PG_locked) for tail pages folio = pfn_folio(softleaf_to_pfn(entry))
+ smp_wmb() VM_WARN_ON_ONCE(!folio_test_locked(folio))
+ prep_compound_page() for tail pages
+
+In __split_folio_to_order(), smp_wmb() guarantees page flags of tail pages
+are visible before the tail page becomes non-compound. smp_wmb() should
+be paired with smp_rmb() in softleaf_to_folio(), which is missed. As a
+result, if zap_nonpresent_ptes() accesses migration entry that stores tail
+pfn, softleaf_to_folio() may see the updated compound_head of tail page
+before page->flags.
+
+This issue will trigger VM_WARN_ON_ONCE() in pfn_swap_entry_folio()
+because of the race between folio split and zap_nonpresent_ptes()
+leading to a folio incorrectly undergoing modification without a folio
+lock being held.
+
+This is a BUG_ON() before commit 93976a20345b ("mm: eliminate further
+swapops predicates"), which was merged in v6.19-rc1.
+
+To fix it, add missing smp_rmb() if the softleaf entry is migration entry
+in softleaf_to_folio() and softleaf_to_page().
+
+[tujinjiang@huawei.com: update function name and comments]
+ Link: https://lkml.kernel.org/r/20260321075214.3305564-1-tujinjiang@huawei.com
+Link: https://lkml.kernel.org/r/20260319012541.4158561-1-tujinjiang@huawei.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Nanyong Sun <sunnanyong@huawei.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ adapted fix from leafops.h softleaf_to_page()/softleaf_to_folio() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/swapops.h | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -488,11 +488,21 @@ static inline struct page *pfn_swap_entr
+ {
+ struct page *p = pfn_to_page(swp_offset_pfn(entry));
+
+- /*
+- * Any use of migration entries may only occur while the
+- * corresponding page is locked
+- */
+- BUG_ON(is_migration_entry(entry) && !PageLocked(p));
++ if (is_migration_entry(entry)) {
++ /*
++ * Ensure we do not race with split, which might alter tail
++ * pages into new folios and thus result in observing an
++ * unlocked folio.
++ * This matches the write barrier in __split_folio_to_order().
++ */
++ smp_rmb();
++
++ /*
++ * Any use of migration entries may only occur while the
++ * corresponding page is locked
++ */
++ BUG_ON(!PageLocked(p));
++ }
+
+ return p;
+ }
--- /dev/null
+From stable+bounces-233082-greg=kroah.com@vger.kernel.org Thu Apr 2 19:39:57 2026
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Thu, 2 Apr 2026 19:36:17 +0200
+Subject: MPTCP: fix lock class name family in pm_nl_create_listen_socket
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, Li Xiasong <lixiasong1@huawei.com>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20260402173616.3331064-2-matttbe@kernel.org>
+
+From: Li Xiasong <lixiasong1@huawei.com>
+
+commit 7ab4a7c5d969642782b8a5b608da0dd02aa9f229 upstream.
+
+In mptcp_pm_nl_create_listen_socket(), use entry->addr.family
+instead of sk->sk_family for lock class setup. The 'sk' parameter
+is a netlink socket, not the MPTCP subflow socket being created.
+
+Fixes: cee4034a3db1 ("mptcp: fix lockdep false positive in mptcp_pm_nl_create_listen_socket()")
+Signed-off-by: Li Xiasong <lixiasong1@huawei.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20260319112159.3118874-1-lixiasong1@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflict in pm_kernel.c, because commit 8617e85e04bd ("mptcp: pm:
+ split in-kernel PM specific code") is not in this version, and moves
+ code from pm_netlink.c to pm_kernel.c. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1189,7 +1189,7 @@ static struct lock_class_key mptcp_keys[
+ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ struct mptcp_pm_addr_entry *entry)
+ {
+- bool is_ipv6 = sk->sk_family == AF_INET6;
++ bool is_ipv6 = entry->addr.family == AF_INET6;
+ int addrlen = sizeof(struct sockaddr_in);
+ struct sockaddr_storage addr;
+ struct sock *newsk, *ssk;
--- /dev/null
+From stable+bounces-232566-greg=kroah.com@vger.kernel.org Tue Mar 31 21:45:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 15:40:11 -0400
+Subject: net: correctly handle tunneled traffic on IPV6_CSUM GSO fallback
+To: stable@vger.kernel.org
+Cc: Willem de Bruijn <willemb@google.com>, Tangxin Xie <xietangxin@yeah.net>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331194011.3088342-1-sashal@kernel.org>
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit c4336a07eb6b2526dc2b62928b5104b41a7f81f5 ]
+
+NETIF_F_IPV6_CSUM only advertises support for checksum offload of
+packets without IPv6 extension headers. Packets with extension
+headers must fall back onto software checksumming. Since TSO
+depends on checksum offload, those must revert to GSO.
+
+The below commit introduces that fallback. It always checks
+network header length. For tunneled packets, the inner header length
+must be checked instead. Extend the check accordingly.
+
+A special case is tunneled packets without inner IP protocol. Such as
+RFC 6951 SCTP in UDP. Those are not standard IPv6 followed by
+transport header either, so also must revert to the software GSO path.
+
+Cc: stable@vger.kernel.org
+Fixes: 864e3396976e ("net: gso: Forbid IPv6 TSO with extensions on devices with only IPV6_CSUM")
+Reported-by: Tangxin Xie <xietangxin@yeah.net>
+Closes: https://lore.kernel.org/netdev/0414e7e2-9a1c-4d7c-a99d-b9039cf68f40@yeah.net/
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20260320190148.2409107-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3535,6 +3535,22 @@ static netdev_features_t dflt_features_c
+ return vlan_features_check(skb, features);
+ }
+
++static bool skb_gso_has_extension_hdr(const struct sk_buff *skb)
++{
++ if (!skb->encapsulation)
++ return ((skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr));
++ else
++ return (!skb_inner_network_header_was_set(skb) ||
++ ((skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ inner_ip_hdr(skb)->version == 6)) &&
++ skb_inner_network_header_len(skb) != sizeof(struct ipv6hdr)));
++}
++
+ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+@@ -3576,11 +3592,7 @@ static netdev_features_t gso_features_ch
+ * so neither does TSO that depends on it.
+ */
+ if (features & NETIF_F_IPV6_CSUM &&
+- (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
+- (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+- vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
+- skb_transport_header_was_set(skb) &&
+- skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ skb_gso_has_extension_hdr(skb) &&
+ !ipv6_has_hopopt_jumbo(skb))
+ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
+
--- /dev/null
+From stable+bounces-232568-greg=kroah.com@vger.kernel.org Tue Mar 31 21:45:04 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 15:40:40 -0400
+Subject: net: macb: Move devm_{free,request}_irq() out of spin lock area
+To: stable@vger.kernel.org
+Cc: "Kevin Hao" <haokexin@gmail.com>, "Théo Lebrun" <theo.lebrun@bootlin.com>, "Jakub Kicinski" <kuba@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260331194040.3090422-1-sashal@kernel.org>
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 317e49358ebbf6390fa439ef3c142f9239dd25fb ]
+
+The devm_free_irq() and devm_request_irq() functions should not be
+executed in an atomic context.
+
+During device suspend, all userspace processes and most kernel threads
+are frozen. Additionally, we flush all tx/rx status, disable all macb
+interrupts, and halt rx operations. Therefore, it is safe to split the
+region protected by bp->lock into two independent sections, allowing
+devm_free_irq() and devm_request_irq() to run in a non-atomic context.
+This modification resolves the following lockdep warning:
+ BUG: sleeping function called from invalid context at kernel/locking/mutex.c:591
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 501, name: rtcwake
+ preempt_count: 1, expected: 0
+ RCU nest depth: 1, expected: 0
+ 7 locks held by rtcwake/501:
+ #0: ffff0008038c3408 (sb_writers#5){.+.+}-{0:0}, at: vfs_write+0xf8/0x368
+ #1: ffff0008049a5e88 (&of->mutex#2){+.+.}-{4:4}, at: kernfs_fop_write_iter+0xbc/0x1c8
+ #2: ffff00080098d588 (kn->active#70){.+.+}-{0:0}, at: kernfs_fop_write_iter+0xcc/0x1c8
+ #3: ffff800081c84888 (system_transition_mutex){+.+.}-{4:4}, at: pm_suspend+0x1ec/0x290
+ #4: ffff0008009ba0f8 (&dev->mutex){....}-{4:4}, at: device_suspend+0x118/0x4f0
+ #5: ffff800081d00458 (rcu_read_lock){....}-{1:3}, at: rcu_lock_acquire+0x4/0x48
+ #6: ffff0008031fb9e0 (&bp->lock){-.-.}-{3:3}, at: macb_suspend+0x144/0x558
+ irq event stamp: 8682
+ hardirqs last enabled at (8681): [<ffff8000813c7d7c>] _raw_spin_unlock_irqrestore+0x44/0x88
+ hardirqs last disabled at (8682): [<ffff8000813c7b58>] _raw_spin_lock_irqsave+0x38/0x98
+ softirqs last enabled at (7322): [<ffff8000800f1b4c>] handle_softirqs+0x52c/0x588
+ softirqs last disabled at (7317): [<ffff800080010310>] __do_softirq+0x20/0x2c
+ CPU: 1 UID: 0 PID: 501 Comm: rtcwake Not tainted 7.0.0-rc3-next-20260310-yocto-standard+ #125 PREEMPT
+ Hardware name: ZynqMP ZCU102 Rev1.1 (DT)
+ Call trace:
+ show_stack+0x24/0x38 (C)
+ __dump_stack+0x28/0x38
+ dump_stack_lvl+0x64/0x88
+ dump_stack+0x18/0x24
+ __might_resched+0x200/0x218
+ __might_sleep+0x38/0x98
+ __mutex_lock_common+0x7c/0x1378
+ mutex_lock_nested+0x38/0x50
+ free_irq+0x68/0x2b0
+ devm_irq_release+0x24/0x38
+ devres_release+0x40/0x80
+ devm_free_irq+0x48/0x88
+ macb_suspend+0x298/0x558
+ device_suspend+0x218/0x4f0
+ dpm_suspend+0x244/0x3a0
+ dpm_suspend_start+0x50/0x78
+ suspend_devices_and_enter+0xec/0x560
+ pm_suspend+0x194/0x290
+ state_store+0x110/0x158
+ kobj_attr_store+0x1c/0x30
+ sysfs_kf_write+0xa8/0xd0
+ kernfs_fop_write_iter+0x11c/0x1c8
+ vfs_write+0x248/0x368
+ ksys_write+0x7c/0xf8
+ __arm64_sys_write+0x28/0x40
+ invoke_syscall+0x4c/0xe8
+ el0_svc_common+0x98/0xf0
+ do_el0_svc+0x28/0x40
+ el0_svc+0x54/0x1e0
+ el0t_64_sync_handler+0x84/0x130
+ el0t_64_sync+0x198/0x1a0
+
+Fixes: 558e35ccfe95 ("net: macb: WoL support for GEM type of Ethernet controller")
+Cc: stable@vger.kernel.org
+Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Link: https://patch.msgid.link/20260318-macb-irq-v2-1-f1179768ab24@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ replaced `tmp` variable with direct `MACB_BIT(MAG)` ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5384,6 +5384,8 @@ static int __maybe_unused macb_suspend(s
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
++ spin_unlock_irqrestore(&bp->lock, flags);
++
+ /* Change interrupt handler and
+ * Enable WoL IRQ on queue 0
+ */
+@@ -5395,11 +5397,12 @@ static int __maybe_unused macb_suspend(s
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+- spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
++ spin_lock_irqsave(&bp->lock, flags);
+ queue_writel(bp->queues, IER, GEM_BIT(WOL));
+ gem_writel(bp, WOL, MACB_BIT(MAG));
++ spin_unlock_irqrestore(&bp->lock, flags);
+ } else {
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+@@ -5407,13 +5410,13 @@ static int __maybe_unused macb_suspend(s
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+- spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
++ spin_lock_irqsave(&bp->lock, flags);
+ queue_writel(bp->queues, IER, MACB_BIT(WOL));
+ macb_writel(bp, WOL, MACB_BIT(MAG));
++ spin_unlock_irqrestore(&bp->lock, flags);
+ }
+- spin_unlock_irqrestore(&bp->lock, flags);
+
+ enable_irq_wake(bp->queues[0].irq);
+ }
+@@ -5480,6 +5483,8 @@ static int __maybe_unused macb_resume(st
+ queue_readl(bp->queues, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(bp->queues, ISR, -1);
++ spin_unlock_irqrestore(&bp->lock, flags);
++
+ /* Replace interrupt handler on queue 0 */
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
+@@ -5488,10 +5493,8 @@ static int __maybe_unused macb_resume(st
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+- spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+- spin_unlock_irqrestore(&bp->lock, flags);
+
+ disable_irq_wake(bp->queues[0].irq);
+
--- /dev/null
+From stable+bounces-232567-greg=kroah.com@vger.kernel.org Tue Mar 31 21:41:08 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 15:40:19 -0400
+Subject: net: mana: fix use-after-free in add_adev() error path
+To: stable@vger.kernel.org
+Cc: Guangshuo Li <lgs201920130244@gmail.com>, Long Li <longli@microsoft.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331194019.3088820-1-sashal@kernel.org>
+
+From: Guangshuo Li <lgs201920130244@gmail.com>
+
+[ Upstream commit c4ea7d8907cf72b259bf70bd8c2e791e1c4ff70f ]
+
+If auxiliary_device_add() fails, add_adev() jumps to add_fail and calls
+auxiliary_device_uninit(adev).
+
+The auxiliary device has its release callback set to adev_release(),
+which frees the containing struct mana_adev. Since adev is embedded in
+struct mana_adev, the subsequent fall-through to init_fail and access
+to adev->id may result in a use-after-free.
+
+Fix this by saving the allocated auxiliary device id in a local
+variable before calling auxiliary_device_add(), and use that saved id
+in the cleanup path after auxiliary_device_uninit().
+
+Fixes: a69839d4327d ("net: mana: Add support for auxiliary device")
+Cc: stable@vger.kernel.org
+Reviewed-by: Long Li <longli@microsoft.com>
+Signed-off-by: Guangshuo Li <lgs201920130244@gmail.com>
+Link: https://patch.msgid.link/20260323165730.945365-1-lgs201920130244@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/mana_en.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -2759,6 +2759,7 @@ static int add_adev(struct gdma_dev *gd)
+ struct auxiliary_device *adev;
+ struct mana_adev *madev;
+ int ret;
++ int id;
+
+ madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+ if (!madev)
+@@ -2768,7 +2769,8 @@ static int add_adev(struct gdma_dev *gd)
+ ret = mana_adev_idx_alloc();
+ if (ret < 0)
+ goto idx_fail;
+- adev->id = ret;
++ id = ret;
++ adev->id = id;
+
+ adev->name = "rdma";
+ adev->dev.parent = gd->gdma_context->dev;
+@@ -2792,7 +2794,7 @@ add_fail:
+ auxiliary_device_uninit(adev);
+
+ init_fail:
+- mana_adev_idx_free(adev->id);
++ mana_adev_idx_free(id);
+
+ idx_fail:
+ kfree(madev);
--- /dev/null
+From stable+bounces-233514-greg=kroah.com@vger.kernel.org Tue Apr 7 10:11:58 2026
+From: "Marek Behún" <kabel@kernel.org>
+Date: Tue, 7 Apr 2026 10:11:15 +0200
+Subject: net: sfp: Fix Ubiquiti U-Fiber Instant SFP module on mvneta
+To: stable@vger.kernel.org
+Cc: "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>, "Marek Behún" <kabel@kernel.org>, "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>, "Jakub Kicinski" <kuba@kernel.org>
+Message-ID: <20260407081115.2830361-1-kabel@kernel.org>
+
+From: "Marek Behún" <kabel@kernel.org>
+
+[ Upstream commit eeee5a710f26ce57807024ef330fe5a850eaecd8 ]
+
+In commit 8110633db49d7de2 ("net: sfp-bus: allow SFP quirks to override
+Autoneg and pause bits") we moved the setting of Autoneg and pause bits
+before the call to SFP quirk when parsing SFP module support.
+
+Since the quirk for Ubiquiti U-Fiber Instant SFP module zeroes the
+support bits and sets 1000baseX_Full only, the above mentioned commit
+changed the overall computed support from
+ 1000baseX_Full, Autoneg, Pause, Asym_Pause
+to just
+ 1000baseX_Full.
+
+This broke the SFP module for mvneta, which requires Autoneg for
+1000baseX since commit c762b7fac1b249a9 ("net: mvneta: deny disabling
+autoneg for 802.3z modes").
+
+Fix this by setting back the Autoneg, Pause and Asym_Pause bits in the
+quirk.
+
+Fixes: 8110633db49d7de2 ("net: sfp-bus: allow SFP quirks to override Autoneg and pause bits")
+Signed-off-by: Marek Behún <kabel@kernel.org>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://patch.msgid.link/20260326122038.2489589-1-kabel@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/sfp.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -465,10 +465,15 @@ static void sfp_quirk_ubnt_uf_instant(co
+ {
+ /* Ubiquiti U-Fiber Instant module claims that support all transceiver
+ * types including 10G Ethernet which is not truth. So clear all claimed
+- * modes and set only one mode which module supports: 1000baseX_Full.
++ * modes and set only one mode which module supports: 1000baseX_Full,
++ * along with the Autoneg and pause bits.
+ */
+ linkmode_zero(modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
++ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
++ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, modes);
++ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, modes);
++
+ phy_interface_zero(interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
--- /dev/null
+From stable+bounces-231444-greg=kroah.com@vger.kernel.org Tue Mar 31 17:26:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 11:20:32 -0400
+Subject: scsi: target: tcm_loop: Drain commands in target_reset handler
+To: stable@vger.kernel.org
+Cc: Josef Bacik <josef@toxicpanda.com>, "Martin K. Petersen" <martin.petersen@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331152032.2611007-1-sashal@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 1333eee56cdf3f0cf67c6ab4114c2c9e0a952026 ]
+
+tcm_loop_target_reset() violates the SCSI EH contract: it returns SUCCESS
+without draining any in-flight commands. The SCSI EH documentation
+(scsi_eh.rst) requires that when a reset handler returns SUCCESS the driver
+has made lower layers "forget about timed out scmds" and is ready for new
+commands. Every other SCSI LLD (virtio_scsi, mpt3sas, ipr, scsi_debug,
+mpi3mr) enforces this by draining or completing outstanding commands before
+returning SUCCESS.
+
+Because tcm_loop_target_reset() doesn't drain, the SCSI EH reuses in-flight
+scsi_cmnd structures for recovery commands (e.g. TUR) while the target core
+still has async completion work queued for the old se_cmd. The memset in
+queuecommand zeroes se_lun and lun_ref_active, causing
+transport_lun_remove_cmd() to skip its percpu_ref_put(). The leaked LUN
+reference prevents transport_clear_lun_ref() from completing, hanging
+configfs LUN unlink forever in D-state:
+
+ INFO: task rm:264 blocked for more than 122 seconds.
+ rm D 0 264 258 0x00004000
+ Call Trace:
+ __schedule+0x3d0/0x8e0
+ schedule+0x36/0xf0
+ transport_clear_lun_ref+0x78/0x90 [target_core_mod]
+ core_tpg_remove_lun+0x28/0xb0 [target_core_mod]
+ target_fabric_port_unlink+0x50/0x60 [target_core_mod]
+ configfs_unlink+0x156/0x1f0 [configfs]
+ vfs_unlink+0x109/0x290
+ do_unlinkat+0x1d5/0x2d0
+
+Fix this by making tcm_loop_target_reset() actually drain commands:
+
+ 1. Issue TMR_LUN_RESET via tcm_loop_issue_tmr() to drain all commands that
+ the target core knows about (those not yet CMD_T_COMPLETE).
+
+ 2. Use blk_mq_tagset_busy_iter() to iterate all started requests and
+ flush_work() on each se_cmd — this drains any deferred completion work
+ for commands that already had CMD_T_COMPLETE set before the TMR (which
+ the TMR skips via __target_check_io_state()). This is the same pattern
+ used by mpi3mr, scsi_debug, and libsas to drain outstanding commands
+ during reset.
+
+Fixes: e0eb5d38b732 ("scsi: target: tcm_loop: Use block cmd allocator for se_cmds")
+Cc: stable@vger.kernel.org
+Assisted-by: Claude:claude-opus-4-6
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Link: https://patch.msgid.link/27011aa34c8f6b1b94d2e3cf5655b6d037f53428.1773706803.git.josef@toxicpanda.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/target/loopback/tcm_loop.c | 52 ++++++++++++++++++++++++++++++++-----
+ 1 file changed, 46 insertions(+), 6 deletions(-)
+
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -26,6 +26,7 @@
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/configfs.h>
++#include <linux/blk-mq.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+@@ -267,15 +268,27 @@ static int tcm_loop_device_reset(struct
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+ }
+
++static bool tcm_loop_flush_work_iter(struct request *rq, void *data)
++{
++ struct scsi_cmnd *sc = blk_mq_rq_to_pdu(rq);
++ struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
++ struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
++
++ flush_work(&se_cmd->work);
++ return true;
++}
++
+ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
+ {
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
++ struct Scsi_Host *sh = sc->device->host;
++ int ret;
+
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+- tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
++ tl_hba = *(struct tcm_loop_hba **)shost_priv(sh);
+ if (!tl_hba) {
+ pr_err("Unable to perform device reset without active I_T Nexus\n");
+ return FAILED;
+@@ -284,11 +297,38 @@ static int tcm_loop_target_reset(struct
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+- if (tl_tpg) {
+- tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+- return SUCCESS;
+- }
+- return FAILED;
++ if (!tl_tpg)
++ return FAILED;
++
++ /*
++ * Issue a LUN_RESET to drain all commands that the target core
++ * knows about. This handles commands not yet marked CMD_T_COMPLETE.
++ */
++ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET);
++ if (ret != TMR_FUNCTION_COMPLETE)
++ return FAILED;
++
++ /*
++ * Flush any deferred target core completion work that may still be
++ * queued. Commands that already had CMD_T_COMPLETE set before the TMR
++ * are skipped by the TMR drain, but their async completion work
++ * (transport_lun_remove_cmd → percpu_ref_put, release_cmd → scsi_done)
++ * may still be pending in target_completion_wq.
++ *
++ * The SCSI EH will reuse in-flight scsi_cmnd structures for recovery
++ * commands (e.g. TUR) immediately after this handler returns SUCCESS —
++ * if deferred work is still pending, the memset in queuecommand would
++ * zero the se_cmd while the work accesses it, leaking the LUN
++ * percpu_ref and hanging configfs unlink forever.
++ *
++ * Use blk_mq_tagset_busy_iter() to find all started requests and
++ * flush_work() on each — the same pattern used by mpi3mr, scsi_debug,
++ * and other SCSI drivers to drain outstanding commands during reset.
++ */
++ blk_mq_tagset_busy_iter(&sh->tag_set, tcm_loop_flush_work_iter, NULL);
++
++ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
++ return SUCCESS;
+ }
+
+ static const struct scsi_host_template tcm_loop_driver_template = {
usb-gadget-f_subset-fix-unbalanced-refcnt-in-geth_free.patch
usb-gadget-f_rndis-protect-rndis-options-with-mutex.patch
usb-gadget-f_uac1_legacy-validate-control-request-size.patch
+wifi-virt_wifi-remove-set_netdev_dev-to-avoid-use-after-free.patch
+iio-imu-inv_icm42600-fix-odr-switch-when-turning-buffer-off.patch
+net-macb-move-devm_-free-request-_irq-out-of-spin-lock-area.patch
+net-correctly-handle-tunneled-traffic-on-ipv6_csum-gso-fallback.patch
+net-mana-fix-use-after-free-in-add_adev-error-path.patch
+scsi-target-tcm_loop-drain-commands-in-target_reset-handler.patch
+mm-huge_memory-fix-folio-isn-t-locked-in-softleaf_to_folio.patch
+x86-cpu-enable-fsgsbase-early-in-cpu_init_exception_handling.patch
+dmaengine-fsl-edma-change-to-guard-mutex-within-fsl_edma3_xlate.patch
+dmaengine-fsl-edma-fix-channel-parameter-config-for-fixed-channel-requests.patch
+ext4-publish-jinode-after-initialization.patch
+ext4-handle-wraparound-when-searching-for-blocks-for-indirect-mapped-blocks.patch
+mptcp-fix-lock-class-name-family-in-pm_nl_create_listen_socket.patch
+net-sfp-fix-ubiquiti-u-fiber-instant-sfp-module-on-mvneta.patch
--- /dev/null
+From 789b06f9f39cdc7e895bdab2c034e39c41c8f8d6 Mon Sep 17 00:00:00 2001
+From: Alexander Popov <alex.popov@linux.com>
+Date: Wed, 25 Mar 2026 01:46:02 +0300
+Subject: wifi: virt_wifi: remove SET_NETDEV_DEV to avoid use-after-free
+
+From: Alexander Popov <alex.popov@linux.com>
+
+commit 789b06f9f39cdc7e895bdab2c034e39c41c8f8d6 upstream.
+
+Currently we execute `SET_NETDEV_DEV(dev, &priv->lowerdev->dev)` for
+the virt_wifi net devices. However, unregistering a virt_wifi device in
+netdev_run_todo() can happen together with the device referenced by
+SET_NETDEV_DEV().
+
+It can result in use-after-free during the ethtool operations performed
+on a virt_wifi device that is currently being unregistered. Such a net
+device can have the `dev.parent` field pointing to the freed memory,
+but ethnl_ops_begin() calls `pm_runtime_get_sync(dev->dev.parent)`.
+
+Let's remove SET_NETDEV_DEV for virt_wifi to avoid bugs like this:
+
+ ==================================================================
+ BUG: KASAN: slab-use-after-free in __pm_runtime_resume+0xe2/0xf0
+ Read of size 2 at addr ffff88810cfc46f8 by task pm/606
+
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x4d/0x70
+ print_report+0x170/0x4f3
+ ? __pfx__raw_spin_lock_irqsave+0x10/0x10
+ kasan_report+0xda/0x110
+ ? __pm_runtime_resume+0xe2/0xf0
+ ? __pm_runtime_resume+0xe2/0xf0
+ __pm_runtime_resume+0xe2/0xf0
+ ethnl_ops_begin+0x49/0x270
+ ethnl_set_features+0x23c/0xab0
+ ? __pfx_ethnl_set_features+0x10/0x10
+ ? kvm_sched_clock_read+0x11/0x20
+ ? local_clock_noinstr+0xf/0xf0
+ ? local_clock+0x10/0x30
+ ? kasan_save_track+0x25/0x60
+ ? __kasan_kmalloc+0x7f/0x90
+ ? genl_family_rcv_msg_attrs_parse.isra.0+0x150/0x2c0
+ genl_family_rcv_msg_doit+0x1e7/0x2c0
+ ? __pfx_genl_family_rcv_msg_doit+0x10/0x10
+ ? __pfx_cred_has_capability.isra.0+0x10/0x10
+ ? stack_trace_save+0x8e/0xc0
+ genl_rcv_msg+0x411/0x660
+ ? __pfx_genl_rcv_msg+0x10/0x10
+ ? __pfx_ethnl_set_features+0x10/0x10
+ netlink_rcv_skb+0x121/0x380
+ ? __pfx_genl_rcv_msg+0x10/0x10
+ ? __pfx_netlink_rcv_skb+0x10/0x10
+ ? __pfx_down_read+0x10/0x10
+ genl_rcv+0x23/0x30
+ netlink_unicast+0x60f/0x830
+ ? __pfx_netlink_unicast+0x10/0x10
+ ? __pfx___alloc_skb+0x10/0x10
+ netlink_sendmsg+0x6ea/0xbc0
+ ? __pfx_netlink_sendmsg+0x10/0x10
+ ? __futex_queue+0x10b/0x1f0
+ ____sys_sendmsg+0x7a2/0x950
+ ? copy_msghdr_from_user+0x26b/0x430
+ ? __pfx_____sys_sendmsg+0x10/0x10
+ ? __pfx_copy_msghdr_from_user+0x10/0x10
+ ___sys_sendmsg+0xf8/0x180
+ ? __pfx____sys_sendmsg+0x10/0x10
+ ? __pfx_futex_wait+0x10/0x10
+ ? fdget+0x2e4/0x4a0
+ __sys_sendmsg+0x11f/0x1c0
+ ? __pfx___sys_sendmsg+0x10/0x10
+ do_syscall_64+0xe2/0x570
+ ? exc_page_fault+0x66/0xb0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ </TASK>
+
+This fix may be combined with another one in the ethtool subsystem:
+https://lore.kernel.org/all/20260322075917.254874-1-alex.popov@linux.com/T/#u
+
+Fixes: d43c65b05b848e0b ("ethtool: runtime-resume netdev parent in ethnl_ops_begin")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Popov <alex.popov@linux.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Breno Leitao <leitao@debian.org>
+Link: https://patch.msgid.link/20260324224607.374327-1-alex.popov@linux.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/virtual/virt_wifi.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/wireless/virtual/virt_wifi.c
++++ b/drivers/net/wireless/virtual/virt_wifi.c
+@@ -555,7 +555,6 @@ static int virt_wifi_newlink(struct net
+ eth_hw_addr_inherit(dev, priv->lowerdev);
+ netif_stacked_transfer_operstate(priv->lowerdev, dev);
+
+- SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
+ dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
+
+ if (!dev->ieee80211_ptr) {
--- /dev/null
+From stable+bounces-231406-greg=kroah.com@vger.kernel.org Tue Mar 31 13:43:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2026 07:39:33 -0400
+Subject: x86/cpu: Enable FSGSBASE early in cpu_init_exception_handling()
+To: stable@vger.kernel.org
+Cc: Nikunj A Dadhania <nikunj@amd.com>, Borislav Petkov <bp@alien8.de>, Sohil Mehta <sohil.mehta@intel.com>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260331113933.2081102-1-sashal@kernel.org>
+
+From: Nikunj A Dadhania <nikunj@amd.com>
+
+[ Upstream commit 05243d490bb7852a8acca7b5b5658019c7797a52 ]
+
+Move FSGSBASE enablement from identify_cpu() to cpu_init_exception_handling()
+to ensure it is enabled before any exceptions can occur on both boot and
+secondary CPUs.
+
+== Background ==
+
+Exception entry code (paranoid_entry()) uses ALTERNATIVE patching based on
+X86_FEATURE_FSGSBASE to decide whether to use RDGSBASE/WRGSBASE instructions
+or the slower RDMSR/SWAPGS sequence for saving/restoring GSBASE.
+
+On boot CPU, ALTERNATIVE patching happens after enabling FSGSBASE in CR4.
+When the feature is available, the code is permanently patched to use
+RDGSBASE/WRGSBASE, which require CR4.FSGSBASE=1 to execute without triggering
+
+== Boot Sequence ==
+
+Boot CPU (with CR pinning enabled):
+ trap_init()
+ cpu_init() <- Uses unpatched code (RDMSR/SWAPGS)
+ x2apic_setup()
+ ...
+ arch_cpu_finalize_init()
+ identify_boot_cpu()
+ identify_cpu()
+ cr4_set_bits(X86_CR4_FSGSBASE) # Enables the feature
+ # This becomes part of cr4_pinned_bits
+ ...
+ alternative_instructions() <- Patches code to use RDGSBASE/WRGSBASE
+
+Secondary CPUs (with CR pinning enabled):
+ start_secondary()
+ cr4_init() <- Code already patched, CR4.FSGSBASE=1
+ set implicitly via cr4_pinned_bits
+
+ cpu_init() <- exceptions work because FSGSBASE is
+ already enabled
+
+Secondary CPU (with CR pinning disabled):
+ start_secondary()
+ cr4_init() <- Code already patched, CR4.FSGSBASE=0
+ cpu_init()
+ x2apic_setup()
+ rdmsrq(MSR_IA32_APICBASE) <- Triggers #VC in SNP guests
+ exc_vmm_communication()
+ paranoid_entry() <- Uses RDGSBASE with CR4.FSGSBASE=0
+ (patched code)
+ ...
+ ap_starting()
+ identify_secondary_cpu()
+ identify_cpu()
+ cr4_set_bits(X86_CR4_FSGSBASE) <- Enables the feature, which is
+ too late
+
+== CR Pinning ==
+
+Currently, for secondary CPUs, CR4.FSGSBASE is set implicitly through
+CR-pinning: the boot CPU sets it during identify_cpu(), it becomes part of
+cr4_pinned_bits, and cr4_init() applies those pinned bits to secondary CPUs.
+This works but creates an undocumented dependency between cr4_init() and the
+pinning mechanism.
+
+== Problem ==
+
+Secondary CPUs boot after alternatives have been applied globally. They
+execute already-patched paranoid_entry() code that uses RDGSBASE/WRGSBASE
+instructions, which require CR4.FSGSBASE=1. Upcoming changes to CR pinning
+behavior will break the implicit dependency, causing secondary CPUs to
+generate #UD.
+
+This issue manifests itself on AMD SEV-SNP guests, where the rdmsrq() in
+x2apic_setup() triggers a #VC exception early during cpu_init(). The #VC
+handler (exc_vmm_communication()) executes the patched paranoid_entry() path.
+Without CR4.FSGSBASE enabled, RDGSBASE instructions trigger #UD.
+
+== Fix ==
+
+Enable FSGSBASE explicitly in cpu_init_exception_handling() before loading
+exception handlers. This makes the dependency explicit and ensures both
+boot and secondary CPUs have FSGSBASE enabled before paranoid_entry()
+executes.
+
+Fixes: c82965f9e530 ("x86/entry/64: Handle FSGSBASE enabled paranoid entry/exit")
+Reported-by: Borislav Petkov <bp@alien8.de>
+Suggested-by: Sohil Mehta <sohil.mehta@intel.com>
+Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
+Cc: <stable@kernel.org>
+Link: https://patch.msgid.link/20260318075654.1792916-2-nikunj@amd.com
+[ adapted to cpu_init_exception_handling(void) lacking FRED and LASS support ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2012,12 +2012,6 @@ static void identify_cpu(struct cpuinfo_
+ setup_smap(c);
+ setup_umip(c);
+
+- /* Enable FSGSBASE instructions if available. */
+- if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
+- cr4_set_bits(X86_CR4_FSGSBASE);
+- elf_hwcap2 |= HWCAP2_FSGSBASE;
+- }
+-
+ /*
+ * The vendor-specific functions might have changed features.
+ * Now we do "generic changes."
+@@ -2349,6 +2343,18 @@ void cpu_init_exception_handling(void)
+ /* GHCB needs to be setup to handle #VC. */
+ setup_ghcb();
+
++ /*
++ * On CPUs with FSGSBASE support, paranoid_entry() uses
++ * ALTERNATIVE-patched RDGSBASE/WRGSBASE instructions. Secondary CPUs
++ * boot after alternatives are patched globally, so early exceptions
++ * execute patched code that depends on FSGSBASE. Enable the feature
++ * before any exceptions occur.
++ */
++ if (cpu_feature_enabled(X86_FEATURE_FSGSBASE)) {
++ cr4_set_bits(X86_CR4_FSGSBASE);
++ elf_hwcap2 |= HWCAP2_FSGSBASE;
++ }
++
+ /* Finally load the IDT */
+ load_current_idt();
+ }