git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 7 Sep 2025 07:43:51 +0000 (09:43 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 7 Sep 2025 07:43:51 +0000 (09:43 +0200)
added patches:
dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch
ext4-avoid-journaling-sb-update-on-error-if-journal-is-destroying.patch
ext4-define-ext4_journal_destroy-wrapper.patch
fs-fhandle.c-fix-a-race-in-call-of-has_locked_children.patch
kunit-kasan_test-disable-fortify-string-checker-on-kasan_strings-test.patch
md-md-bitmap-fix-wrong-bitmap_limit-for-clustermd-when-write-sb.patch
md-raid1-raid10-don-t-handle-io-error-for-req_rahead-and-req_nowait.patch
md-raid1-raid10-don-t-ignore-io-flags.patch
md-raid1-raid10-strip-req_nowait-from-member-bios.patch
mm-fix-accounting-of-memmap-pages.patch
mm-slab-cleanup-slab_bug-parameters.patch
mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch
mm-slub-call-warn-when-detecting-a-slab-corruption.patch
mm-slub-print-the-broken-data-before-restoring-them.patch
net-dsa-add-hook-to-determine-whether-eee-is-supported.patch
net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch
net-dsa-b53-do-not-enable-eee-on-bcm63xx.patch
net-dsa-provide-implementation-of-.support_eee.patch
net-fix-null-pointer-dereference-in-l3mdev_l3_rcv.patch
nouveau-fix-disabling-the-nonstall-irq-due-to-storm-code.patch
thermal-drivers-mediatek-lvts-disable-low-offset-irq-for-minimum-threshold.patch
wifi-ath11k-update-channel-list-in-reg-notifier-instead-reg-worker.patch
wifi-ath11k-update-channel-list-in-worker-when-wait-flag-is-set.patch

24 files changed:
queue-6.12/dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch [new file with mode: 0644]
queue-6.12/ext4-avoid-journaling-sb-update-on-error-if-journal-is-destroying.patch [new file with mode: 0644]
queue-6.12/ext4-define-ext4_journal_destroy-wrapper.patch [new file with mode: 0644]
queue-6.12/fs-fhandle.c-fix-a-race-in-call-of-has_locked_children.patch [new file with mode: 0644]
queue-6.12/kunit-kasan_test-disable-fortify-string-checker-on-kasan_strings-test.patch [new file with mode: 0644]
queue-6.12/md-md-bitmap-fix-wrong-bitmap_limit-for-clustermd-when-write-sb.patch [new file with mode: 0644]
queue-6.12/md-raid1-raid10-don-t-handle-io-error-for-req_rahead-and-req_nowait.patch [new file with mode: 0644]
queue-6.12/md-raid1-raid10-don-t-ignore-io-flags.patch [new file with mode: 0644]
queue-6.12/md-raid1-raid10-strip-req_nowait-from-member-bios.patch [new file with mode: 0644]
queue-6.12/mm-fix-accounting-of-memmap-pages.patch [new file with mode: 0644]
queue-6.12/mm-slab-cleanup-slab_bug-parameters.patch [new file with mode: 0644]
queue-6.12/mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch [new file with mode: 0644]
queue-6.12/mm-slub-call-warn-when-detecting-a-slab-corruption.patch [new file with mode: 0644]
queue-6.12/mm-slub-print-the-broken-data-before-restoring-them.patch [new file with mode: 0644]
queue-6.12/net-dsa-add-hook-to-determine-whether-eee-is-supported.patch [new file with mode: 0644]
queue-6.12/net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch [new file with mode: 0644]
queue-6.12/net-dsa-b53-do-not-enable-eee-on-bcm63xx.patch [new file with mode: 0644]
queue-6.12/net-dsa-provide-implementation-of-.support_eee.patch [new file with mode: 0644]
queue-6.12/net-fix-null-pointer-dereference-in-l3mdev_l3_rcv.patch [new file with mode: 0644]
queue-6.12/nouveau-fix-disabling-the-nonstall-irq-due-to-storm-code.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/thermal-drivers-mediatek-lvts-disable-low-offset-irq-for-minimum-threshold.patch [new file with mode: 0644]
queue-6.12/wifi-ath11k-update-channel-list-in-reg-notifier-instead-reg-worker.patch [new file with mode: 0644]
queue-6.12/wifi-ath11k-update-channel-list-in-worker-when-wait-flag-is-set.patch [new file with mode: 0644]

diff --git a/queue-6.12/dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch b/queue-6.12/dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch
new file mode 100644 (file)
index 0000000..2028690
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-177887-greg=kroah.com@vger.kernel.org Fri Sep  5 21:38:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  5 Sep 2025 15:38:20 -0400
+Subject: dmaengine: mediatek: Fix a possible deadlock error in mtk_cqdma_tx_status()
+To: stable@vger.kernel.org
+Cc: Qiu-ji Chen <chenqiuji666@gmail.com>, AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>, Vinod Koul <vkoul@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250905193820.3342853-1-sashal@kernel.org>
+
+From: Qiu-ji Chen <chenqiuji666@gmail.com>
+
+[ Upstream commit 157ae5ffd76a2857ccb4b7ce40bc5a344ca00395 ]
+
+Fix a potential deadlock bug. Observe that in the mtk-cqdma.c
+file, functions like mtk_cqdma_issue_pending() and
+mtk_cqdma_free_active_desc() properly acquire the pc lock before the vc
+lock when handling pc and vc fields. However, mtk_cqdma_tx_status()
+violates this order by first acquiring the vc lock before invoking
+mtk_cqdma_find_active_desc(), which subsequently takes the pc lock. This
+reversed locking sequence (vc → pc) contradicts the established
+pc → vc order and creates deadlock risks.
+
+Fix the issue by moving the vc lock acquisition code from
+mtk_cqdma_find_active_desc() to mtk_cqdma_tx_status(). Ensure the pc lock
+is acquired before the vc lock in the calling function to maintain correct
+locking hierarchy. Note that since mtk_cqdma_find_active_desc() is a
+static function with only one caller (mtk_cqdma_tx_status()), this
+modification safely eliminates the deadlock possibility without affecting
+other components.
+
+This possible bug was found by an experimental static analysis tool
+developed by our team. This tool analyzes the locking APIs to extract
+function pairs that can be concurrently executed, and then analyzes the
+instructions in the paired functions to identify possible concurrency bugs
+including deadlocks, data races and atomicity violations.
+
+Fixes: b1f01e48df5a ("dmaengine: mediatek: Add MediaTek Command-Queue DMA controller for MT6765 SoC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Qiu-ji Chen <chenqiuji666@gmail.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Link: https://lore.kernel.org/r/20250508073634.3719-1-chenqiuji666@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/mediatek/mtk-cqdma.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/dma/mediatek/mtk-cqdma.c
++++ b/drivers/dma/mediatek/mtk-cqdma.c
+@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_f
+ {
+       struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
+       struct virt_dma_desc *vd;
+-      unsigned long flags;
+-      spin_lock_irqsave(&cvc->pc->lock, flags);
+       list_for_each_entry(vd, &cvc->pc->queue, node)
+               if (vd->tx.cookie == cookie) {
+-                      spin_unlock_irqrestore(&cvc->pc->lock, flags);
+                       return vd;
+               }
+-      spin_unlock_irqrestore(&cvc->pc->lock, flags);
+       list_for_each_entry(vd, &cvc->vc.desc_issued, node)
+               if (vd->tx.cookie == cookie)
+@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_stat
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
++      spin_lock_irqsave(&cvc->pc->lock, flags);
+       spin_lock_irqsave(&cvc->vc.lock, flags);
+       vd = mtk_cqdma_find_active_desc(c, cookie);
+       spin_unlock_irqrestore(&cvc->vc.lock, flags);
++      spin_unlock_irqrestore(&cvc->pc->lock, flags);
+       if (vd) {
+               cvd = to_cqdma_vdesc(vd);
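
The fix applies the standard rule for avoiding ABBA deadlocks: every path must
take the pc lock before the vc lock. A minimal userspace sketch of that rule,
not taken from the driver (pthread mutexes stand in for the pc/vc spinlocks,
and all names are illustrative):

	#include <pthread.h>

	static pthread_mutex_t pc_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t vc_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Caller must hold both locks, like mtk_cqdma_find_active_desc()
	 * after the fix. */
	static int find_active_desc(int cookie)
	{
		return cookie;	/* placeholder for the descriptor walk */
	}

	static int tx_status(int cookie)
	{
		int vd;

		/* pc before vc, matching issue_pending() and
		 * free_active_desc(); taking vc first is what opened
		 * the deadlock window. */
		pthread_mutex_lock(&pc_lock);
		pthread_mutex_lock(&vc_lock);
		vd = find_active_desc(cookie);
		pthread_mutex_unlock(&vc_lock);
		pthread_mutex_unlock(&pc_lock);
		return vd;
	}

	int main(void)
	{
		return tx_status(0);
	}
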
diff --git a/queue-6.12/ext4-avoid-journaling-sb-update-on-error-if-journal-is-destroying.patch b/queue-6.12/ext4-avoid-journaling-sb-update-on-error-if-journal-is-destroying.patch
new file mode 100644 (file)
index 0000000..1ee4d0b
--- /dev/null
@@ -0,0 +1,165 @@
+From ce2f26e73783b4a7c46a86e3af5b5c8de0971790 Mon Sep 17 00:00:00 2001
+From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Date: Tue, 18 Mar 2025 13:22:56 +0530
+Subject: ext4: avoid journaling sb update on error if journal is destroying
+
+From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+
+commit ce2f26e73783b4a7c46a86e3af5b5c8de0971790 upstream.
+
+Presently we always BUG_ON if trying to start a transaction on a journal marked
+with JBD2_UNMOUNT, since this should never happen. However, while running ltp
+stress tests, it was observed that in some error handling paths it is
+possible for update_super_work to start a transaction after the journal is
+destroyed, e.g.:
+
+(umount)
+ext4_kill_sb
+  kill_block_super
+    generic_shutdown_super
+      sync_filesystem /* commits all txns */
+      evict_inodes
+        /* might start a new txn */
+      ext4_put_super
+       flush_work(&sbi->s_sb_upd_work) /* flush the workqueue */
+        jbd2_journal_destroy
+          journal_kill_thread
+            journal->j_flags |= JBD2_UNMOUNT;
+          jbd2_journal_commit_transaction
+            jbd2_journal_get_descriptor_buffer
+              jbd2_journal_bmap
+                ext4_journal_bmap
+                  ext4_map_blocks
+                    ...
+                    ext4_inode_error
+                      ext4_handle_error
+                        schedule_work(&sbi->s_sb_upd_work)
+
+                                               /* work queue kicks in */
+                                               update_super_work
+                                                 jbd2_journal_start
+                                                   start_this_handle
+                                                     BUG_ON(journal->j_flags &
+                                                            JBD2_UNMOUNT)
+
+Hence, introduce a new mount flag to indicate the journal is being destroyed
+and only do a journaled (and deferred) update of the sb if this flag is not
+set. Otherwise, just fall back to an un-journaled commit.
+
+Further, in the journal destroy path, we have the following sequence:
+
+  1. Set mount flag indicating journal is destroying
+  2. force a commit and wait for it
+  3. flush pending sb updates
+
+This sequence is important as it ensures that, after this point, there is no sb
+update that might be journaled, so it is safe to update the sb outside the
+journal. (This avoids the race discussed in commit 2d01ddc86606.)
+
+Also, we don't need a similar check in ext4_grp_locked_error since it is only
+called from mballoc and AFAICT it would always be valid to schedule work here.
+
+Fixes: 2d01ddc86606 ("ext4: save error info to sb through journal if available")
+Reported-by: Mahesh Kumar <maheshkumar657g@gmail.com>
+Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/9613c465d6ff00cd315602f99283d5f24018c3f7.1742279837.git.ojaswin@linux.ibm.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4.h      |    3 ++-
+ fs/ext4/ext4_jbd2.h |   15 +++++++++++++++
+ fs/ext4/super.c     |   16 ++++++++--------
+ 3 files changed, 25 insertions(+), 9 deletions(-)
+
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1823,7 +1823,8 @@ static inline int ext4_valid_inum(struct
+  */
+ enum {
+       EXT4_MF_MNTDIR_SAMPLED,
+-      EXT4_MF_FC_INELIGIBLE   /* Fast commit ineligible */
++      EXT4_MF_FC_INELIGIBLE,  /* Fast commit ineligible */
++      EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */
+ };
+ static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -521,6 +521,21 @@ static inline int ext4_journal_destroy(s
+ {
+       int err = 0;
++      /*
++       * At this point only two things can be operating on the journal.
++       * JBD2 thread performing transaction commit and s_sb_upd_work
++       * issuing sb update through the journal. Once we set
++       * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
++       * queue s_sb_upd_work and ext4_force_commit() makes sure any
++       * ext4_handle_error() calls from the running transaction commit are
++       * finished. Hence no new s_sb_upd_work can be queued after we
++       * flush it here.
++       */
++      ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
++
++      ext4_force_commit(sbi->s_sb);
++      flush_work(&sbi->s_sb_upd_work);
++
+       err = jbd2_journal_destroy(journal);
+       sbi->s_journal = NULL;
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -719,9 +719,13 @@ static void ext4_handle_error(struct sup
+                * In case the fs should keep running, we need to writeout
+                * superblock through the journal. Due to lock ordering
+                * constraints, it may not be safe to do it right here so we
+-               * defer superblock flushing to a workqueue.
++               * defer superblock flushing to a workqueue. We just need to be
++               * careful when the journal is already shutting down. If we get
++               * here in that case, just update the sb directly as the last
++               * transaction won't commit anyway.
+                */
+-              if (continue_fs && journal)
++              if (continue_fs && journal &&
++                  !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
+                       schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
+               else
+                       ext4_commit_super(sb);
+@@ -1306,7 +1310,6 @@ static void ext4_put_super(struct super_
+       ext4_unregister_li_request(sb);
+       ext4_quotas_off(sb, EXT4_MAXQUOTAS);
+-      flush_work(&sbi->s_sb_upd_work);
+       destroy_workqueue(sbi->rsv_conversion_wq);
+       ext4_release_orphan_info(sb);
+@@ -1316,7 +1319,8 @@ static void ext4_put_super(struct super_
+               if ((err < 0) && !aborted) {
+                       ext4_abort(sb, -err, "Couldn't clean up the journal");
+               }
+-      }
++      } else
++              flush_work(&sbi->s_sb_upd_work);
+       ext4_es_unregister_shrinker(sbi);
+       timer_shutdown_sync(&sbi->s_err_report);
+@@ -4954,8 +4958,6 @@ static int ext4_load_and_init_journal(st
+       return 0;
+ out:
+-      /* flush s_sb_upd_work before destroying the journal. */
+-      flush_work(&sbi->s_sb_upd_work);
+       ext4_journal_destroy(sbi, sbi->s_journal);
+       return -EINVAL;
+ }
+@@ -5645,8 +5647,6 @@ failed_mount_wq:
+       sbi->s_ea_block_cache = NULL;
+       if (sbi->s_journal) {
+-              /* flush s_sb_upd_work before journal destroy. */
+-              flush_work(&sbi->s_sb_upd_work);
+               ext4_journal_destroy(sbi, sbi->s_journal);
+       }
+ failed_mount3a:
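
The ordering in the destroy path is the load-bearing part of the fix: set the
flag, force a commit, then flush the worker, after which any late error falls
back to a direct sb commit. A hedged userspace sketch of that handshake (an
atomic flag and a boolean stand in for the mount flag and s_sb_upd_work; none
of this is ext4's real API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool journal_destroying;	/* EXT4_MF_JOURNAL_DESTROY stand-in */
	static bool sb_update_queued;		/* s_sb_upd_work stand-in */

	static void handle_error(void)
	{
		/* Defer through the journal only while journaling the
		 * update is still safe. */
		if (!atomic_load(&journal_destroying))
			sb_update_queued = true;	/* schedule_work() */
		else
			puts("commit sb directly");	/* ext4_commit_super() */
	}

	static void journal_destroy(void)
	{
		atomic_store(&journal_destroying, true); /* 1. set flag */
		/* 2. force a commit and wait for it (elided) */
		sb_update_queued = false;		 /* 3. flush_work() */
		/* No new journaled sb update can be queued past this point. */
	}

	int main(void)
	{
		journal_destroy();
		handle_error();		/* takes the direct-commit path */
		return 0;
	}
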
diff --git a/queue-6.12/ext4-define-ext4_journal_destroy-wrapper.patch b/queue-6.12/ext4-define-ext4_journal_destroy-wrapper.patch
new file mode 100644 (file)
index 0000000..829bb6e
--- /dev/null
@@ -0,0 +1,107 @@
+From 5a02a6204ca37e7c22fbb55a789c503f05e8e89a Mon Sep 17 00:00:00 2001
+From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Date: Tue, 18 Mar 2025 13:22:55 +0530
+Subject: ext4: define ext4_journal_destroy wrapper
+
+From: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+
+commit 5a02a6204ca37e7c22fbb55a789c503f05e8e89a upstream.
+
+Define an ext4 wrapper over jbd2_journal_destroy to make sure we
+have consistent behavior during journal destruction. This will also
+come in useful in the next patch, where we add some ext4-specific
+logic in the destroy path.
+
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
+Link: https://patch.msgid.link/c3ba78c5c419757e6d5f2d8ebb4a8ce9d21da86a.1742279837.git.ojaswin@linux.ibm.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/ext4_jbd2.h |   14 ++++++++++++++
+ fs/ext4/super.c     |   16 ++++++----------
+ 2 files changed, 20 insertions(+), 10 deletions(-)
+
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -513,4 +513,18 @@ static inline int ext4_should_dioread_no
+       return 1;
+ }
++/*
++ * Pass journal explicitly as it may not be cached in the sbi->s_journal in some
++ * cases
++ */
++static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
++{
++      int err = 0;
++
++      err = jbd2_journal_destroy(journal);
++      sbi->s_journal = NULL;
++
++      return err;
++}
++
+ #endif        /* _EXT4_JBD2_H */
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1312,8 +1312,7 @@ static void ext4_put_super(struct super_
+       if (sbi->s_journal) {
+               aborted = is_journal_aborted(sbi->s_journal);
+-              err = jbd2_journal_destroy(sbi->s_journal);
+-              sbi->s_journal = NULL;
++              err = ext4_journal_destroy(sbi, sbi->s_journal);
+               if ((err < 0) && !aborted) {
+                       ext4_abort(sb, -err, "Couldn't clean up the journal");
+               }
+@@ -4957,8 +4956,7 @@ static int ext4_load_and_init_journal(st
+ out:
+       /* flush s_sb_upd_work before destroying the journal. */
+       flush_work(&sbi->s_sb_upd_work);
+-      jbd2_journal_destroy(sbi->s_journal);
+-      sbi->s_journal = NULL;
++      ext4_journal_destroy(sbi, sbi->s_journal);
+       return -EINVAL;
+ }
+@@ -5649,8 +5647,7 @@ failed_mount_wq:
+       if (sbi->s_journal) {
+               /* flush s_sb_upd_work before journal destroy. */
+               flush_work(&sbi->s_sb_upd_work);
+-              jbd2_journal_destroy(sbi->s_journal);
+-              sbi->s_journal = NULL;
++              ext4_journal_destroy(sbi, sbi->s_journal);
+       }
+ failed_mount3a:
+       ext4_es_unregister_shrinker(sbi);
+@@ -5958,7 +5955,7 @@ static journal_t *ext4_open_dev_journal(
+       return journal;
+ out_journal:
+-      jbd2_journal_destroy(journal);
++      ext4_journal_destroy(EXT4_SB(sb), journal);
+ out_bdev:
+       bdev_fput(bdev_file);
+       return ERR_PTR(errno);
+@@ -6075,8 +6072,7 @@ static int ext4_load_journal(struct supe
+       EXT4_SB(sb)->s_journal = journal;
+       err = ext4_clear_journal_err(sb, es);
+       if (err) {
+-              EXT4_SB(sb)->s_journal = NULL;
+-              jbd2_journal_destroy(journal);
++              ext4_journal_destroy(EXT4_SB(sb), journal);
+               return err;
+       }
+@@ -6094,7 +6090,7 @@ static int ext4_load_journal(struct supe
+       return 0;
+ err_out:
+-      jbd2_journal_destroy(journal);
++      ext4_journal_destroy(EXT4_SB(sb), journal);
+       return err;
+ }
diff --git a/queue-6.12/fs-fhandle.c-fix-a-race-in-call-of-has_locked_children.patch b/queue-6.12/fs-fhandle.c-fix-a-race-in-call-of-has_locked_children.patch
new file mode 100644 (file)
index 0000000..f43c346
--- /dev/null
@@ -0,0 +1,89 @@
+From 1f282cdc1d219c4a557f7009e81bc792820d9d9a Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 1 Jun 2025 14:23:52 -0400
+Subject: fs/fhandle.c: fix a race in call of has_locked_children()
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 1f282cdc1d219c4a557f7009e81bc792820d9d9a upstream.
+
+may_decode_fh() is calling has_locked_children() while holding no locks.
+That's an oopsable race...
+
+The rest of the callers are safe since they are holding namespace_sem and
+are guaranteed a positive refcount on the mount in question.
+
+Rename the current has_locked_children() to __has_locked_children(), make
+it static and switch the fs/namespace.c users to it.
+
+Make has_locked_children() a wrapper for __has_locked_children(), calling
+the latter under read_seqlock_excl(&mount_lock).
+
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Fixes: 620c266f3949 ("fhandle: relax open_by_handle_at() permission checks")
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+[ Harshit: Resolved conflicts due to missing commit:
+  db04662e2f4f ("fs: allow detached mounts in clone_private_mount()") in
+  linux-6.12.y ]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/namespace.c |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2227,7 +2227,7 @@ void drop_collected_mounts(struct vfsmou
+       namespace_unlock();
+ }
+-bool has_locked_children(struct mount *mnt, struct dentry *dentry)
++static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
+ {
+       struct mount *child;
+@@ -2241,6 +2241,16 @@ bool has_locked_children(struct mount *m
+       return false;
+ }
++bool has_locked_children(struct mount *mnt, struct dentry *dentry)
++{
++      bool res;
++
++      read_seqlock_excl(&mount_lock);
++      res = __has_locked_children(mnt, dentry);
++      read_sequnlock_excl(&mount_lock);
++      return res;
++}
++
+ /**
+  * clone_private_mount - create a private clone of a path
+  * @path: path to clone
+@@ -2268,7 +2278,7 @@ struct vfsmount *clone_private_mount(con
+               return ERR_PTR(-EPERM);
+       }
+-      if (has_locked_children(old_mnt, path->dentry))
++      if (__has_locked_children(old_mnt, path->dentry))
+               goto invalid;
+       new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+@@ -2762,7 +2772,7 @@ static struct mount *__do_loopback(struc
+       if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
+               return mnt;
+-      if (!recurse && has_locked_children(old, old_path->dentry))
++      if (!recurse && __has_locked_children(old, old_path->dentry))
+               return mnt;
+       if (recurse)
+@@ -3152,7 +3162,7 @@ static int do_set_group(struct path *fro
+               goto out;
+       /* From mount should not have locked children in place of To's root */
+-      if (has_locked_children(from, to->mnt.mnt_root))
++      if (__has_locked_children(from, to->mnt.mnt_root))
+               goto out;
+       /* Setting sharing groups is only allowed on private mounts */
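
The shape of the fix is a common kernel idiom: a double-underscore helper that
assumes the lock is held, plus a public wrapper that takes it. Holders of
namespace_sem keep calling the unlocked form; the lockless fhandle path gets
the locked one. A minimal sketch of the idiom, with a plain mutex standing in
for read_seqlock_excl(&mount_lock):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t mount_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Internal form: caller must already hold mount_lock. */
	static bool __has_locked_children(int mnt)
	{
		return mnt < 0;		/* placeholder for the child walk */
	}

	/* Public form: safe for callers holding no locks, such as
	 * may_decode_fh(). */
	static bool has_locked_children(int mnt)
	{
		bool res;

		pthread_mutex_lock(&mount_lock);
		res = __has_locked_children(mnt);
		pthread_mutex_unlock(&mount_lock);
		return res;
	}

	int main(void)
	{
		return has_locked_children(1);
	}
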
diff --git a/queue-6.12/kunit-kasan_test-disable-fortify-string-checker-on-kasan_strings-test.patch b/queue-6.12/kunit-kasan_test-disable-fortify-string-checker-on-kasan_strings-test.patch
new file mode 100644 (file)
index 0000000..1a693a5
--- /dev/null
@@ -0,0 +1,58 @@
+From stable+bounces-178017-greg=kroah.com@vger.kernel.org Sun Sep  7 04:29:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 22:29:28 -0400
+Subject: kunit: kasan_test: disable fortify string checker on kasan_strings() test
+To: stable@vger.kernel.org
+Cc: Yeoreum Yun <yeoreum.yun@arm.com>, Alexander Potapenko <glider@google.com>, Andrey Konovalov <andreyknvl@gmail.com>, Andrey Ryabinin <ryabinin.a.a@gmail.com>, Dmitriy Vyukov <dvyukov@google.com>, Vincenzo Frascino <vincenzo.frascino@arm.com>, Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20250907022928.412807-1-sashal@kernel.org>
+
+From: Yeoreum Yun <yeoreum.yun@arm.com>
+
+Similar to commit 09c6304e38e4 ("kasan: test: fix compatibility with
+FORTIFY_SOURCE") the kernel is panicing in kasan_string().
+
+This is due to `src` and `ptr` not being hidden from the optimizer;
+hiding them would disable the runtime fortify string checker.
+
+Call trace:
+  __fortify_panic+0x10/0x20 (P)
+  kasan_strings+0x980/0x9b0
+  kunit_try_run_case+0x68/0x190
+  kunit_generic_run_threadfn_adapter+0x34/0x68
+  kthread+0x1c4/0x228
+  ret_from_fork+0x10/0x20
+ Code: d503233f a9bf7bfd 910003fd 9424b243 (d4210000)
+ ---[ end trace 0000000000000000 ]---
+ note: kunit_try_catch[128] exited with irqs disabled
+ note: kunit_try_catch[128] exited with preempt_count 1
+     # kasan_strings: try faulted: last
+** replaying previous printk message **
+     # kasan_strings: try faulted: last line seen mm/kasan/kasan_test_c.c:1600
+     # kasan_strings: internal error occurred preventing test case from running: -4
+
+Link: https://lkml.kernel.org/r/20250801120236.2962642-1-yeoreum.yun@arm.com
+Fixes: 73228c7ecc5e ("KASAN: port KASAN Tests to KUnit")
+Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+[ One less test in older trees ]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/kasan_test_c.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/kasan/kasan_test_c.c
++++ b/mm/kasan/kasan_test_c.c
+@@ -1548,6 +1548,7 @@ static void kasan_strings(struct kunit *
+       ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
++      OPTIMIZER_HIDE_VAR(ptr);
+       kfree(ptr);
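
OPTIMIZER_HIDE_VAR() routes the pointer through an empty asm, so the compiler
can no longer track the allocation it came from and FORTIFY_SOURCE falls back
to its runtime behaviour instead of tripping over the test's intentional
accesses. A hedged userspace illustration; the macro body mirrors the kernel's
compiler.h definition, the rest is illustrative:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Empty asm that makes the pointer's origin opaque to the
	 * optimizer, as in the kernel's <linux/compiler.h>. */
	#define OPTIMIZER_HIDE_VAR(var) __asm__("" : "+r"(var))

	int main(void)
	{
		char *ptr = malloc(16);

		if (!ptr)
			return 1;
		strcpy(ptr, "kasan");
		/* Without this, a fortified str*() call may be folded or
		 * checked at compile time from the known malloc size. */
		OPTIMIZER_HIDE_VAR(ptr);
		printf("%zu\n", strlen(ptr));
		free(ptr);
		return 0;
	}
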
diff --git a/queue-6.12/md-md-bitmap-fix-wrong-bitmap_limit-for-clustermd-when-write-sb.patch b/queue-6.12/md-md-bitmap-fix-wrong-bitmap_limit-for-clustermd-when-write-sb.patch
new file mode 100644 (file)
index 0000000..490bf07
--- /dev/null
@@ -0,0 +1,62 @@
+From 6130825f34d41718c98a9b1504a79a23e379701e Mon Sep 17 00:00:00 2001
+From: Su Yue <glass.su@suse.com>
+Date: Mon, 3 Mar 2025 11:39:18 +0800
+Subject: md/md-bitmap: fix wrong bitmap_limit for clustermd when write sb
+
+From: Su Yue <glass.su@suse.com>
+
+commit 6130825f34d41718c98a9b1504a79a23e379701e upstream.
+
+In clustermd, separate write-intent-bitmaps are used for each cluster
+node:
+
+0                    4k                     8k                    12k
+-------------------------------------------------------------------
+| idle                | md super            | bm super [0] + bits |
+| bm bits[0, contd]   | bm super[1] + bits  | bm bits[1, contd]   |
+| bm super[2] + bits  | bm bits [2, contd]  | bm super[3] + bits  |
+| bm bits [3, contd]  |                     |                     |
+
+So in node 1, pg_index in __write_sb_page() could be equal to
+bitmap->storage.file_pages. Then bitmap_limit will be calculated as
+0, and md_super_write() will be called with a size of 0.
+That means the first 4k sb area of node 1 will never be updated
+through filemap_write_page().
+This bug causes a hang of mdadm/clustermd_tests/01r1_Grow_resize.
+
+Here use (pg_index % bitmap->storage.file_pages) to make the
+calculation of bitmap_limit correct.
+
+Fixes: ab99a87542f1 ("md/md-bitmap: fix writing non bitmap pages")
+Signed-off-by: Su Yue <glass.su@suse.com>
+Reviewed-by: Heming Zhao <heming.zhao@suse.com>
+Link: https://lore.kernel.org/linux-raid/20250303033918.32136-1-glass.su@suse.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md-bitmap.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -426,8 +426,8 @@ static int __write_sb_page(struct md_rde
+       struct block_device *bdev;
+       struct mddev *mddev = bitmap->mddev;
+       struct bitmap_storage *store = &bitmap->storage;
+-      unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
+-              PAGE_SHIFT;
++      unsigned long num_pages = bitmap->storage.file_pages;
++      unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT;
+       loff_t sboff, offset = mddev->bitmap_info.offset;
+       sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
+       unsigned int size = PAGE_SIZE;
+@@ -436,7 +436,7 @@ static int __write_sb_page(struct md_rde
+       bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+       /* we compare length (page numbers), not page offset. */
+-      if ((pg_index - store->sb_index) == store->file_pages - 1) {
++      if ((pg_index - store->sb_index) == num_pages - 1) {
+               unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
+               if (last_page_size == 0)
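
The arithmetic is easiest to see with concrete numbers. If each node's bitmap
occupies 3 pages, node 1's superblock page arrives with pg_index ==
file_pages, so the old formula collapses to 0 bytes while the modulo form
yields the full remaining span (the values below are illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long file_pages = 3;	/* pages per node, illustrative */
		unsigned long pg_index = 3;	/* node 1's sb page == file_pages */

		unsigned int old = (file_pages - pg_index) << PAGE_SHIFT;
		unsigned int new = (file_pages - pg_index % file_pages) << PAGE_SHIFT;

		printf("old bitmap_limit: %u\n", old);	/* 0: sb never written */
		printf("new bitmap_limit: %u\n", new);	/* 12288 */
		return 0;
	}
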
diff --git a/queue-6.12/md-raid1-raid10-don-t-handle-io-error-for-req_rahead-and-req_nowait.patch b/queue-6.12/md-raid1-raid10-don-t-handle-io-error-for-req_rahead-and-req_nowait.patch
new file mode 100644 (file)
index 0000000..9c56d84
--- /dev/null
@@ -0,0 +1,138 @@
+From 9f346f7d4ea73692b82f5102ca8698e4040469ea Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Tue, 27 May 2025 16:14:07 +0800
+Subject: md/raid1,raid10: don't handle IO error for REQ_RAHEAD and REQ_NOWAIT
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 9f346f7d4ea73692b82f5102ca8698e4040469ea upstream.
+
+IO with REQ_RAHEAD or REQ_NOWAIT can fail early, even if the storage medium
+is fine, hence recording badblocks or removing the disk from the array
+does not make sense.
+
+This problem is found by the lvm2 test lvcreate-large-raid, where dm-zero
+will fail read ahead IO directly.
+
+Fixes: e879a0d9cb08 ("md/raid1,raid10: don't ignore IO flags")
+Reported-and-tested-by: Mikulas Patocka <mpatocka@redhat.com>
+Closes: https://lore.kernel.org/all/34fa755d-62c8-4588-8ee1-33cb1249bdf2@redhat.com/
+Link: https://lore.kernel.org/linux-raid/20250527081407.3004055-1-yukuai1@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid1-10.c |   10 ++++++++++
+ drivers/md/raid1.c    |   19 ++++++++++---------
+ drivers/md/raid10.c   |   11 ++++++-----
+ 3 files changed, 26 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/raid1-10.c
++++ b/drivers/md/raid1-10.c
+@@ -293,3 +293,13 @@ static inline bool raid1_should_read_fir
+       return false;
+ }
++
++/*
++ * bio with REQ_RAHEAD or REQ_NOWAIT can fail at anytime, before such IO is
++ * submitted to the underlying disks, hence don't record badblocks or retry
++ * in this case.
++ */
++static inline bool raid1_should_handle_error(struct bio *bio)
++{
++      return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT));
++}
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -371,14 +371,16 @@ static void raid1_end_read_request(struc
+        */
+       update_head_pos(r1_bio->read_disk, r1_bio);
+-      if (uptodate)
++      if (uptodate) {
+               set_bit(R1BIO_Uptodate, &r1_bio->state);
+-      else if (test_bit(FailFast, &rdev->flags) &&
+-               test_bit(R1BIO_FailFast, &r1_bio->state))
++      } else if (test_bit(FailFast, &rdev->flags) &&
++               test_bit(R1BIO_FailFast, &r1_bio->state)) {
+               /* This was a fail-fast read so we definitely
+                * want to retry */
+               ;
+-      else {
++      } else if (!raid1_should_handle_error(bio)) {
++              uptodate = 1;
++      } else {
+               /* If all other devices have failed, we want to return
+                * the error upwards rather than fail the last device.
+                * Here we redefine "uptodate" to mean "Don't want to retry"
+@@ -449,16 +451,15 @@ static void raid1_end_write_request(stru
+       struct bio *to_put = NULL;
+       int mirror = find_bio_disk(r1_bio, bio);
+       struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+-      bool discard_error;
+       sector_t lo = r1_bio->sector;
+       sector_t hi = r1_bio->sector + r1_bio->sectors;
+-
+-      discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
++      bool ignore_error = !raid1_should_handle_error(bio) ||
++              (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
+       /*
+        * 'one mirror IO has finished' event handler:
+        */
+-      if (bio->bi_status && !discard_error) {
++      if (bio->bi_status && !ignore_error) {
+               set_bit(WriteErrorSeen, &rdev->flags);
+               if (!test_and_set_bit(WantReplacement, &rdev->flags))
+                       set_bit(MD_RECOVERY_NEEDED, &
+@@ -509,7 +510,7 @@ static void raid1_end_write_request(stru
+               /* Maybe we can clear some bad blocks. */
+               if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+-                  !discard_error) {
++                  !ignore_error) {
+                       r1_bio->bios[mirror] = IO_MADE_GOOD;
+                       set_bit(R1BIO_MadeGood, &r1_bio->state);
+               }
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -398,6 +398,8 @@ static void raid10_end_read_request(stru
+                * wait for the 'master' bio.
+                */
+               set_bit(R10BIO_Uptodate, &r10_bio->state);
++      } else if (!raid1_should_handle_error(bio)) {
++              uptodate = 1;
+       } else {
+               /* If all other devices that store this block have
+                * failed, we want to return the error upwards rather
+@@ -455,9 +457,8 @@ static void raid10_end_write_request(str
+       int slot, repl;
+       struct md_rdev *rdev = NULL;
+       struct bio *to_put = NULL;
+-      bool discard_error;
+-
+-      discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
++      bool ignore_error = !raid1_should_handle_error(bio) ||
++              (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
+       dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+@@ -471,7 +472,7 @@ static void raid10_end_write_request(str
+       /*
+        * this branch is our 'one mirror IO has finished' event handler:
+        */
+-      if (bio->bi_status && !discard_error) {
++      if (bio->bi_status && !ignore_error) {
+               if (repl)
+                       /* Never record new bad blocks to replacement,
+                        * just fail it.
+@@ -526,7 +527,7 @@ static void raid10_end_write_request(str
+               /* Maybe we can clear some bad blocks. */
+               if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+                                     r10_bio->sectors) &&
+-                  !discard_error) {
++                  !ignore_error) {
+                       bio_put(bio);
+                       if (repl)
+                               r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
diff --git a/queue-6.12/md-raid1-raid10-don-t-ignore-io-flags.patch b/queue-6.12/md-raid1-raid10-don-t-ignore-io-flags.patch
new file mode 100644 (file)
index 0000000..6ab8a89
--- /dev/null
@@ -0,0 +1,101 @@
+From e879a0d9cb086c8e52ce6c04e5bfa63825a6213c Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 27 Feb 2025 20:16:57 +0800
+Subject: md/raid1,raid10: don't ignore IO flags
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit e879a0d9cb086c8e52ce6c04e5bfa63825a6213c upstream.
+
+If blk-wbt is enabled by default, raid write performance is found to be
+quite bad because all IO is throttled by wbt of the underlying disks,
+since the flag REQ_IDLE is ignored. It turns out this behaviour has
+existed since blk-wbt was introduced.
+
+Other than REQ_IDLE, other flags should not be ignored either: for
+example, REQ_META can be set by filesystems, and clearing it can cause
+priority inversion problems; REQ_NOWAIT should not be cleared either,
+because IO will wait instead of failing directly in the underlying disks.
+
+Fix those problems by keeping the IO flags from the master bio.
+
+Fixes: f51d46d0e7cb ("md: add support for REQ_NOWAIT")
+Fixes: e34cbd307477 ("blk-wbt: add general throttling mechanism")
+Fixes: 5404bc7a87b9 ("[PATCH] Allow file systems to differentiate between data and meta reads")
+Link: https://lore.kernel.org/linux-raid/20250227121657.832356-1-yukuai1@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+[ Harshit: Resolve conflicts due to missing commit: f2a38abf5f1c
+  ("md/raid1: Atomic write support") and  commit: a1d9b4fd42d9
+  ("md/raid10: Atomic write support") in 6.12.y, we don't have Atomic
+  writes feature in 6.12.y ]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid1.c  |    4 ----
+ drivers/md/raid10.c |    7 -------
+ 2 files changed, 11 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1315,8 +1315,6 @@ static void raid1_read_request(struct md
+       struct r1conf *conf = mddev->private;
+       struct raid1_info *mirror;
+       struct bio *read_bio;
+-      const enum req_op op = bio_op(bio);
+-      const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+       int max_sectors;
+       int rdisk;
+       bool r1bio_existed = !!r1_bio;
+@@ -1399,7 +1397,6 @@ static void raid1_read_request(struct md
+       read_bio->bi_iter.bi_sector = r1_bio->sector +
+               mirror->rdev->data_offset;
+       read_bio->bi_end_io = raid1_end_read_request;
+-      read_bio->bi_opf = op | do_sync;
+       if (test_bit(FailFast, &mirror->rdev->flags) &&
+           test_bit(R1BIO_FailFast, &r1_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+@@ -1619,7 +1616,6 @@ static void raid1_write_request(struct m
+               mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
+               mbio->bi_end_io = raid1_end_write_request;
+-              mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
+               if (test_bit(FailFast, &rdev->flags) &&
+                   !test_bit(WriteMostly, &rdev->flags) &&
+                   conf->raid_disks - mddev->degraded > 1)
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1146,8 +1146,6 @@ static void raid10_read_request(struct m
+ {
+       struct r10conf *conf = mddev->private;
+       struct bio *read_bio;
+-      const enum req_op op = bio_op(bio);
+-      const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+       int max_sectors;
+       struct md_rdev *rdev;
+       char b[BDEVNAME_SIZE];
+@@ -1226,7 +1224,6 @@ static void raid10_read_request(struct m
+       read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+               choose_data_offset(r10_bio, rdev);
+       read_bio->bi_end_io = raid10_end_read_request;
+-      read_bio->bi_opf = op | do_sync;
+       if (test_bit(FailFast, &rdev->flags) &&
+           test_bit(R10BIO_FailFast, &r10_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+@@ -1240,9 +1237,6 @@ static void raid10_write_one_disk(struct
+                                 struct bio *bio, bool replacement,
+                                 int n_copy)
+ {
+-      const enum req_op op = bio_op(bio);
+-      const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
+-      const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+       unsigned long flags;
+       struct r10conf *conf = mddev->private;
+       struct md_rdev *rdev;
+@@ -1261,7 +1255,6 @@ static void raid10_write_one_disk(struct
+       mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
+                                  choose_data_offset(r10_bio, rdev));
+       mbio->bi_end_io = raid10_end_write_request;
+-      mbio->bi_opf = op | do_sync | do_fua;
+       if (!replacement && test_bit(FailFast,
+                                    &conf->mirrors[devnum].rdev->flags)
+                        && enough(conf, devnum))
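
The regression is visible as plain bit arithmetic: rebuilding bi_opf from a
narrow mask drops REQ_IDLE, REQ_META and REQ_NOWAIT, even though
bio_alloc_clone() had already copied the master bio's flags intact. A hedged
sketch with stand-in flag values (the real ones live in
include/linux/blk_types.h):

	#include <stdio.h>

	/* Stand-in values, not the kernel's. */
	#define REQ_SYNC	(1u << 0)
	#define REQ_META	(1u << 1)
	#define REQ_IDLE	(1u << 2)
	#define REQ_NOWAIT	(1u << 3)
	#define REQ_FUA		(1u << 4)

	int main(void)
	{
		unsigned int master = REQ_SYNC | REQ_META | REQ_IDLE | REQ_NOWAIT;

		/* Old raid1/raid10 behaviour: rebuild from a narrow mask. */
		unsigned int rebuilt = master & (REQ_SYNC | REQ_FUA);

		/* 0xe: REQ_META, REQ_IDLE and REQ_NOWAIT are lost, so wbt
		 * throttles the IO; the fix is to stop overwriting the
		 * flags the clone inherited. */
		printf("dropped flags: 0x%x\n", master & ~rebuilt);
		return 0;
	}
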
diff --git a/queue-6.12/md-raid1-raid10-strip-req_nowait-from-member-bios.patch b/queue-6.12/md-raid1-raid10-strip-req_nowait-from-member-bios.patch
new file mode 100644 (file)
index 0000000..e249843
--- /dev/null
@@ -0,0 +1,67 @@
+From 5fa31c49928139fa948f078b094d80f12ed83f5f Mon Sep 17 00:00:00 2001
+From: Zheng Qixing <zhengqixing@huawei.com>
+Date: Wed, 2 Jul 2025 18:23:41 +0800
+Subject: md/raid1,raid10: strip REQ_NOWAIT from member bios
+
+From: Zheng Qixing <zhengqixing@huawei.com>
+
+commit 5fa31c49928139fa948f078b094d80f12ed83f5f upstream.
+
+RAID layers don't implement proper non-blocking semantics for
+REQ_NOWAIT, making the flag potentially misleading when propagated
+to member disks.
+
+This patch clears REQ_NOWAIT from cloned bios in raid1/raid10, retaining
+the original bio's REQ_NOWAIT flag for upper layer error handling.
+
+Maybe we can implement non-blocking I/O handling mechanisms within
+RAID in future work.
+
+Fixes: 9f346f7d4ea7 ("md/raid1,raid10: don't handle IO error for REQ_RAHEAD and REQ_NOWAIT")
+Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
+Link: https://lore.kernel.org/linux-raid/20250702102341.1969154-1-zhengqixing@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid1.c  |    3 ++-
+ drivers/md/raid10.c |    2 ++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1392,7 +1392,7 @@ static void raid1_read_request(struct md
+       }
+       read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+                                  &mddev->bio_set);
+-
++      read_bio->bi_opf &= ~REQ_NOWAIT;
+       r1_bio->bios[rdisk] = read_bio;
+       read_bio->bi_iter.bi_sector = r1_bio->sector +
+@@ -1613,6 +1613,7 @@ static void raid1_write_request(struct m
+                               wait_for_serialization(rdev, r1_bio);
+               }
++              mbio->bi_opf &= ~REQ_NOWAIT;
+               r1_bio->bios[i] = mbio;
+               mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1218,6 +1218,7 @@ static void raid10_read_request(struct m
+               r10_bio->master_bio = bio;
+       }
+       read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
++      read_bio->bi_opf &= ~REQ_NOWAIT;
+       r10_bio->devs[slot].bio = read_bio;
+       r10_bio->devs[slot].rdev = rdev;
+@@ -1248,6 +1249,7 @@ static void raid10_write_one_disk(struct
+                            conf->mirrors[devnum].rdev;
+       mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
++      mbio->bi_opf &= ~REQ_NOWAIT;
+       if (replacement)
+               r10_bio->devs[n_copy].repl_bio = mbio;
+       else
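
The change itself is one bit-clear per cloned bio: member bios lose
REQ_NOWAIT because raid1/raid10 cannot honour it without blocking, while the
master bio keeps it so upper layers still get non-blocking failure semantics.
A self-contained sketch (the flag value is a stand-in):

	#include <assert.h>

	#define REQ_NOWAIT	(1u << 3)	/* stand-in value */

	int main(void)
	{
		unsigned int master_opf = REQ_NOWAIT;
		unsigned int clone_opf = master_opf; /* bio_alloc_clone() copies flags */

		clone_opf &= ~REQ_NOWAIT;	/* the fix, per member bio */

		assert(!(clone_opf & REQ_NOWAIT));	/* member may block */
		assert(master_opf & REQ_NOWAIT);	/* caller semantics kept */
		return 0;
	}
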
diff --git a/queue-6.12/mm-fix-accounting-of-memmap-pages.patch b/queue-6.12/mm-fix-accounting-of-memmap-pages.patch
new file mode 100644 (file)
index 0000000..ceaea5f
--- /dev/null
@@ -0,0 +1,112 @@
+From stable+bounces-178014-greg=kroah.com@vger.kernel.org Sun Sep  7 03:44:46 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 21:44:36 -0400
+Subject: mm: fix accounting of memmap pages
+To: stable@vger.kernel.org
+Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>, David Hildenbrand <david@redhat.com>, Wei Yang <richard.weiyang@gmail.com>, Alexander Gordeev <agordeev@linux.ibm.com>, Gerald Schaefer <gerald.schaefer@linux.ibm.com>, Heiko Carstens <hca@linux.ibm.com>, Vasily Gorbik <gor@linux.ibm.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250907014436.393471-1-sashal@kernel.org>
+
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+
+[ Upstream commit c3576889d87b603cb66b417e08844a53c1077a37 ]
+
+For !CONFIG_SPARSEMEM_VMEMMAP, memmap page accounting is currently done
+upfront in sparse_buffer_init().  However, sparse_buffer_alloc() may
+return NULL in a failure scenario.
+
+Also, memmap pages may be allocated either from the memblock allocator
+during early boot or from the buddy allocator.  When removed via
+arch_remove_memory(), accounting of memmap pages must reflect the original
+allocation source.
+
+To ensure correctness:
+* Account memmap pages after successful allocation in sparse_init_nid()
+  and section_activate().
+* Account memmap pages in section_deactivate() based on allocation
+  source.
+
+Link: https://lkml.kernel.org/r/20250807183545.1424509-1-sumanthk@linux.ibm.com
+Fixes: 15995a352474 ("mm: report per-page metadata information")
+Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Cc: Alexander Gordeev <agordeev@linux.ibm.com>
+Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/sparse-vmemmap.c |    5 -----
+ mm/sparse.c         |   15 +++++++++------
+ 2 files changed, 9 insertions(+), 11 deletions(-)
+
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -474,10 +474,5 @@ struct page * __meminit __populate_secti
+       if (r < 0)
+               return NULL;
+-      if (system_state == SYSTEM_BOOTING)
+-              memmap_boot_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
+-      else
+-              memmap_pages_add(DIV_ROUND_UP(end - start, PAGE_SIZE));
+-
+       return pfn_to_page(pfn);
+ }
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -462,9 +462,6 @@ static void __init sparse_buffer_init(un
+        */
+       sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
+       sparsemap_buf_end = sparsemap_buf + size;
+-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+-      memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
+-#endif
+ }
+ static void __init sparse_buffer_fini(void)
+@@ -532,6 +529,8 @@ static void __init sparse_init_nid(int n
+                       sparse_buffer_fini();
+                       goto failed;
+               }
++              memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
++                                                 PAGE_SIZE));
+               check_usemap_section_nr(nid, usage);
+               sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
+                               SECTION_IS_EARLY);
+@@ -643,7 +642,6 @@ static void depopulate_section_memmap(un
+       unsigned long start = (unsigned long) pfn_to_page(pfn);
+       unsigned long end = start + nr_pages * sizeof(struct page);
+-      memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
+       vmemmap_free(start, end, altmap);
+ }
+ static void free_map_bootmem(struct page *memmap)
+@@ -819,10 +817,14 @@ static void section_deactivate(unsigned
+        * The memmap of early sections is always fully populated. See
+        * section_activate() and pfn_valid() .
+        */
+-      if (!section_is_early)
++      if (!section_is_early) {
++              memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
+               depopulate_section_memmap(pfn, nr_pages, altmap);
+-      else if (memmap)
++      } else if (memmap) {
++              memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
++                                                        PAGE_SIZE)));
+               free_map_bootmem(memmap);
++      }
+       if (empty)
+               ms->section_mem_map = (unsigned long)NULL;
+@@ -867,6 +869,7 @@ static struct page * __meminit section_a
+               section_deactivate(pfn, nr_pages, altmap);
+               return ERR_PTR(-ENOMEM);
+       }
++      memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
+       return memmap;
+ }
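
The invariant being restored is symmetric bookkeeping: increment a counter
only after the allocation succeeds, and decrement the same counter on
teardown, chosen by where the memmap came from (memblock at early boot vs.
the buddy allocator at hotplug). A hedged sketch with plain counters standing
in for the kernel's accounting helpers:

	#include <assert.h>
	#include <stdbool.h>

	static long boot_pages, hotplug_pages;	/* counter stand-ins */

	/* Call only after the memmap allocation succeeded. */
	static void account_alloc(long pages, bool early)
	{
		if (early)
			boot_pages += pages;	/* memmap_boot_pages_add() */
		else
			hotplug_pages += pages;	/* memmap_pages_add() */
	}

	/* Must mirror the original allocation source. */
	static void account_free(long pages, bool early)
	{
		if (early)
			boot_pages -= pages;
		else
			hotplug_pages -= pages;
	}

	int main(void)
	{
		account_alloc(8, false);	/* section_activate() path */
		account_free(8, false);		/* section_deactivate() path */
		assert(boot_pages == 0 && hotplug_pages == 0);
		return 0;
	}
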
diff --git a/queue-6.12/mm-slab-cleanup-slab_bug-parameters.patch b/queue-6.12/mm-slab-cleanup-slab_bug-parameters.patch
new file mode 100644 (file)
index 0000000..ac020b2
--- /dev/null
@@ -0,0 +1,118 @@
+From stable+bounces-178009-greg=kroah.com@vger.kernel.org Sat Sep  6 23:25:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 17:25:29 -0400
+Subject: mm, slab: cleanup slab_bug() parameters
+To: stable@vger.kernel.org
+Cc: Vlastimil Babka <vbabka@suse.cz>, Harry Yoo <harry.yoo@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906212530.302670-3-sashal@kernel.org>
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+[ Upstream commit 4b183dd9359d5772446cb634b12a383bed98c4fc ]
+
+slab_err() has variadic printf arguments but instead of passing them to
+slab_bug() it does vsnprintf() to a buffer and passes %s, buf.
+
+To allow passing them directly, turn slab_bug() into __slab_bug() with a
+va_list parameter, and make slab_bug() a wrapper with fmt, ... parameters.
+Then slab_err() can call __slab_bug() without the intermediate buffer.
+
+Also constify fmt everywhere, which also simplifies object_err()'s
+call to slab_bug().
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Stable-dep-of: b4efccec8d06 ("mm/slub: avoid accessing metadata when pointer is invalid in object_err()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1027,12 +1027,12 @@ void skip_orig_size_check(struct kmem_ca
+       set_orig_size(s, (void *)object, s->object_size);
+ }
+-static void slab_bug(struct kmem_cache *s, char *fmt, ...)
++static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
+ {
+       struct va_format vaf;
+       va_list args;
+-      va_start(args, fmt);
++      va_copy(args, argsp);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       pr_err("=============================================================================\n");
+@@ -1041,8 +1041,17 @@ static void slab_bug(struct kmem_cache *
+       va_end(args);
+ }
++static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
++{
++      va_list args;
++
++      va_start(args, fmt);
++      __slab_bug(s, fmt, args);
++      va_end(args);
++}
++
+ __printf(2, 3)
+-static void slab_fix(struct kmem_cache *s, char *fmt, ...)
++static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
+ {
+       struct va_format vaf;
+       va_list args;
+@@ -1098,12 +1107,12 @@ static void print_trailer(struct kmem_ca
+ }
+ static void object_err(struct kmem_cache *s, struct slab *slab,
+-                      u8 *object, char *reason)
++                      u8 *object, const char *reason)
+ {
+       if (slab_add_kunit_errors())
+               return;
+-      slab_bug(s, "%s", reason);
++      slab_bug(s, reason);
+       print_trailer(s, slab, object);
+       add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+@@ -1139,15 +1148,14 @@ static __printf(3, 4) void slab_err(stru
+                       const char *fmt, ...)
+ {
+       va_list args;
+-      char buf[100];
+       if (slab_add_kunit_errors())
+               return;
+       va_start(args, fmt);
+-      vsnprintf(buf, sizeof(buf), fmt, args);
++      __slab_bug(s, fmt, args);
+       va_end(args);
+-      slab_bug(s, "%s", buf);
++
+       __slab_err(slab);
+ }
+@@ -1185,7 +1193,7 @@ static void init_object(struct kmem_cach
+                                         s->inuse - poison_size);
+ }
+-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
++static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
+                                               void *from, void *to)
+ {
+       slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
+@@ -1200,7 +1208,7 @@ static void restore_bytes(struct kmem_ca
+ static pad_check_attributes int
+ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
+-                     u8 *object, char *what, u8 *start, unsigned int value,
++                     u8 *object, const char *what, u8 *start, unsigned int value,
+                      unsigned int bytes, bool slab_obj_print)
+ {
+       u8 *fault;
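
The refactor is the classic printf()/vprintf() split: the worker takes a
va_list (copied with va_copy() because the caller may still need it), and the
variadic entry point becomes a thin shim, so slab_err() can forward its own
va_list without a bounce buffer. A self-contained sketch of the idiom:

	#include <stdarg.h>
	#include <stdio.h>

	/* Worker, like __slab_bug(): consumes a caller-owned va_list. */
	static void __report(const char *fmt, va_list argsp)
	{
		va_list args;

		va_copy(args, argsp);	/* leave the caller's list reusable */
		fputs("BUG: ", stderr);
		vfprintf(stderr, fmt, args);
		fputc('\n', stderr);
		va_end(args);
	}

	/* Variadic shim, like slab_bug(). */
	static void report(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		__report(fmt, args);
		va_end(args);
	}

	int main(void)
	{
		report("object at %p, size %d", (void *)0, 42);
		return 0;
	}
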
diff --git a/queue-6.12/mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch b/queue-6.12/mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch
new file mode 100644 (file)
index 0000000..4407d02
--- /dev/null
@@ -0,0 +1,54 @@
+From stable+bounces-178010-greg=kroah.com@vger.kernel.org Sat Sep  6 23:25:52 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 17:25:30 -0400
+Subject: mm/slub: avoid accessing metadata when pointer is invalid in object_err()
+To: stable@vger.kernel.org
+Cc: Li Qiong <liqiong@nfschina.com>, Harry Yoo <harry.yoo@oracle.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Vlastimil Babka <vbabka@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906212530.302670-4-sashal@kernel.org>
+
+From: Li Qiong <liqiong@nfschina.com>
+
+[ Upstream commit b4efccec8d06ceb10a7d34d7b1c449c569d53770 ]
+
+object_err() reports details of an object for further debugging, such as
+the freelist pointer, redzone, etc. However, if the pointer is invalid,
+attempting to access object metadata can lead to a crash since it does
+not point to a valid object.
+
+One known path to the crash is when alloc_consistency_checks()
+determines the pointer to the allocated object is invalid because of a
+freelist corruption, and calls object_err() to report it. The debug code
+should report and handle the corruption gracefully and not crash in the
+process.
+
+In case the pointer is NULL or check_valid_pointer() returns false for
+the pointer, only print the pointer value and skip accessing metadata.
+
+Fixes: 81819f0fc828 ("SLUB core")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Li Qiong <liqiong@nfschina.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1113,7 +1113,12 @@ static void object_err(struct kmem_cache
+               return;
+       slab_bug(s, reason);
+-      print_trailer(s, slab, object);
++      if (!object || !check_valid_pointer(s, slab, object)) {
++              print_slab_info(slab);
++              pr_err("Invalid pointer 0x%p\n", object);
++      } else {
++              print_trailer(s, slab, object);
++      }
+       add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+       WARN_ON(1);
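
The guard is a cheap range-and-alignment check: only dereference object
metadata when the pointer lies inside the slab and on an object boundary;
otherwise report just the raw value. A hedged sketch of such a check (the
layout fields are illustrative, not SLUB's structures):

	#include <stdint.h>
	#include <stdio.h>

	struct slab_info {
		uintptr_t base;		/* first object address */
		size_t size;		/* object stride, incl. metadata */
		unsigned int objects;	/* objects per slab */
	};

	static int check_valid_pointer(const struct slab_info *s, uintptr_t obj)
	{
		if (obj < s->base ||
		    obj >= s->base + (uintptr_t)s->objects * s->size)
			return 0;			/* outside the slab */
		return (obj - s->base) % s->size == 0;	/* object boundary */
	}

	int main(void)
	{
		struct slab_info s = { .base = 0x1000, .size = 64, .objects = 32 };
		uintptr_t bad = 0x1010;	/* mid-object: skip its metadata */

		if (!check_valid_pointer(&s, bad))
			printf("Invalid pointer 0x%lx\n", (unsigned long)bad);
		return 0;
	}
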
diff --git a/queue-6.12/mm-slub-call-warn-when-detecting-a-slab-corruption.patch b/queue-6.12/mm-slub-call-warn-when-detecting-a-slab-corruption.patch
new file mode 100644 (file)
index 0000000..8c72e26
--- /dev/null
@@ -0,0 +1,160 @@
+From stable+bounces-178008-greg=kroah.com@vger.kernel.org Sat Sep  6 23:25:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 17:25:28 -0400
+Subject: mm: slub: call WARN() when detecting a slab corruption
+To: stable@vger.kernel.org
+Cc: Hyesoo Yu <hyesoo.yu@samsung.com>, Harry Yoo <harry.yoo@oracle.com>, Vlastimil Babka <vbabka@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906212530.302670-2-sashal@kernel.org>
+
+From: Hyesoo Yu <hyesoo.yu@samsung.com>
+
+[ Upstream commit 3f6f32b14ab35452d2ed52f7821cf2829923c98d ]
+
+If a slab object is corrupted or an error occurs in its internal
+validation, continuing after restoration may cause other side effects.
+At this point, it is difficult to debug because the problem occurred in
+the past. It is useful to use WARN() to catch errors at the point of
+issue because WARN() can trigger a panic for system debugging when
+panic_on_warn is enabled. WARN() is added where slab_err and object_err
+detect the error.
+
+It makes sense to only do the WARN() after printing the logs. slab_err
+is split into __slab_err, which calls the WARN() and is called after
+printing the logs.
+
+Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Stable-dep-of: b4efccec8d06 ("mm/slub: avoid accessing metadata when pointer is invalid in object_err()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   47 +++++++++++++++++++++++++++++------------------
+ 1 file changed, 29 insertions(+), 18 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1036,7 +1036,7 @@ static void slab_bug(struct kmem_cache *
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       pr_err("=============================================================================\n");
+-      pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
++      pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
+       pr_err("-----------------------------------------------------------------------------\n\n");
+       va_end(args);
+ }
+@@ -1095,8 +1095,6 @@ static void print_trailer(struct kmem_ca
+               /* Beginning of the filler is the free pointer */
+               print_section(KERN_ERR, "Padding  ", p + off,
+                             size_from_object(s) - off);
+-
+-      dump_stack();
+ }
+ static void object_err(struct kmem_cache *s, struct slab *slab,
+@@ -1108,6 +1106,8 @@ static void object_err(struct kmem_cache
+       slab_bug(s, "%s", reason);
+       print_trailer(s, slab, object);
+       add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++
++      WARN_ON(1);
+ }
+ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
+@@ -1124,6 +1124,17 @@ static bool freelist_corrupted(struct km
+       return false;
+ }
++static void __slab_err(struct slab *slab)
++{
++      if (slab_in_kunit_test())
++              return;
++
++      print_slab_info(slab);
++      add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++
++      WARN_ON(1);
++}
++
+ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
+                       const char *fmt, ...)
+ {
+@@ -1137,9 +1148,7 @@ static __printf(3, 4) void slab_err(stru
+       vsnprintf(buf, sizeof(buf), fmt, args);
+       va_end(args);
+       slab_bug(s, "%s", buf);
+-      print_slab_info(slab);
+-      dump_stack();
+-      add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++      __slab_err(slab);
+ }
+ static void init_object(struct kmem_cache *s, void *object, u8 val)
+@@ -1312,9 +1321,10 @@ slab_pad_check(struct kmem_cache *s, str
+       while (end > fault && end[-1] == POISON_INUSE)
+               end--;
+-      slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+-                      fault, end - 1, fault - start);
++      slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
++               fault, end - 1, fault - start);
+       print_section(KERN_ERR, "Padding ", pad, remainder);
++      __slab_err(slab);
+       restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
+ }
+@@ -1630,12 +1640,12 @@ static inline int free_consistency_check
+                       slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
+                                object);
+               } else if (!slab->slab_cache) {
+-                      pr_err("SLUB <none>: no slab for object 0x%p.\n",
+-                             object);
+-                      dump_stack();
+-              } else
++                      slab_err(NULL, slab, "No slab cache for object 0x%p",
++                               object);
++              } else {
+                       object_err(s, slab, object,
+-                                      "page slab pointer corrupt.");
++                                 "page slab pointer corrupt.");
++              }
+               return 0;
+       }
+       return 1;
+@@ -5450,14 +5460,14 @@ static int calculate_sizes(struct kmem_c
+       return !!oo_objects(s->oo);
+ }
+-static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
+-                            const char *text)
++static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
+ {
+ #ifdef CONFIG_SLUB_DEBUG
+       void *addr = slab_address(slab);
+       void *p;
+-      slab_err(s, slab, text, s->name);
++      if (!slab_add_kunit_errors())
++              slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
+       spin_lock(&object_map_lock);
+       __fill_map(object_map, s, slab);
+@@ -5472,6 +5482,8 @@ static void list_slab_objects(struct kme
+               }
+       }
+       spin_unlock(&object_map_lock);
++
++      __slab_err(slab);
+ #endif
+ }
+@@ -5492,8 +5504,7 @@ static void free_partial(struct kmem_cac
+                       remove_partial(n, slab);
+                       list_add(&slab->slab_list, &discard);
+               } else {
+-                      list_slab_objects(s, slab,
+-                        "Objects remaining in %s on __kmem_cache_shutdown()");
++                      list_slab_objects(s, slab);
+               }
+       }
+       spin_unlock_irq(&n->list_lock);
diff --git a/queue-6.12/mm-slub-print-the-broken-data-before-restoring-them.patch b/queue-6.12/mm-slub-print-the-broken-data-before-restoring-them.patch
new file mode 100644 (file)
index 0000000..24d8741
--- /dev/null
@@ -0,0 +1,130 @@
+From stable+bounces-178007-greg=kroah.com@vger.kernel.org Sat Sep  6 23:25:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 17:25:27 -0400
+Subject: mm: slub: Print the broken data before restoring them
+To: stable@vger.kernel.org
+Cc: Hyesoo Yu <hyesoo.yu@samsung.com>, Harry Yoo <harry.yoo@oracle.com>, Vlastimil Babka <vbabka@suse.cz>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250906212530.302670-1-sashal@kernel.org>
+
+From: Hyesoo Yu <hyesoo.yu@samsung.com>
+
+[ Upstream commit ed5ec2e952595a469eae1f6dce040737359b6da2 ]
+
+Previously, slub restored the bytes after printing the object. Since
+commit 47d911b02cbe ("slab: make check_object() more consistent"),
+the bytes are printed after the restore. The state of the bytes before
+the restore is highly valuable for debugging purposes: in the event of
+a cache issue, for instance, it displays the byte patterns broken down
+into 64-byte units. Without this information, we can only speculate on
+how the object was broken. Hence the corrupted regions should be
+printed prior to restoration. However, if an object is broken in
+multiple places, the same log may be output multiple times, so the
+slub report is emitted only once, by passing a parameter that
+indicates whether an error has already occurred.
+
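+Condensed, the resulting order inside check_bytes_and_report() is
+(names as in this patch's hunks):
+
+	/* 1. Print the raw broken bytes before they are overwritten. */
+	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+	       what, fault, end - 1, fault - addr, fault[0], value);
+
+	/* 2. Report the object once; 'slab_obj_print' is false when an
+	 *    earlier check in the same pass already reported it.
+	 */
+	if (slab_obj_print)
+		object_err(s, slab, object, "Object corrupt");
+
+	/* 3. Only then restore the expected pattern. */
+	restore_bytes(s, what, value, fault, end);
+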
+Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Stable-dep-of: b4efccec8d06 ("mm/slub: avoid accessing metadata when pointer is invalid in object_err()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   32 ++++++++++++++------------------
+ 1 file changed, 14 insertions(+), 18 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1191,8 +1191,8 @@ static void restore_bytes(struct kmem_ca
+ static pad_check_attributes int
+ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
+-                     u8 *object, char *what,
+-                     u8 *start, unsigned int value, unsigned int bytes)
++                     u8 *object, char *what, u8 *start, unsigned int value,
++                     unsigned int bytes, bool slab_obj_print)
+ {
+       u8 *fault;
+       u8 *end;
+@@ -1211,10 +1211,11 @@ check_bytes_and_report(struct kmem_cache
+       if (slab_add_kunit_errors())
+               goto skip_bug_print;
+-      slab_bug(s, "%s overwritten", what);
+-      pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+-                                      fault, end - 1, fault - addr,
+-                                      fault[0], value);
++      pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
++             what, fault, end - 1, fault - addr, fault[0], value);
++
++      if (slab_obj_print)
++              object_err(s, slab, object, "Object corrupt");
+ skip_bug_print:
+       restore_bytes(s, what, value, fault, end);
+@@ -1278,7 +1279,7 @@ static int check_pad_bytes(struct kmem_c
+               return 1;
+       return check_bytes_and_report(s, slab, p, "Object padding",
+-                      p + off, POISON_INUSE, size_from_object(s) - off);
++                      p + off, POISON_INUSE, size_from_object(s) - off, true);
+ }
+ /* Check the pad bytes at the end of a slab page */
+@@ -1328,11 +1329,11 @@ static int check_object(struct kmem_cach
+       if (s->flags & SLAB_RED_ZONE) {
+               if (!check_bytes_and_report(s, slab, object, "Left Redzone",
+-                      object - s->red_left_pad, val, s->red_left_pad))
++                      object - s->red_left_pad, val, s->red_left_pad, ret))
+                       ret = 0;
+               if (!check_bytes_and_report(s, slab, object, "Right Redzone",
+-                      endobject, val, s->inuse - s->object_size))
++                      endobject, val, s->inuse - s->object_size, ret))
+                       ret = 0;
+               if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+@@ -1341,7 +1342,7 @@ static int check_object(struct kmem_cach
+                       if (s->object_size > orig_size  &&
+                               !check_bytes_and_report(s, slab, object,
+                                       "kmalloc Redzone", p + orig_size,
+-                                      val, s->object_size - orig_size)) {
++                                      val, s->object_size - orig_size, ret)) {
+                               ret = 0;
+                       }
+               }
+@@ -1349,7 +1350,7 @@ static int check_object(struct kmem_cach
+               if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
+                       if (!check_bytes_and_report(s, slab, p, "Alignment padding",
+                               endobject, POISON_INUSE,
+-                              s->inuse - s->object_size))
++                              s->inuse - s->object_size, ret))
+                               ret = 0;
+               }
+       }
+@@ -1365,11 +1366,11 @@ static int check_object(struct kmem_cach
+                       if (kasan_meta_size < s->object_size - 1 &&
+                           !check_bytes_and_report(s, slab, p, "Poison",
+                                       p + kasan_meta_size, POISON_FREE,
+-                                      s->object_size - kasan_meta_size - 1))
++                                      s->object_size - kasan_meta_size - 1, ret))
+                               ret = 0;
+                       if (kasan_meta_size < s->object_size &&
+                           !check_bytes_and_report(s, slab, p, "End Poison",
+-                                      p + s->object_size - 1, POISON_END, 1))
++                                      p + s->object_size - 1, POISON_END, 1, ret))
+                               ret = 0;
+               }
+               /*
+@@ -1395,11 +1396,6 @@ static int check_object(struct kmem_cach
+               ret = 0;
+       }
+-      if (!ret && !slab_in_kunit_test()) {
+-              print_trailer(s, slab, object);
+-              add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+-      }
+-
+       return ret;
+ }
diff --git a/queue-6.12/net-dsa-add-hook-to-determine-whether-eee-is-supported.patch b/queue-6.12/net-dsa-add-hook-to-determine-whether-eee-is-supported.patch
new file mode 100644 (file)
index 0000000..e1232d1
--- /dev/null
@@ -0,0 +1,60 @@
+From 9723a77318b7c0cfd06ea207e52a042f8c815318 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Tue, 10 Dec 2024 14:18:16 +0000
+Subject: net: dsa: add hook to determine whether EEE is supported
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+commit 9723a77318b7c0cfd06ea207e52a042f8c815318 upstream.
+
+Add a hook to determine whether the switch supports EEE: it returns
+true if the switch does and false otherwise. If the method is not
+implemented, we currently assume that the switch supports EEE.
+
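+A hypothetical driver-side implementation could look like this (the
+driver name and port logic are made up for illustration):
+
+	/* EEE is supported on all user ports except the RGMII port 5. */
+	static bool foo_support_eee(struct dsa_switch *ds, int port)
+	{
+		return port != 5;
+	}
+
+	static const struct dsa_switch_ops foo_switch_ops = {
+		/* other ops elided */
+		.support_eee	= foo_support_eee,
+	};
+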
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/E1tL144-006cZD-El@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/dsa.h |    1 +
+ net/dsa/user.c    |    8 ++++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -1003,6 +1003,7 @@ struct dsa_switch_ops {
+       /*
+        * Port's MAC EEE settings
+        */
++      bool    (*support_eee)(struct dsa_switch *ds, int port);
+       int     (*set_mac_eee)(struct dsa_switch *ds, int port,
+                              struct ethtool_keee *e);
+       int     (*get_mac_eee)(struct dsa_switch *ds, int port,
+--- a/net/dsa/user.c
++++ b/net/dsa/user.c
+@@ -1231,6 +1231,10 @@ static int dsa_user_set_eee(struct net_d
+       struct dsa_switch *ds = dp->ds;
+       int ret;
++      /* Check whether the switch supports EEE */
++      if (ds->ops->support_eee && !ds->ops->support_eee(ds, dp->index))
++              return -EOPNOTSUPP;
++
+       /* Port's PHY and MAC both need to be EEE capable */
+       if (!dev->phydev || !dp->pl)
+               return -ENODEV;
+@@ -1251,6 +1255,10 @@ static int dsa_user_get_eee(struct net_d
+       struct dsa_switch *ds = dp->ds;
+       int ret;
++      /* Check whether the switch supports EEE */
++      if (ds->ops->support_eee && !ds->ops->support_eee(ds, dp->index))
++              return -EOPNOTSUPP;
++
+       /* Port's PHY and MAC both need to be EEE capable */
+       if (!dev->phydev || !dp->pl)
+               return -ENODEV;
diff --git a/queue-6.12/net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch b/queue-6.12/net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch
new file mode 100644 (file)
index 0000000..480675e
--- /dev/null
@@ -0,0 +1,86 @@
+From c86692fc2cb77d94dd8c166c2b9017f196d02a84 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Tue, 10 Dec 2024 14:18:26 +0000
+Subject: net: dsa: b53/bcm_sf2: implement .support_eee() method
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+commit c86692fc2cb77d94dd8c166c2b9017f196d02a84 upstream.
+
+Implement the .support_eee() method to indicate that EEE is not
+supported by two switch variants, rather than making these checks in
+the .set_mac_eee() and .get_mac_eee() methods.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/E1tL14E-006cZU-Nc@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/b53/b53_common.c |   13 +++++++------
+ drivers/net/dsa/b53/b53_priv.h   |    1 +
+ drivers/net/dsa/bcm_sf2.c        |    1 +
+ 3 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2388,13 +2388,16 @@ int b53_eee_init(struct dsa_switch *ds,
+ }
+ EXPORT_SYMBOL(b53_eee_init);
+-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
++bool b53_support_eee(struct dsa_switch *ds, int port)
+ {
+       struct b53_device *dev = ds->priv;
+-      if (is5325(dev) || is5365(dev))
+-              return -EOPNOTSUPP;
++      return !is5325(dev) && !is5365(dev);
++}
++EXPORT_SYMBOL(b53_support_eee);
++int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
++{
+       return 0;
+ }
+ EXPORT_SYMBOL(b53_get_mac_eee);
+@@ -2404,9 +2407,6 @@ int b53_set_mac_eee(struct dsa_switch *d
+       struct b53_device *dev = ds->priv;
+       struct ethtool_keee *p = &dev->ports[port].eee;
+-      if (is5325(dev) || is5365(dev))
+-              return -EOPNOTSUPP;
+-
+       p->eee_enabled = e->eee_enabled;
+       b53_eee_enable_set(ds, port, e->eee_enabled);
+@@ -2463,6 +2463,7 @@ static const struct dsa_switch_ops b53_s
+       .port_setup             = b53_setup_port,
+       .port_enable            = b53_enable_port,
+       .port_disable           = b53_disable_port,
++      .support_eee            = b53_support_eee,
+       .get_mac_eee            = b53_get_mac_eee,
+       .set_mac_eee            = b53_set_mac_eee,
+       .port_bridge_join       = b53_br_join,
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -387,6 +387,7 @@ int b53_enable_port(struct dsa_switch *d
+ void b53_disable_port(struct dsa_switch *ds, int port);
+ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
++bool b53_support_eee(struct dsa_switch *ds, int port);
+ int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1233,6 +1233,7 @@ static const struct dsa_switch_ops bcm_s
+       .port_setup             = b53_setup_port,
+       .port_enable            = bcm_sf2_port_setup,
+       .port_disable           = bcm_sf2_port_disable,
++      .support_eee            = b53_support_eee,
+       .get_mac_eee            = b53_get_mac_eee,
+       .set_mac_eee            = b53_set_mac_eee,
+       .port_bridge_join       = b53_br_join,
diff --git a/queue-6.12/net-dsa-b53-do-not-enable-eee-on-bcm63xx.patch b/queue-6.12/net-dsa-b53-do-not-enable-eee-on-bcm63xx.patch
new file mode 100644 (file)
index 0000000..6bdc3a3
--- /dev/null
@@ -0,0 +1,53 @@
+From 1237c2d4a8db79dfd4369bff6930b0e385ed7d5c Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jonas.gorski@gmail.com>
+Date: Mon, 2 Jun 2025 21:39:49 +0200
+Subject: net: dsa: b53: do not enable EEE on bcm63xx
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+commit 1237c2d4a8db79dfd4369bff6930b0e385ed7d5c upstream.
+
+BCM63xx internal switches do not support EEE, but provide multiple RGMII
+ports where external PHYs may be connected. If one of these PHYs is EEE
+capable, we may try to enable EEE for the MACs, which then hangs the
+system on access to the (non-existent) EEE registers.
+
+Fix this by checking if the switch actually supports EEE before
+attempting to configure it.
+
+Fixes: 22256b0afb12 ("net: dsa: b53: Move EEE functions to b53")
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Tested-by: Álvaro Fernández Rojas <noltari@gmail.com>
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Link: https://patch.msgid.link/20250602193953.1010487-2-jonas.gorski@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dsa/b53/b53_common.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2378,6 +2378,9 @@ int b53_eee_init(struct dsa_switch *ds,
+ {
+       int ret;
++      if (!b53_support_eee(ds, port))
++              return 0;
++
+       ret = phy_init_eee(phy, false);
+       if (ret)
+               return 0;
+@@ -2392,7 +2395,7 @@ bool b53_support_eee(struct dsa_switch *
+ {
+       struct b53_device *dev = ds->priv;
+-      return !is5325(dev) && !is5365(dev);
++      return !is5325(dev) && !is5365(dev) && !is63xx(dev);
+ }
+ EXPORT_SYMBOL(b53_support_eee);
diff --git a/queue-6.12/net-dsa-provide-implementation-of-.support_eee.patch b/queue-6.12/net-dsa-provide-implementation-of-.support_eee.patch
new file mode 100644 (file)
index 0000000..409e383
--- /dev/null
@@ -0,0 +1,63 @@
+From 99379f587278c818777cb4778e2c79c6c1440c65 Mon Sep 17 00:00:00 2001
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+Date: Tue, 10 Dec 2024 14:18:21 +0000
+Subject: net: dsa: provide implementation of .support_eee()
+
+From: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+
+commit 99379f587278c818777cb4778e2c79c6c1440c65 upstream.
+
+Provide a trivial implementation for the .support_eee() method, which
+switch drivers can use to indicate that they support EEE on all their
+user ports.
+
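+For example, a driver whose user ports all support EEE would simply
+point the method at this helper (hypothetical driver shown):
+
+	static const struct dsa_switch_ops foo_switch_ops = {
+		/* other ops elided */
+		.support_eee	= dsa_supports_eee,
+	};
+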
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://patch.msgid.link/E1tL149-006cZJ-JJ@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Harshit: Resolve contextual conflicts due to missing commit:
+  539770616521 ("net: dsa: remove obsolete phylink dsa_switch operations")
+  and commit: ecb595ebba0e ("net: dsa: remove
+  dsa_port_phylink_mac_select_pcs()") in 6.12.y ]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/dsa.h |    1 +
+ net/dsa/port.c    |   16 ++++++++++++++++
+ 2 files changed, 17 insertions(+)
+
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -1399,5 +1399,6 @@ static inline bool dsa_user_dev_check(co
+ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
+ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
++bool dsa_supports_eee(struct dsa_switch *ds, int port);
+ #endif
+--- a/net/dsa/port.c
++++ b/net/dsa/port.c
+@@ -1589,6 +1589,22 @@ dsa_port_phylink_mac_select_pcs(struct p
+       return pcs;
+ }
++/* dsa_supports_eee - indicate that EEE is supported
++ * @ds: pointer to &struct dsa_switch
++ * @port: port index
++ *
++ * A default implementation for the .support_eee() DSA operations member,
++ * which drivers can use to indicate that they support EEE on all of their
++ * user ports.
++ *
++ * Returns: true
++ */
++bool dsa_supports_eee(struct dsa_switch *ds, int port)
++{
++      return true;
++}
++EXPORT_SYMBOL_GPL(dsa_supports_eee);
++
+ static void dsa_port_phylink_mac_config(struct phylink_config *config,
+                                       unsigned int mode,
+                                       const struct phylink_link_state *state)
diff --git a/queue-6.12/net-fix-null-pointer-dereference-in-l3mdev_l3_rcv.patch b/queue-6.12/net-fix-null-pointer-dereference-in-l3mdev_l3_rcv.patch
new file mode 100644 (file)
index 0000000..6b818df
--- /dev/null
@@ -0,0 +1,62 @@
+From 0032c99e83b9ce6d5995d65900aa4b6ffb501cce Mon Sep 17 00:00:00 2001
+From: Wang Liang <wangliang74@huawei.com>
+Date: Fri, 21 Mar 2025 17:03:53 +0800
+Subject: net: fix NULL pointer dereference in l3mdev_l3_rcv
+
+From: Wang Liang <wangliang74@huawei.com>
+
+commit 0032c99e83b9ce6d5995d65900aa4b6ffb501cce upstream.
+
+When deleting an l3s ipvlan:
+
+    ip link del link eth0 ipvlan1 type ipvlan mode l3s
+
+This may cause a null pointer dereference:
+
+    Call trace:
+     ip_rcv_finish+0x48/0xd0
+     ip_rcv+0x5c/0x100
+     __netif_receive_skb_one_core+0x64/0xb0
+     __netif_receive_skb+0x20/0x80
+     process_backlog+0xb4/0x204
+     napi_poll+0xe8/0x294
+     net_rx_action+0xd8/0x22c
+     __do_softirq+0x12c/0x354
+
+This is because l3mdev_l3_rcv() visits dev->l3mdev_ops after
+ipvlan_l3s_unregister() has set dev->l3mdev_ops to NULL. The sequence
+is as follows:
+
+    (CPU1)                     | (CPU2)
+    l3mdev_l3_rcv()            |
+      check dev->priv_flags:   |
+        master = skb->dev;     |
+                               |
+                               | ipvlan_l3s_unregister()
+                               |   set dev->priv_flags
+                               |   dev->l3mdev_ops = NULL;
+                               |
+      visit master->l3mdev_ops |
+
+Avoid this by not clearing dev->l3mdev_ops when unregistering the l3s
+ipvlan.
+
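+For reference, the reader side in l3mdev_l3_rcv() looks roughly like
+this (condensed from include/net/l3mdev.h): once the
+IFF_L3MDEV_RX_HANDLER flag is cleared, new readers no longer take this
+path, while in-flight readers still find a valid ops pointer:
+
+	if (netif_is_l3_slave(skb->dev))
+		master = netdev_master_upper_dev_get_rcu(skb->dev);
+	else if (netif_is_l3_master(skb->dev) ||
+		 netif_has_l3_rx_handler(skb->dev))
+		master = skb->dev;
+
+	if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+		skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
+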
+Suggested-by: David Ahern <dsahern@kernel.org>
+Fixes: c675e06a98a4 ("ipvlan: decouple l3s mode dependencies from other modes")
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250321090353.1170545-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ipvlan/ipvlan_l3s.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -224,5 +224,4 @@ void ipvlan_l3s_unregister(struct ipvl_p
+       dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
+       ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
+-      dev->l3mdev_ops = NULL;
+ }
diff --git a/queue-6.12/nouveau-fix-disabling-the-nonstall-irq-due-to-storm-code.patch b/queue-6.12/nouveau-fix-disabling-the-nonstall-irq-due-to-storm-code.patch
new file mode 100644 (file)
index 0000000..da45e33
--- /dev/null
@@ -0,0 +1,152 @@
+From stable+bounces-178019-greg=kroah.com@vger.kernel.org Sun Sep  7 04:53:02 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 22:52:54 -0400
+Subject: nouveau: fix disabling the nonstall irq due to storm code
+To: stable@vger.kernel.org
+Cc: Dave Airlie <airlied@redhat.com>, Danilo Krummrich <dakr@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250907025254.430972-1-sashal@kernel.org>
+
+From: Dave Airlie <airlied@redhat.com>
+
+[ Upstream commit 0ef5c4e4dbbfcebaa9b2eca18097b43016727dfe ]
+
+Nouveau has code that disables an IRQ when it arrives with no allowed
+handler, to avoid interrupt storms.
+
+However, with nonstall interrupts we often disable them from the drm
+driver but still request their emission via push submission.
+
+Never disable nonstall irqs in normal operation: the event handling
+code will filter them out, and the driver simply enables/disables them
+at load time.
+
+This fixes timeouts we've been seeing on and off for a long time,
+which became a lot more noticeable on Blackwell.
+
+It doesn't fix all of them; a subsequent fence emission fix addresses
+the last few.
+
+Fixes: 3ebd64aa3c4f ("drm/nouveau/intr: support multiple trees, and explicit interfaces")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://lore.kernel.org/r/20250829021633.1674524-1-airlied@gmail.com
+[ Fix a typo and a minor checkpatch.pl warning; remove "v2" from commit
+  subject. - Danilo ]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+[ Apply to drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c  |    2 ++
+ drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c |   23 +++++++++++++++--------
+ drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c |    1 +
+ drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h  |    2 ++
+ drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c  |    1 +
+ 5 files changed, 21 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+@@ -352,6 +352,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engin
+       mutex_destroy(&fifo->userd.mutex);
+       nvkm_event_fini(&fifo->nonstall.event);
++      if (fifo->func->nonstall_dtor)
++              fifo->func->nonstall_dtor(fifo);
+       mutex_destroy(&fifo->mutex);
+       if (fifo->func->dtor)
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+@@ -517,19 +517,11 @@ ga100_fifo_nonstall_intr(struct nvkm_int
+ static void
+ ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
+ {
+-      struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+-      struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
+-
+-      nvkm_inth_block(&runl->nonstall.inth);
+ }
+ static void
+ ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
+ {
+-      struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+-      struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
+-
+-      nvkm_inth_allow(&runl->nonstall.inth);
+ }
+ const struct nvkm_event_func
+@@ -564,12 +556,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fif
+               if (ret)
+                       return ret;
++              nvkm_inth_allow(&runl->nonstall.inth);
++
+               nr = max(nr, runl->id + 1);
+       }
+       return nr;
+ }
++void
++ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo)
++{
++      struct nvkm_runl *runl;
++
++      nvkm_runl_foreach(runl, fifo) {
++              if (runl->nonstall.vector < 0)
++                      continue;
++              nvkm_inth_block(&runl->nonstall.inth);
++      }
++}
++
+ int
+ ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
+ {
+@@ -599,6 +605,7 @@ ga100_fifo = {
+       .runl_ctor = ga100_fifo_runl_ctor,
+       .mmu_fault = &tu102_fifo_mmu_fault,
+       .nonstall_ctor = ga100_fifo_nonstall_ctor,
++      .nonstall_dtor = ga100_fifo_nonstall_dtor,
+       .nonstall = &ga100_fifo_nonstall,
+       .runl = &ga100_runl,
+       .runq = &ga100_runq,
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+@@ -30,6 +30,7 @@ ga102_fifo = {
+       .runl_ctor = ga100_fifo_runl_ctor,
+       .mmu_fault = &tu102_fifo_mmu_fault,
+       .nonstall_ctor = ga100_fifo_nonstall_ctor,
++      .nonstall_dtor = ga100_fifo_nonstall_dtor,
+       .nonstall = &ga100_fifo_nonstall,
+       .runl = &ga100_runl,
+       .runq = &ga100_runq,
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+@@ -40,6 +40,7 @@ struct nvkm_fifo_func {
+       void (*start)(struct nvkm_fifo *, unsigned long *);
+       int (*nonstall_ctor)(struct nvkm_fifo *);
++      void (*nonstall_dtor)(struct nvkm_fifo *);
+       const struct nvkm_event_func *nonstall;
+       const struct nvkm_runl_func *runl;
+@@ -198,6 +199,7 @@ extern const struct nvkm_fifo_func_mmu_f
+ int ga100_fifo_runl_ctor(struct nvkm_fifo *);
+ int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
++void ga100_fifo_nonstall_dtor(struct nvkm_fifo *);
+ extern const struct nvkm_event_func ga100_fifo_nonstall;
+ extern const struct nvkm_runl_func ga100_runl;
+ extern const struct nvkm_runq_func ga100_runq;
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+@@ -660,6 +660,7 @@ r535_fifo_new(const struct nvkm_fifo_fun
+       rm->chan.func = &r535_chan;
+       rm->nonstall = &ga100_fifo_nonstall;
+       rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
++      rm->nonstall_dtor = ga100_fifo_nonstall_dtor;
+       return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+ }
diff --git a/queue-6.12/series b/queue-6.12/series
index 8e072d8a0d088989c658c27555c73454de69ae2b..8ef1819b1353522b1dc1e6f9e4e5b55207c9ae50 100644 (file)
@@ -120,3 +120,26 @@ e1000e-fix-heap-overflow-in-e1000_set_eeprom.patch
 net-pcs-rzn1-miic-correct-modctrl-register-offset.patch
 microchip-lan865x-fix-module-autoloading.patch
 microchip-lan865x-fix-lan8651-autoloading.patch
+fs-fhandle.c-fix-a-race-in-call-of-has_locked_children.patch
+net-dsa-add-hook-to-determine-whether-eee-is-supported.patch
+net-dsa-provide-implementation-of-.support_eee.patch
+net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch
+net-dsa-b53-do-not-enable-eee-on-bcm63xx.patch
+md-raid1-raid10-don-t-ignore-io-flags.patch
+md-raid1-raid10-don-t-handle-io-error-for-req_rahead-and-req_nowait.patch
+md-raid1-raid10-strip-req_nowait-from-member-bios.patch
+ext4-define-ext4_journal_destroy-wrapper.patch
+ext4-avoid-journaling-sb-update-on-error-if-journal-is-destroying.patch
+wifi-ath11k-update-channel-list-in-reg-notifier-instead-reg-worker.patch
+wifi-ath11k-update-channel-list-in-worker-when-wait-flag-is-set.patch
+net-fix-null-pointer-dereference-in-l3mdev_l3_rcv.patch
+md-md-bitmap-fix-wrong-bitmap_limit-for-clustermd-when-write-sb.patch
+mm-slub-print-the-broken-data-before-restoring-them.patch
+mm-slub-call-warn-when-detecting-a-slab-corruption.patch
+mm-slab-cleanup-slab_bug-parameters.patch
+mm-slub-avoid-accessing-metadata-when-pointer-is-invalid-in-object_err.patch
+nouveau-fix-disabling-the-nonstall-irq-due-to-storm-code.patch
+kunit-kasan_test-disable-fortify-string-checker-on-kasan_strings-test.patch
+mm-fix-accounting-of-memmap-pages.patch
+thermal-drivers-mediatek-lvts-disable-low-offset-irq-for-minimum-threshold.patch
+dmaengine-mediatek-fix-a-possible-deadlock-error-in-mtk_cqdma_tx_status.patch
diff --git a/queue-6.12/thermal-drivers-mediatek-lvts-disable-low-offset-irq-for-minimum-threshold.patch b/queue-6.12/thermal-drivers-mediatek-lvts-disable-low-offset-irq-for-minimum-threshold.patch
new file mode 100644 (file)
index 0000000..4468e84
--- /dev/null
@@ -0,0 +1,108 @@
+From stable+bounces-177996-greg=kroah.com@vger.kernel.org Sat Sep  6 21:48:30 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  6 Sep 2025 15:48:21 -0400
+Subject: thermal/drivers/mediatek/lvts: Disable low offset IRQ for minimum threshold
+To: stable@vger.kernel.org
+Cc: "Nícolas F. R. A. Prado" <nfraprado@collabora.com>, "AngeloGioacchino Del Regno" <angelogioacchino.delregno@collabora.com>, "Daniel Lezcano" <daniel.lezcano@linaro.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20250906194821.208326-1-sashal@kernel.org>
+
+From: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+
+[ Upstream commit fa17ff8e325a657c84be1083f06e54ee7eea82e4 ]
+
+In order to get working interrupts, a low offset value needs to be
+configured. The minimum value for it is 20 degrees Celsius, which is
+what is configured when there is no lower thermal trip (i.e. the
+thermal core passes -INT_MAX as the low trip temperature). However,
+when the temperature gets that low and fluctuates around that value,
+it causes an interrupt storm.
+
+Prevent that interrupt storm by not enabling the low offset interrupt if
+the low threshold is the minimum one.
+
+Cc: stable@vger.kernel.org
+Fixes: 77354eaef821 ("thermal/drivers/mediatek/lvts_thermal: Don't leave threshold zeroed")
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
+Link: https://lore.kernel.org/r/20250113-mt8192-lvts-filtered-suspend-fix-v2-3-07a25200c7c6@collabora.com
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+[ Adapted interrupt mask definitions ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/mediatek/lvts_thermal.c |   50 +++++++++++++++++++++++---------
+ 1 file changed, 36 insertions(+), 14 deletions(-)
+
+--- a/drivers/thermal/mediatek/lvts_thermal.c
++++ b/drivers/thermal/mediatek/lvts_thermal.c
+@@ -66,10 +66,14 @@
+ #define LVTS_TSSEL_CONF                               0x13121110
+ #define LVTS_CALSCALE_CONF                    0x300
+-#define LVTS_MONINT_OFFSET_SENSOR0            0xC
+-#define LVTS_MONINT_OFFSET_SENSOR1            0x180
+-#define LVTS_MONINT_OFFSET_SENSOR2            0x3000
+-#define LVTS_MONINT_OFFSET_SENSOR3            0x3000000
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0         BIT(3)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1         BIT(8)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2         BIT(13)
++#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3         BIT(25)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0          BIT(2)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1          BIT(7)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2          BIT(12)
++#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3          BIT(24)
+ #define LVTS_INT_SENSOR0                      0x0009001F
+ #define LVTS_INT_SENSOR1                      0x001203E0
+@@ -329,23 +333,41 @@ static int lvts_get_temp(struct thermal_
+ static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
+ {
+-      u32 masks[] = {
+-              LVTS_MONINT_OFFSET_SENSOR0,
+-              LVTS_MONINT_OFFSET_SENSOR1,
+-              LVTS_MONINT_OFFSET_SENSOR2,
+-              LVTS_MONINT_OFFSET_SENSOR3,
++      static const u32 high_offset_inten_masks[] = {
++              LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0,
++              LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1,
++              LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2,
++              LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3,
++      };
++      static const u32 low_offset_inten_masks[] = {
++              LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0,
++              LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1,
++              LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2,
++              LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3,
+       };
+       u32 value = 0;
+       int i;
+       value = readl(LVTS_MONINT(lvts_ctrl->base));
+-      for (i = 0; i < ARRAY_SIZE(masks); i++) {
++      for (i = 0; i < ARRAY_SIZE(high_offset_inten_masks); i++) {
+               if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+-                  && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+-                      value |= masks[i];
+-              else
+-                      value &= ~masks[i];
++                  && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh) {
++                      /*
++                       * The minimum threshold needs to be configured in the
++                       * OFFSETL register to get working interrupts, but we
++                       * don't actually want to generate interrupts when
++                       * crossing it.
++                       */
++                      if (lvts_ctrl->low_thresh == -INT_MAX) {
++                              value &= ~low_offset_inten_masks[i];
++                              value |= high_offset_inten_masks[i];
++                      } else {
++                              value |= low_offset_inten_masks[i] | high_offset_inten_masks[i];
++                      }
++              } else {
++                      value &= ~(low_offset_inten_masks[i] | high_offset_inten_masks[i]);
++              }
+       }
+       writel(value, LVTS_MONINT(lvts_ctrl->base));
diff --git a/queue-6.12/wifi-ath11k-update-channel-list-in-reg-notifier-instead-reg-worker.patch b/queue-6.12/wifi-ath11k-update-channel-list-in-reg-notifier-instead-reg-worker.patch
new file mode 100644 (file)
index 0000000..c67f08e
--- /dev/null
@@ -0,0 +1,104 @@
+From 933ab187e679e6fbdeea1835ae39efcc59c022d2 Mon Sep 17 00:00:00 2001
+From: Wen Gong <quic_wgong@quicinc.com>
+Date: Fri, 17 Jan 2025 14:17:36 +0800
+Subject: wifi: ath11k: update channel list in reg notifier instead reg worker
+
+From: Wen Gong <quic_wgong@quicinc.com>
+
+commit 933ab187e679e6fbdeea1835ae39efcc59c022d2 upstream.
+
+Currently, when ath11k gets a new channel list, it is processed in
+the following steps:
+1. update the new channel list to cfg80211 and queue reg_work.
+2. cfg80211 handles the new channel list during reg_work.
+3. update cfg80211's handled channel list to firmware via
+ath11k_reg_update_chan_list().
+
+But ath11k will execute step 3 immediately after reg_work has merely
+been queued. Since step 2 is asynchronous, cfg80211 may not have
+finished handling the new channel list, which may lead to an
+out-of-bounds write error:
+BUG: KASAN: slab-out-of-bounds in ath11k_reg_update_chan_list
+Call Trace:
+    ath11k_reg_update_chan_list+0xbfe/0xfe0 [ath11k]
+    kfree+0x109/0x3a0
+    ath11k_regd_update+0x1cf/0x350 [ath11k]
+    ath11k_regd_update_work+0x14/0x20 [ath11k]
+    process_one_work+0xe35/0x14c0
+
+Step 2 must be completely done before step 3 executes, which is why
+Wen raised patch [1]: when the flag NL80211_REGDOM_SET_BY_DRIVER is
+set, cfg80211 notifies ath11k after step 2 is done.
+
+So enable the flag NL80211_REGDOM_SET_BY_DRIVER; cfg80211 will then
+notify ath11k once step 2 is done, and no KASAN bug will occur during
+the execution of step 3.
+
+[1] https://patchwork.kernel.org/project/linux-wireless/patch/20230201065313.27203-1-quic_wgong@quicinc.com/
+
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3
+
+Fixes: f45cb6b29cd3 ("wifi: ath11k: avoid deadlock during regulatory update in ath11k_regd_update()")
+Signed-off-by: Wen Gong <quic_wgong@quicinc.com>
+Signed-off-by: Kang Yang <quic_kangyang@quicinc.com>
+Reviewed-by: Aditya Kumar Singh <quic_adisi@quicinc.com>
+Link: https://patch.msgid.link/20250117061737.1921-2-quic_kangyang@quicinc.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath11k/reg.c |   22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: BSD-3-Clause-Clear
+ /*
+  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #include <linux/rtnetlink.h>
+@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy,
+       ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+                  "Regulatory Notification received for %s\n", wiphy_name(wiphy));
++      if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
++              ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++                         "driver initiated regd update\n");
++              if (ar->state != ATH11K_STATE_ON)
++                      return;
++
++              ret = ath11k_reg_update_chan_list(ar, true);
++              if (ret)
++                      ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
++
++              return;
++      }
++
+       /* Currently supporting only General User Hints. Cell base user
+        * hints to be handled later.
+        * Hints from other sources like Core, Beacons are not expected for
+@@ -293,12 +306,6 @@ int ath11k_regd_update(struct ath11k *ar
+       if (ret)
+               goto err;
+-      if (ar->state == ATH11K_STATE_ON) {
+-              ret = ath11k_reg_update_chan_list(ar, true);
+-              if (ret)
+-                      goto err;
+-      }
+-
+       return 0;
+ err:
+       ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
+@@ -977,6 +984,7 @@ void ath11k_regd_update_work(struct work
+ void ath11k_reg_init(struct ath11k *ar)
+ {
+       ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
++      ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
+       ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+ }
diff --git a/queue-6.12/wifi-ath11k-update-channel-list-in-worker-when-wait-flag-is-set.patch b/queue-6.12/wifi-ath11k-update-channel-list-in-worker-when-wait-flag-is-set.patch
new file mode 100644 (file)
index 0000000..fdb2829
--- /dev/null
@@ -0,0 +1,265 @@
+From 02aae8e2f957adc1b15b6b8055316f8a154ac3f5 Mon Sep 17 00:00:00 2001
+From: Wen Gong <quic_wgong@quicinc.com>
+Date: Fri, 17 Jan 2025 14:17:37 +0800
+Subject: wifi: ath11k: update channel list in worker when wait flag is set
+
+From: Wen Gong <quic_wgong@quicinc.com>
+
+commit 02aae8e2f957adc1b15b6b8055316f8a154ac3f5 upstream.
+
+With the previous patch "wifi: ath11k: move update channel list from
+update reg worker to reg notifier", ath11k_reg_update_chan_list() is
+called during reg_process_self_managed_hint().
+
+reg_process_self_managed_hint() holds rtnl_lock the whole time, and
+ath11k_reg_update_chan_list() may extend how long it is held, because
+when the wait flag is set, wait_for_completion_timeout() is called
+while the 11d/hw scan runs.
+
+The time rtnl_lock is held should be minimized as much as possible to
+avoid interfering with the rest of the system. So move the channel
+list update to a new worker, so that wait_for_completion_timeout() is
+not called under rtnl_lock and does not extend its hold time.
+
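+Condensed, the split looks like this (names as introduced by this
+patch; the full context is in the hunks below):
+
+	/* Caller, under rtnl_lock: queue the request and return. */
+	spin_lock_bh(&ar->data_lock);
+	list_add_tail(&params->list, &ar->channel_update_queue);
+	spin_unlock_bh(&ar->data_lock);
+	queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+	/* Worker, without rtnl_lock: wait for 11d/hw scan, then send. */
+	left = wait_for_completion_timeout(&ar->completed_11d_scan,
+					   ATH11K_SCAN_TIMEOUT_HZ);
+	/* (hw scan wait elided) */
+	ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+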
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3
+
+Signed-off-by: Wen Gong <quic_wgong@quicinc.com>
+Co-developed-by: Kang Yang <quic_kangyang@quicinc.com>
+Signed-off-by: Kang Yang <quic_kangyang@quicinc.com>
+Reviewed-by: Aditya Kumar Singh <quic_adisi@quicinc.com>
+Link: https://patch.msgid.link/20250117061737.1921-3-quic_kangyang@quicinc.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath11k/core.c |    1 
+ drivers/net/wireless/ath/ath11k/core.h |    5 +
+ drivers/net/wireless/ath/ath11k/mac.c  |   14 +++++
+ drivers/net/wireless/ath/ath11k/reg.c  |   85 ++++++++++++++++++++++-----------
+ drivers/net/wireless/ath/ath11k/reg.h  |    3 -
+ drivers/net/wireless/ath/ath11k/wmi.h  |    1 
+ 6 files changed, 81 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -1972,6 +1972,7 @@ void ath11k_core_halt(struct ath11k *ar)
+       ath11k_mac_scan_finish(ar);
+       ath11k_mac_peer_cleanup_all(ar);
+       cancel_delayed_work_sync(&ar->scan.timeout);
++      cancel_work_sync(&ar->channel_update_work);
+       cancel_work_sync(&ar->regd_update_work);
+       cancel_work_sync(&ab->update_11d_work);
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -691,7 +691,7 @@ struct ath11k {
+       struct mutex conf_mutex;
+       /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+        * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+-       * channel context data, survey info, test mode data.
++       * channel context data, survey info, test mode data, channel_update_queue.
+        */
+       spinlock_t data_lock;
+@@ -749,6 +749,9 @@ struct ath11k {
+       struct completion bss_survey_done;
+       struct work_struct regd_update_work;
++      struct work_struct channel_update_work;
++      /* protected with data_lock */
++      struct list_head channel_update_queue;
+       struct work_struct wmi_mgmt_tx_work;
+       struct sk_buff_head wmi_mgmt_tx_queue;
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6367,6 +6367,7 @@ static void ath11k_mac_op_stop(struct ie
+ {
+       struct ath11k *ar = hw->priv;
+       struct htt_ppdu_stats_info *ppdu_stats, *tmp;
++      struct scan_chan_list_params *params;
+       int ret;
+       ath11k_mac_drain_tx(ar);
+@@ -6382,6 +6383,7 @@ static void ath11k_mac_op_stop(struct ie
+       mutex_unlock(&ar->conf_mutex);
+       cancel_delayed_work_sync(&ar->scan.timeout);
++      cancel_work_sync(&ar->channel_update_work);
+       cancel_work_sync(&ar->regd_update_work);
+       cancel_work_sync(&ar->ab->update_11d_work);
+@@ -6391,10 +6393,19 @@ static void ath11k_mac_op_stop(struct ie
+       }
+       spin_lock_bh(&ar->data_lock);
++
+       list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+               list_del(&ppdu_stats->list);
+               kfree(ppdu_stats);
+       }
++
++      while ((params = list_first_entry_or_null(&ar->channel_update_queue,
++                                                struct scan_chan_list_params,
++                                                list))) {
++              list_del(&params->list);
++              kfree(params);
++      }
++
+       spin_unlock_bh(&ar->data_lock);
+       rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+@@ -10194,6 +10205,7 @@ static const struct wiphy_iftype_ext_cap
+ static void __ath11k_mac_unregister(struct ath11k *ar)
+ {
++      cancel_work_sync(&ar->channel_update_work);
+       cancel_work_sync(&ar->regd_update_work);
+       ieee80211_unregister_hw(ar->hw);
+@@ -10593,6 +10605,8 @@ int ath11k_mac_allocate(struct ath11k_ba
+               init_completion(&ar->thermal.wmi_sync);
+               INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
++              INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
++              INIT_LIST_HEAD(&ar->channel_update_queue);
+               INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+               INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -124,32 +124,7 @@ int ath11k_reg_update_chan_list(struct a
+       struct channel_param *ch;
+       enum nl80211_band band;
+       int num_channels = 0;
+-      int i, ret, left;
+-
+-      if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+-              left = wait_for_completion_timeout(&ar->completed_11d_scan,
+-                                                 ATH11K_SCAN_TIMEOUT_HZ);
+-              if (!left) {
+-                      ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-                                 "failed to receive 11d scan complete: timed out\n");
+-                      ar->state_11d = ATH11K_11D_IDLE;
+-              }
+-              ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-                         "11d scan wait left time %d\n", left);
+-      }
+-
+-      if (wait &&
+-          (ar->scan.state == ATH11K_SCAN_STARTING ||
+-          ar->scan.state == ATH11K_SCAN_RUNNING)) {
+-              left = wait_for_completion_timeout(&ar->scan.completed,
+-                                                 ATH11K_SCAN_TIMEOUT_HZ);
+-              if (!left)
+-                      ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-                                 "failed to receive hw scan complete: timed out\n");
+-
+-              ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+-                         "hw scan wait left time %d\n", left);
+-      }
++      int i, ret = 0;
+       if (ar->state == ATH11K_STATE_RESTARTING)
+               return 0;
+@@ -231,6 +206,16 @@ int ath11k_reg_update_chan_list(struct a
+               }
+       }
++      if (wait) {
++              spin_lock_bh(&ar->data_lock);
++              list_add_tail(&params->list, &ar->channel_update_queue);
++              spin_unlock_bh(&ar->data_lock);
++
++              queue_work(ar->ab->workqueue, &ar->channel_update_work);
++
++              return 0;
++      }
++
+       ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+       kfree(params);
+@@ -811,6 +796,54 @@ ret:
+       return new_regd;
+ }
++void ath11k_regd_update_chan_list_work(struct work_struct *work)
++{
++      struct ath11k *ar = container_of(work, struct ath11k,
++                                       channel_update_work);
++      struct scan_chan_list_params *params;
++      struct list_head local_update_list;
++      int left;
++
++      INIT_LIST_HEAD(&local_update_list);
++
++      spin_lock_bh(&ar->data_lock);
++      list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
++      spin_unlock_bh(&ar->data_lock);
++
++      while ((params = list_first_entry_or_null(&local_update_list,
++                                                struct scan_chan_list_params,
++                                                list))) {
++              if (ar->state_11d != ATH11K_11D_IDLE) {
++                      left = wait_for_completion_timeout(&ar->completed_11d_scan,
++                                                         ATH11K_SCAN_TIMEOUT_HZ);
++                      if (!left) {
++                              ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++                                         "failed to receive 11d scan complete: timed out\n");
++                              ar->state_11d = ATH11K_11D_IDLE;
++                      }
++
++                      ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++                                 "reg 11d scan wait left time %d\n", left);
++              }
++
++              if ((ar->scan.state == ATH11K_SCAN_STARTING ||
++                   ar->scan.state == ATH11K_SCAN_RUNNING)) {
++                      left = wait_for_completion_timeout(&ar->scan.completed,
++                                                         ATH11K_SCAN_TIMEOUT_HZ);
++                      if (!left)
++                              ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++                                         "failed to receive hw scan complete: timed out\n");
++
++                      ath11k_dbg(ar->ab, ATH11K_DBG_REG,
++                                 "reg hw scan wait left time %d\n", left);
++              }
++
++              ath11k_wmi_send_scan_chan_list_cmd(ar, params);
++              list_del(&params->list);
++              kfree(params);
++      }
++}
++
+ static bool ath11k_reg_is_world_alpha(char *alpha)
+ {
+       if (alpha[0] == '0' && alpha[1] == '0')
+--- a/drivers/net/wireless/ath/ath11k/reg.h
++++ b/drivers/net/wireless/ath/ath11k/reg.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
+ /*
+  * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #ifndef ATH11K_REG_H
+@@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
+ void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
+ void ath11k_reg_free(struct ath11k_base *ab);
+ void ath11k_regd_update_work(struct work_struct *work);
++void ath11k_regd_update_chan_list_work(struct work_struct *work);
+ struct ieee80211_regdomain *
+ ath11k_reg_build_regd(struct ath11k_base *ab,
+                     struct cur_regulatory_info *reg_info, bool intersect,
+--- a/drivers/net/wireless/ath/ath11k/wmi.h
++++ b/drivers/net/wireless/ath/ath11k/wmi.h
+@@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd {
+ };
+ struct scan_chan_list_params {
++      struct list_head list;
+       u32 pdev_id;
+       u16 nallchans;
+       struct channel_param ch_param[];