--- /dev/null
+From 3d8d32873c7b6d9cec5b40c2ddb8c7c55961694f Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Wed, 27 Sep 2023 14:12:40 +0800
+Subject: md: factor out a helper from mddev_put()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 3d8d32873c7b6d9cec5b40c2ddb8c7c55961694f upstream.
+
+There are no functional changes; this prepares for simplifying md_seq_ops in
+the next patch.
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20230927061241.1552837-2-yukuai1@huaweicloud.com
+[minor context conflict]
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 32 ++++++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 14 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -667,24 +667,28 @@ static inline struct mddev *mddev_get(st
+
+ static void mddev_delayed_delete(struct work_struct *ws);
+
++static void __mddev_put(struct mddev *mddev)
++{
++ if (mddev->raid_disks || !list_empty(&mddev->disks) ||
++ mddev->ctime || mddev->hold_active)
++ return;
++
++ /* Array is not configured at all, and not held active, so destroy it */
++ set_bit(MD_DELETED, &mddev->flags);
++
++ /*
++ * Call queue_work inside the spinlock so that flush_workqueue() after
++ * mddev_find will succeed in waiting for the work to be done.
++ */
++ INIT_WORK(&mddev->del_work, mddev_delayed_delete);
++ queue_work(md_misc_wq, &mddev->del_work);
++}
++
+ void mddev_put(struct mddev *mddev)
+ {
+ if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+ return;
+- if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+- mddev->ctime == 0 && !mddev->hold_active) {
+- /* Array is not configured at all, and not held active,
+- * so destroy it */
+- set_bit(MD_DELETED, &mddev->flags);
+-
+- /*
+- * Call queue_work inside the spinlock so that
+- * flush_workqueue() after mddev_find will succeed in waiting
+- * for the work to be done.
+- */
+- INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+- queue_work(md_misc_wq, &mddev->del_work);
+- }
++ __mddev_put(mddev);
+ spin_unlock(&all_mddevs_lock);
+ }
+
--- /dev/null
+From 8542870237c3a48ff049b6c5df5f50c8728284fa Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 20 Feb 2025 20:43:48 +0800
+Subject: md: fix mddev uaf while iterating all_mddevs list
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 8542870237c3a48ff049b6c5df5f50c8728284fa upstream.
+
+While iterating the all_mddevs list from md_notify_reboot() and md_exit(),
+list_for_each_entry_safe() is used, and this can race with deleting the
+next mddev, causing a UAF:
+
+t1:
+spin_lock
+//list_for_each_entry_safe(mddev, n, ...)
+ mddev_get(mddev1)
+ // assume mddev2 is the next entry
+ spin_unlock
+            t2:
+            //remove mddev2
+            ...
+            mddev_free
+            spin_lock
+            list_del
+            spin_unlock
+            kfree(mddev2)
+ mddev_put(mddev1)
+ spin_lock
+ //continue dereference mddev2->all_mddevs
+
+The old helper for_each_mddev() actually grabbed a reference to mddev2 while
+holding the lock, to prevent it from being freed. This problem could be fixed
+the same way; however, the code would be complex.
+
+Hence switch to list_for_each_entry(). In this case mddev_put() can free
+mddev1, which is not safe either. Referring to md_seq_show(), also factor
+out a helper mddev_put_locked() to fix this problem.
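+
+For reference, a condensed sketch of the resulting iteration pattern
+(mirroring the md_notify_reboot() and md_exit() hunks below; the per-array
+work in the loop body is elided):
+
+spin_lock(&all_mddevs_lock);
+list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+        if (!mddev_get(mddev))
+                continue;
+        spin_unlock(&all_mddevs_lock);
+
+        /* ... operate on mddev without the list lock held ... */
+
+        spin_lock(&all_mddevs_lock);
+        /*
+         * Drop the reference while holding all_mddevs_lock: per the trace
+         * above, the freeing path takes this lock before list_del() and
+         * kfree(), so mddev->all_mddevs.next stays valid for the iterator.
+         */
+        mddev_put_locked(mddev);
+}
+spin_unlock(&all_mddevs_lock);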
+
+Cc: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/linux-raid/20250220124348.845222-1-yukuai1@huaweicloud.com
+Fixes: f26514342255 ("md: stop using for_each_mddev in md_notify_reboot")
+Fixes: 16648bac862f ("md: stop using for_each_mddev in md_exit")
+Reported-and-tested-by: Guillaume Morin <guillaume@morinfr.org>
+Closes: https://lore.kernel.org/all/Z7Y0SURoA8xwg7vn@bender.morinfr.org/
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+[skipped the md_seq_show() hunk, which does not exist in this tree]
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -684,6 +684,12 @@ static void __mddev_put(struct mddev *md
+ queue_work(md_misc_wq, &mddev->del_work);
+ }
+
++static void mddev_put_locked(struct mddev *mddev)
++{
++ if (atomic_dec_and_test(&mddev->active))
++ __mddev_put(mddev);
++}
++
+ void mddev_put(struct mddev *mddev)
+ {
+ if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
+@@ -9704,11 +9710,11 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+ static int md_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x)
+ {
+- struct mddev *mddev, *n;
++ struct mddev *mddev;
+ int need_delay = 0;
+
+ spin_lock(&all_mddevs_lock);
+- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
+@@ -9720,8 +9726,8 @@ static int md_notify_reboot(struct notif
+ mddev_unlock(mddev);
+ }
+ need_delay = 1;
+- mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
++ mddev_put_locked(mddev);
+ }
+ spin_unlock(&all_mddevs_lock);
+
+@@ -10043,7 +10049,7 @@ void md_autostart_arrays(int part)
+
+ static __exit void md_exit(void)
+ {
+- struct mddev *mddev, *n;
++ struct mddev *mddev;
+ int delay = 1;
+
+ unregister_blkdev(MD_MAJOR,"md");
+@@ -10064,7 +10070,7 @@ static __exit void md_exit(void)
+ remove_proc_entry("mdstat", NULL);
+
+ spin_lock(&all_mddevs_lock);
+- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
++ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
+ if (!mddev_get(mddev))
+ continue;
+ spin_unlock(&all_mddevs_lock);
+@@ -10076,8 +10082,8 @@ static __exit void md_exit(void)
+ * the mddev for destruction by a workqueue, and the
+ * destroy_workqueue() below will wait for that to complete.
+ */
+- mddev_put(mddev);
+ spin_lock(&all_mddevs_lock);
++ mddev_put_locked(mddev);
+ }
+ spin_unlock(&all_mddevs_lock);
+