git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
md: factor out a helper from mddev_put()
author Yu Kuai <yukuai3@huawei.com>
Wed, 27 Sep 2023 06:12:40 +0000 (14:12 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 25 Apr 2025 08:44:05 +0000 (10:44 +0200)
commit 3d8d32873c7b6d9cec5b40c2ddb8c7c55961694f upstream.

There are no functional changes, prepare to simplify md_seq_ops in next
patch.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20230927061241.1552837-2-yukuai1@huaweicloud.com
[minor context conflict]
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Cc: Salvatore Bonaccorso <carnil@debian.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/md.c

index 5e2751d42f645c2c48634eab9397edfbc48c5dce..639fe8ebaa68f24d0dc1ebb8805d60d6b567b881 100644 (file)
@@ -667,24 +667,28 @@ static inline struct mddev *mddev_get(struct mddev *mddev)
 
 static void mddev_delayed_delete(struct work_struct *ws);
 
+static void __mddev_put(struct mddev *mddev)
+{
+       if (mddev->raid_disks || !list_empty(&mddev->disks) ||
+           mddev->ctime || mddev->hold_active)
+               return;
+
+       /* Array is not configured at all, and not held active, so destroy it */
+       set_bit(MD_DELETED, &mddev->flags);
+
+       /*
+        * Call queue_work inside the spinlock so that flush_workqueue() after
+        * mddev_find will succeed in waiting for the work to be done.
+        */
+       INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+       queue_work(md_misc_wq, &mddev->del_work);
+}
+
 void mddev_put(struct mddev *mddev)
 {
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
-       if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-           mddev->ctime == 0 && !mddev->hold_active) {
-               /* Array is not configured at all, and not held active,
-                * so destroy it */
-               set_bit(MD_DELETED, &mddev->flags);
-
-               /*
-                * Call queue_work inside the spinlock so that
-                * flush_workqueue() after mddev_find will succeed in waiting
-                * for the work to be done.
-                */
-               INIT_WORK(&mddev->del_work, mddev_delayed_delete);
-               queue_work(md_misc_wq, &mddev->del_work);
-       }
+       __mddev_put(mddev);
        spin_unlock(&all_mddevs_lock);
 }