dm-delay: change locking to avoid contention
Author:     Benjamin Marzinski <bmarzins@redhat.com>
AuthorDate: Tue, 7 May 2024 21:16:24 +0000 (17:16 -0400)
Commit:     Mike Snitzer <snitzer@kernel.org>
CommitDate: Thu, 9 May 2024 13:10:58 +0000 (09:10 -0400)

The delayed_bios list is protected by one mutex shared by all dm-delay
devices. This mutex must be held whenever a bio is added or expired bios
are removed from the list. Since a large number of expired bios could
be on the list, flush_delayed_bios() calls cond_resched() as it walks the
list and so can schedule while holding the mutex. This means a
flush_delayed_bios() call on any dm-delay device can
slow down delay_map() calls on any other dm-delay device.

To keep dm-delay devices from slowing each other down, and to keep the
processing of delayed bios from slowing the addition of new delayed bios,
the global mutex has been removed and each dm-delay device now has two locks.
delayed_bios_lock is a spinlock that must be held whenever the
delayed_bios list is accessed. process_bios_lock is a mutex that must be
held whenever a process has temporarily pulled bios off the delayed_bios
list to check which ones should be processed. It must be held until all
the bios that won't be processed are returned to the list. This is what
flush_delayed_bios() now does. The mutex is necessary to guarantee that
delay_presuspend() sees the entire list of delayed bios when it calls
flush_delayed_bios().
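In condensed form, flush_delayed_bios() now follows the pattern sketched
below. This is only a restatement of the change in the diff that follows,
with the timer re-arming and per-class accounting omitted and bio
submission reduced to a placeholder comment:

    static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
    {
            struct dm_delay_info *delayed, *next;
            LIST_HEAD(local_list);

            mutex_lock(&dc->process_bios_lock);

            /* Take the whole list in O(1) under the spinlock. */
            spin_lock(&dc->delayed_bios_lock);
            list_replace_init(&dc->delayed_bios, &local_list);
            spin_unlock(&dc->delayed_bios_lock);

            /*
             * Walk the private list without blocking delay_bio(), which
             * only needs the spinlock to add new entries.
             */
            list_for_each_entry_safe(delayed, next, &local_list, list) {
                    if (flush_all || time_after_eq(jiffies, delayed->expires)) {
                            list_del(&delayed->list);
                            /* ... collect the expired bio for submission ... */
                    }
            }

            /*
             * Return the still-delayed entries under the spinlock. The
             * mutex is held across the whole drain-and-return cycle so
             * delay_presuspend() cannot observe a partially drained list.
             */
            spin_lock(&dc->delayed_bios_lock);
            list_splice(&local_list, &dc->delayed_bios);
            spin_unlock(&dc->delayed_bios_lock);

            mutex_unlock(&dc->process_bios_lock);
    }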

Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2ac43d1f1b92c3774b1ea5aa1cc837e50266852d..da3f8131d52c234af10cad6a708e2b0b5871acb6 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -29,6 +29,8 @@ struct delay_class {
 struct delay_c {
        struct timer_list delay_timer;
        struct mutex timer_lock;
+       struct mutex process_bios_lock; /* hold while removing bios to be processed from list */
+       spinlock_t delayed_bios_lock; /* hold on all accesses to delayed_bios list */
        struct workqueue_struct *kdelayd_wq;
        struct work_struct flush_expired_bios;
        struct list_head delayed_bios;
@@ -49,8 +51,6 @@ struct dm_delay_info {
        unsigned long expires;
 };
 
-static DEFINE_MUTEX(delayed_bios_lock);
-
 static void handle_delayed_timer(struct timer_list *t)
 {
        struct delay_c *dc = from_timer(dc, t, delay_timer);
@@ -89,12 +89,16 @@ static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
 {
        struct dm_delay_info *delayed, *next;
        struct bio_list flush_bio_list;
+       LIST_HEAD(local_list);
        unsigned long next_expires = 0;
        bool start_timer = false;
        bio_list_init(&flush_bio_list);
 
-       mutex_lock(&delayed_bios_lock);
-       list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+       mutex_lock(&dc->process_bios_lock);
+       spin_lock(&dc->delayed_bios_lock);
+       list_replace_init(&dc->delayed_bios, &local_list);
+       spin_unlock(&dc->delayed_bios_lock);
+       list_for_each_entry_safe(delayed, next, &local_list, list) {
                cond_resched();
                if (flush_all || time_after_eq(jiffies, delayed->expires)) {
                        struct bio *bio = dm_bio_from_per_bio_data(delayed,
@@ -114,7 +118,10 @@ static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
                        }
                }
        }
-       mutex_unlock(&delayed_bios_lock);
+       spin_lock(&dc->delayed_bios_lock);
+       list_splice(&local_list, &dc->delayed_bios);
+       spin_unlock(&dc->delayed_bios_lock);
+       mutex_unlock(&dc->process_bios_lock);
 
        if (start_timer)
                queue_timeout(dc, next_expires);
@@ -128,13 +135,13 @@ static int flush_worker_fn(void *data)
 
        while (!kthread_should_stop()) {
                flush_delayed_bios(dc, false);
-               mutex_lock(&delayed_bios_lock);
+               spin_lock(&dc->delayed_bios_lock);
                if (unlikely(list_empty(&dc->delayed_bios))) {
                        set_current_state(TASK_INTERRUPTIBLE);
-                       mutex_unlock(&delayed_bios_lock);
+                       spin_unlock(&dc->delayed_bios_lock);
                        schedule();
                } else {
-                       mutex_unlock(&delayed_bios_lock);
+                       spin_unlock(&dc->delayed_bios_lock);
                        cond_resched();
                }
        }
@@ -168,6 +175,7 @@ static void delay_dtr(struct dm_target *ti)
        if (dc->worker)
                kthread_stop(dc->worker);
 
+       mutex_destroy(&dc->process_bios_lock);
        mutex_destroy(&dc->timer_lock);
 
        kfree(dc);
@@ -227,6 +235,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->private = dc;
        INIT_LIST_HEAD(&dc->delayed_bios);
        mutex_init(&dc->timer_lock);
+       mutex_init(&dc->process_bios_lock);
+       spin_lock_init(&dc->delayed_bios_lock);
        dc->may_delay = true;
        dc->argc = argc;
 
@@ -309,14 +319,14 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
        delayed->context = dc;
        delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
 
-       mutex_lock(&delayed_bios_lock);
+       spin_lock(&dc->delayed_bios_lock);
        if (unlikely(!dc->may_delay)) {
-               mutex_unlock(&delayed_bios_lock);
+               spin_unlock(&dc->delayed_bios_lock);
                return DM_MAPIO_REMAPPED;
        }
        c->ops++;
        list_add_tail(&delayed->list, &dc->delayed_bios);
-       mutex_unlock(&delayed_bios_lock);
+       spin_unlock(&dc->delayed_bios_lock);
 
        if (delay_is_fast(dc))
                wake_up_process(dc->worker);
@@ -330,9 +340,9 @@ static void delay_presuspend(struct dm_target *ti)
 {
        struct delay_c *dc = ti->private;
 
-       mutex_lock(&delayed_bios_lock);
+       spin_lock(&dc->delayed_bios_lock);
        dc->may_delay = false;
-       mutex_unlock(&delayed_bios_lock);
+       spin_unlock(&dc->delayed_bios_lock);
 
        if (!delay_is_fast(dc))
                timer_delete(&dc->delay_timer);