]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
md/raid1: serialize overlap io for writemostly disk
authorXiao Ni <xni@redhat.com>
Tue, 24 Mar 2026 07:24:54 +0000 (15:24 +0800)
committerYu Kuai <yukuai@fnnas.com>
Tue, 7 Apr 2026 05:09:22 +0000 (13:09 +0800)
Previously, using wait_event() would wake up all waiters simultaneously,
and they would compete for the tree lock. The bio that acquires the lock
first is handled first, so the original write order cannot be guaranteed.

For example:
bio1(100,200)
bio2(150,200)
bio3(150,300)

The write sequence of fast device is bio1,bio2,bio3. But the write sequence
of slow device could be bio1,bio3,bio2 due to lock competition. This causes
data corruption.

Replace the waitqueue with a FIFO list to guarantee the write sequence.
The list must also be iterated when removing one entry; otherwise a
waiting io may miss its chance to be woken up.

For example:
bio1(1,3), bio2(2,4)
bio3(5,7), bio4(6,8)
These four bios are in the same bucket. bio1 and bio3 are inserted into
the rbtree. bio2 and bio4 are added to the waiting list and bio2 is the
first one. bio3 returns from the slow disk and tries to wake up the
waiting bios. bio2 is removed from the list and will be handled. But bio1
hasn't finished, so bio2 is added back to the tail of the waiting list.
Then bio1 returns from the slow disk and wakes up the waiting bios. bio4
is removed from the list and will be handled. Now bio1, bio3 and bio4
have all finished, yet bio2 is left on the waiting list. So the waiting
list must be iterated to wake up the right bio.

Signed-off-by: Xiao Ni <xni@redhat.com>
Link: https://lore.kernel.org/linux-raid/20260324072501.59865-1-xni@redhat.com/
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c

index ee01e050ee12238ec67eae43f6ec72ff88063934..67e2b501d94fb1430e8a69de967bfc43795b731b 100644 (file)
@@ -187,7 +187,6 @@ static int rdev_init_serial(struct md_rdev *rdev)
 
                spin_lock_init(&serial_tmp->serial_lock);
                serial_tmp->serial_rb = RB_ROOT_CACHED;
-               init_waitqueue_head(&serial_tmp->serial_io_wait);
        }
 
        rdev->serial = serial;
index ac84289664cd7e5bcaa8a809d40460997c009323..d6f5482e247908e43671248a54261800d3e33594 100644 (file)
@@ -126,7 +126,6 @@ enum sync_action {
 struct serial_in_rdev {
        struct rb_root_cached serial_rb;
        spinlock_t serial_lock;
-       wait_queue_head_t serial_io_wait;
 };
 
 /*
@@ -381,7 +380,11 @@ struct serial_info {
        struct rb_node node;
        sector_t start;         /* start sector of rb node */
        sector_t last;          /* end sector of rb node */
+       sector_t wnode_start; /* address of waiting nodes on the same list */
        sector_t _subtree_last; /* highest sector in subtree of rb node */
+       struct list_head        list_node;
+       struct list_head        waiters;
+       struct completion       ready;
 };
 
 /*
index 16f671ab12c00d0d873faf1a5b5469c3fcb572f2..ba91f7e61920d47f597f3e9b053c080154bdc919 100644 (file)
@@ -57,21 +57,29 @@ INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
                     START, LAST, static inline, raid1_rb);
 
 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
-                               struct serial_info *si, int idx)
+                               struct serial_info *si)
 {
        unsigned long flags;
        int ret = 0;
        sector_t lo = r1_bio->sector;
        sector_t hi = lo + r1_bio->sectors - 1;
+       int idx = sector_to_idx(r1_bio->sector);
        struct serial_in_rdev *serial = &rdev->serial[idx];
+       struct serial_info *head_si;
 
        spin_lock_irqsave(&serial->serial_lock, flags);
        /* collision happened */
-       if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
+       head_si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
+       if (head_si && head_si != si) {
+               si->start = lo;
+               si->last = hi;
+               si->wnode_start = head_si->wnode_start;
+               list_add_tail(&si->list_node, &head_si->waiters);
                ret = -EBUSY;
-       else {
+       } else if (!head_si) {
                si->start = lo;
                si->last = hi;
+               si->wnode_start = si->start;
                raid1_rb_insert(si, &serial->serial_rb);
        }
        spin_unlock_irqrestore(&serial->serial_lock, flags);
@@ -83,19 +91,22 @@ static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
 {
        struct mddev *mddev = rdev->mddev;
        struct serial_info *si;
-       int idx = sector_to_idx(r1_bio->sector);
-       struct serial_in_rdev *serial = &rdev->serial[idx];
 
        if (WARN_ON(!mddev->serial_info_pool))
                return;
        si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
-       wait_event(serial->serial_io_wait,
-                  check_and_add_serial(rdev, r1_bio, si, idx) == 0);
+       INIT_LIST_HEAD(&si->waiters);
+       INIT_LIST_HEAD(&si->list_node);
+       init_completion(&si->ready);
+       while (check_and_add_serial(rdev, r1_bio, si)) {
+               wait_for_completion(&si->ready);
+               reinit_completion(&si->ready);
+       }
 }
 
 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
 {
-       struct serial_info *si;
+       struct serial_info *si, *iter_si;
        unsigned long flags;
        int found = 0;
        struct mddev *mddev = rdev->mddev;
@@ -106,16 +117,28 @@ static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
        for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
             si; si = raid1_rb_iter_next(si, lo, hi)) {
                if (si->start == lo && si->last == hi) {
-                       raid1_rb_remove(si, &serial->serial_rb);
-                       mempool_free(si, mddev->serial_info_pool);
                        found = 1;
                        break;
                }
        }
-       if (!found)
+       if (found) {
+               raid1_rb_remove(si, &serial->serial_rb);
+               if (!list_empty(&si->waiters)) {
+                       list_for_each_entry(iter_si, &si->waiters, list_node) {
+                               if (iter_si->wnode_start == si->wnode_start) {
+                                       list_del_init(&iter_si->list_node);
+                                       list_splice_init(&si->waiters, &iter_si->waiters);
+                                       raid1_rb_insert(iter_si, &serial->serial_rb);
+                                       complete(&iter_si->ready);
+                                       break;
+                               }
+                       }
+               }
+               mempool_free(si, mddev->serial_info_pool);
+       } else {
                WARN(1, "The write IO is not recorded for serialization\n");
+       }
        spin_unlock_irqrestore(&serial->serial_lock, flags);
-       wake_up(&serial->serial_io_wait);
 }
 
 /*