md/raid1: Handle bio_split() errors
author John Garry <john.g.garry@oracle.com>
Mon, 11 Nov 2024 11:21:49 +0000 (11:21 +0000)
committer Jens Axboe <axboe@kernel.dk>
Mon, 11 Nov 2024 15:35:46 +0000 (08:35 -0700)
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return.
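
Both request paths now follow the same check-and-unwind shape at the bio_split() call site. A minimal sketch, mirroring the hunks below with comments added (gfp is GFP_NOIO in the write path):

        struct bio *split = bio_split(bio, max_sectors, gfp,
                                      &conf->bio_split);

        /* with this series, bio_split() reports failure as an ERR_PTR */
        if (IS_ERR(split)) {
                error = PTR_ERR(split);
                goto err_handle;        /* undo refcounts, end the r1_bio */
        }
        bio_chain(split, bio);
        submit_bio_noacct(bio);
        bio = split;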

For the case of an error in the write path, we need to undo the increment in
the rdev pending count and NULLify the r1_bio->bios[] pointers.
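
A sketch of that write-path unwind, matching the raid1_write_request() hunk below (comments added; k walks the bios[] slots populated before the failure):

err_handle:
        for (k = 0; k < i; k++) {
                if (r1_bio->bios[k]) {
                        /* drop the pending ref taken when the slot was set */
                        rdev_dec_pending(conf->mirrors[k].rdev, mddev);
                        /* clear the stale pointer so no write is attempted */
                        r1_bio->bios[k] = NULL;
                }
        }

        bio->bi_status = errno_to_blk_status(error);
        /* Uptodate keeps call_bio_endio() from overwriting bi_status */
        set_bit(R1BIO_Uptodate, &r1_bio->state);
        raid_end_bio_io(r1_bio);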

For read path failure, we need to undo the rdev pending count increment from
the earlier read_balance() call.
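
Sketched for the read path, matching the raid1_read_request() hunk below (mirror is the rdev picked by read_balance(); comments added):

err_handle:
        /* read_balance() bumped nr_pending on the chosen rdev; drop it */
        atomic_dec(&mirror->rdev->nr_pending);
        bio->bi_status = errno_to_blk_status(error);
        set_bit(R1BIO_Uptodate, &r1_bio->state);
        raid_end_bio_io(r1_bio);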

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241111112150.3756529-6-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/raid1.c

index cd3e94dceabc48796ad81e82d80d4b4d58a86173..a5adf08ee174199531575b9deb9f34577bbb72bc 100644
@@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        const enum req_op op = bio_op(bio);
        const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
        int max_sectors;
-       int rdisk;
+       int rdisk, error;
        bool r1bio_existed = !!r1_bio;
 
        /*
@@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -1410,6 +1415,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        read_bio->bi_private = r1_bio;
        mddev_trace_remap(mddev, read_bio, r1_bio->sector);
        submit_bio_noacct(read_bio);
+       return;
+
+err_handle:
+       atomic_dec(&mirror->rdev->nr_pending);
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }
 
 static bool wait_blocked_rdev(struct mddev *mddev, struct bio *bio)
@@ -1451,7 +1463,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
-       int i, disks;
+       int i, disks, k, error;
        unsigned long flags;
        int first_clone;
        int max_sectors;
@@ -1579,6 +1591,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -1663,6 +1680,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
        /* In case raid1d snuck in to freeze_array */
        wake_up_barrier(conf);
+       return;
+err_handle:
+       for (k = 0; k < i; k++) {
+               if (r1_bio->bios[k]) {
+                       rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+                       r1_bio->bios[k] = NULL;
+               }
+       }
+
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }
 
 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)