git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
md/raid1: Handle bio_split() errors
author: John Garry <john.g.garry@oracle.com>
Mon, 20 Oct 2025 13:06:47 +0000 (09:06 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Oct 2025 14:20:42 +0000 (16:20 +0200)
[ Upstream commit b1a7ad8b5c4fa28325ee7b369a2d545d3e16ccde ]

Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return.

For the case of an error in the write path, we need to undo the increment in
the rdev pending count and NULLify the r1_bio->bios[] pointers.

For read path failure, we need to undo rdev pending count increment from
the earlier read_balance() call.

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241111112150.3756529-6-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: 22f166218f73 ("md: fix mssing blktrace bio split events")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/raid1.c

index faccf7344ef933c9d1b5c4e2a1ff9456ab16bdff..31081d9e94025222f11fd1b9950d17728f170504 100644 (file)
@@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        struct raid1_info *mirror;
        struct bio *read_bio;
        int max_sectors;
-       int rdisk;
+       int rdisk, error;
        bool r1bio_existed = !!r1_bio;
 
        /*
@@ -1378,6 +1378,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -1404,6 +1409,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        read_bio->bi_private = r1_bio;
        mddev_trace_remap(mddev, read_bio, r1_bio->sector);
        submit_bio_noacct(read_bio);
+       return;
+
+err_handle:
+       atomic_dec(&mirror->rdev->nr_pending);
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }
 
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1411,7 +1423,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
-       int i, disks;
+       int i, disks, k, error;
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        int first_clone;
@@ -1557,6 +1569,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -1640,6 +1657,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
        /* In case raid1d snuck in to freeze_array */
        wake_up_barrier(conf);
+       return;
+err_handle:
+       for (k = 0; k < i; k++) {
+               if (r1_bio->bios[k]) {
+                       rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+                       r1_bio->bios[k] = NULL;
+               }
+       }
+
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }
 
 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)