git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
block: refactor blkdev_zone_mgmt_ioctl
author: Christoph Hellwig <hch@lst.de>
Fri, 27 Mar 2026 09:00:32 +0000 (10:00 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 10 Apr 2026 01:14:58 +0000 (19:14 -0600)
Split the zone reset case into a separate helper so that the conditional
locking goes away.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://patch.msgid.link/20260327090032.3722065-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-zoned.c

index bfd9733ebd3196471e15b076b2153f184df4e965..30cad2bb9291b60f03ab405a0349531b080bc35f 100644 (file)
@@ -417,20 +417,32 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
        return 0;
 }
 
-static int blkdev_truncate_zone_range(struct block_device *bdev,
-               blk_mode_t mode, const struct blk_zone_range *zrange)
+static int blkdev_reset_zone(struct block_device *bdev, blk_mode_t mode,
+                            struct blk_zone_range *zrange)
 {
        loff_t start, end;
+       int ret = -EINVAL;
 
+       inode_lock(bdev->bd_mapping->host);
+       filemap_invalidate_lock(bdev->bd_mapping);
        if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
            zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
                /* Out of range */
-               return -EINVAL;
+               goto out_unlock;
 
        start = zrange->sector << SECTOR_SHIFT;
        end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
 
-       return truncate_bdev_range(bdev, mode, start, end);
+       ret = truncate_bdev_range(bdev, mode, start, end);
+       if (ret)
+               goto out_unlock;
+
+       ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zrange->sector,
+                              zrange->nr_sectors);
+out_unlock:
+       filemap_invalidate_unlock(bdev->bd_mapping);
+       inode_unlock(bdev->bd_mapping->host);
+       return ret;
 }
 
 /*
@@ -443,7 +455,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
        void __user *argp = (void __user *)arg;
        struct blk_zone_range zrange;
        enum req_op op;
-       int ret;
 
        if (!argp)
                return -EINVAL;
@@ -459,15 +470,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
 
        switch (cmd) {
        case BLKRESETZONE:
-               op = REQ_OP_ZONE_RESET;
-
-               /* Invalidate the page cache, including dirty pages. */
-               inode_lock(bdev->bd_mapping->host);
-               filemap_invalidate_lock(bdev->bd_mapping);
-               ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
-               if (ret)
-                       goto fail;
-               break;
+               return blkdev_reset_zone(bdev, mode, &zrange);
        case BLKOPENZONE:
                op = REQ_OP_ZONE_OPEN;
                break;
@@ -481,15 +484,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
                return -ENOTTY;
        }
 
-       ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
-
-fail:
-       if (cmd == BLKRESETZONE) {
-               filemap_invalidate_unlock(bdev->bd_mapping);
-               inode_unlock(bdev->bd_mapping->host);
-       }
-
-       return ret;
+       return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
 }
 
 static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)