return 0;
}
-static int blkdev_truncate_zone_range(struct block_device *bdev,
- blk_mode_t mode, const struct blk_zone_range *zrange)
+static int blkdev_reset_zone(struct block_device *bdev, blk_mode_t mode,
+			     const struct blk_zone_range *zrange)
{
loff_t start, end;
+ int ret = -EINVAL;
+	/* Invalidate the page cache, including dirty pages. */
+	inode_lock(bdev->bd_mapping->host);
+	filemap_invalidate_lock(bdev->bd_mapping);
if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
/* Out of range */
- return -EINVAL;
+ goto out_unlock;
start = zrange->sector << SECTOR_SHIFT;
end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
- return truncate_bdev_range(bdev, mode, start, end);
+ ret = truncate_bdev_range(bdev, mode, start, end);
+ if (ret)
+ goto out_unlock;
+
+ ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zrange->sector,
+ zrange->nr_sectors);
+out_unlock:
+ filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
+ return ret;
}
/*
void __user *argp = (void __user *)arg;
struct blk_zone_range zrange;
enum req_op op;
- int ret;
if (!argp)
return -EINVAL;
switch (cmd) {
case BLKRESETZONE:
- op = REQ_OP_ZONE_RESET;
-
- /* Invalidate the page cache, including dirty pages. */
- inode_lock(bdev->bd_mapping->host);
- filemap_invalidate_lock(bdev->bd_mapping);
- ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
- if (ret)
- goto fail;
- break;
+ return blkdev_reset_zone(bdev, mode, &zrange);
case BLKOPENZONE:
op = REQ_OP_ZONE_OPEN;
break;
return -ENOTTY;
}
- ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
-
-fail:
- if (cmd == BLKRESETZONE) {
- filemap_invalidate_unlock(bdev->bd_mapping);
- inode_unlock(bdev->bd_mapping->host);
- }
-
- return ret;
+ return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
}
static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)