static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
+ u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
if (!dev || !dev->bdev ||
test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
(btrfs_op(bio) == BTRFS_MAP_WRITE &&
 * For zone append writing, bi_sector must point to the beginning of the
 * zone.
 */
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ if (btrfs_bio(bio)->can_use_append && btrfs_dev_is_sequential(dev, physical)) {
u64 zone_start = round_down(physical, dev->fs_info->zone_size);
ASSERT(btrfs_dev_is_sequential(dev, physical));
bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+ bio->bi_opf &= ~REQ_OP_WRITE;
+ bio->bi_opf |= REQ_OP_ZONE_APPEND;
}
btrfs_debug(dev->fs_info,
"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
u64 length = bio->bi_iter.bi_size;
u64 map_length = length;
- bool use_append = btrfs_use_zone_append(bbio);
struct btrfs_io_context *bioc = NULL;
struct btrfs_io_stripe smap;
blk_status_t status;
if (bio_op(bio) == REQ_OP_WRITE && is_data_bbio(bbio))
bbio->orig_logical = logical;
+ bbio->can_use_append = btrfs_use_zone_append(bbio);
+
map_length = min(map_length, length);
- if (use_append)
+ if (bbio->can_use_append)
map_length = btrfs_append_map_length(bbio, map_length);
if (map_length < length) {
}
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
- if (use_append) {
- bio->bi_opf &= ~REQ_OP_WRITE;
- bio->bi_opf |= REQ_OP_ZONE_APPEND;
- }
-
if (is_data_bbio(bbio) && bioc && bioc->use_rst) {
/*
* No locking for the list update, as we only add to
status = errno_to_blk_status(ret);
if (status)
goto fail;
- } else if (use_append ||
+ } else if (bbio->can_use_append ||
(btrfs_is_zoned(fs_info) && inode &&
inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);