From: Johannes Thumshirn
Date: Thu, 4 Dec 2025 12:42:23 +0000 (+0100)
Subject: btrfs: zoned: don't zone append to conventional zone
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b39b26e017c7889181cb84032e22bef72e81cf29;p=thirdparty%2Fkernel%2Flinux.git

btrfs: zoned: don't zone append to conventional zone

In case of a zoned RAID, it can happen that a data write targets both a
sequential write required zone and a conventional zone. In this case the
bio will be marked as REQ_OP_ZONE_APPEND, but for the conventional zone
it needs to be REQ_OP_WRITE.

The setting of REQ_OP_ZONE_APPEND is therefore deferred to the last
possible moment, in btrfs_submit_dev_bio(), and the decision whether
zone append can be used at all is cached in the btrfs_bio.

CC: Naohiro Aota
Fixes: e9b9b911e03c ("btrfs: add raid stripe tree to features enabled with debug config")
Reviewed-by: Christoph Hellwig
Reviewed-by: Naohiro Aota
Signed-off-by: Johannes Thumshirn
Signed-off-by: David Sterba
---
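Illustration for review, not part of the commit: the cached can_use_append
flag only records that the write *may* use zone append; the final opcode
is chosen per device, depending on whether the zone backing that device's
stripe is sequential write required. Below is a minimal standalone C
sketch of that decision. struct mock_dev, dev_is_sequential() and
pick_op() are simplified stand-ins for the kernel structures and helpers,
not real btrfs code.

/*
 * Standalone mock of the per-device opcode selection: build with
 * "cc -o zoneop zoneop.c" and run ./zoneop. Only the decision logic
 * mirrors btrfs_submit_dev_bio(); everything else is simplified.
 */
#include <stdbool.h>
#include <stdio.h>

enum req_op { REQ_OP_WRITE, REQ_OP_ZONE_APPEND };

struct mock_dev {
	const char *name;
	bool seq_zone;	/* zone at 'physical' is sequential write required */
};

/* Stand-in for btrfs_dev_is_sequential(); the real one looks up the zone. */
static bool dev_is_sequential(const struct mock_dev *dev,
			      unsigned long long physical)
{
	(void)physical;
	return dev->seq_zone;
}

/*
 * The fix in one function: even when the bio as a whole may use zone
 * append (can_use_append), a conventional zone on one RAID member must
 * still be written with a plain REQ_OP_WRITE.
 */
static enum req_op pick_op(const struct mock_dev *dev,
			   unsigned long long physical, bool can_use_append)
{
	if (can_use_append && dev_is_sequential(dev, physical))
		return REQ_OP_ZONE_APPEND;
	return REQ_OP_WRITE;
}

int main(void)
{
	/* A RAID1 pair mixing a sequential and a conventional zone. */
	struct mock_dev devs[] = {
		{ .name = "devA (sequential zone)",   .seq_zone = true  },
		{ .name = "devB (conventional zone)", .seq_zone = false },
	};
	bool can_use_append = true;	/* cached once per btrfs_bio */

	for (int i = 0; i < 2; i++) {
		enum req_op op = pick_op(&devs[i], 0, can_use_append);

		printf("%s -> %s\n", devs[i].name,
		       op == REQ_OP_ZONE_APPEND ? "REQ_OP_ZONE_APPEND"
						: "REQ_OP_WRITE");
	}
	return 0;
}

Caching the flag in struct btrfs_bio is what makes this possible:
btrfs_use_zone_append() is evaluated once in btrfs_submit_chunk() before
the bio is mapped and cloned per device, so each per-device submission
can consult the same decision at the last moment.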
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index fa1d321a2fb83..e4d382d3a7aea 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -480,6 +480,8 @@ static void btrfs_clone_write_end_io(struct bio *bio)
 
 static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
 {
+	u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
 	if (!dev || !dev->bdev ||
 	    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
 	    (btrfs_op(bio) == BTRFS_MAP_WRITE &&
@@ -494,12 +496,13 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
 	 * For zone append writing, bi_sector must point the beginning of the
 	 * zone
 	 */
-	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-		u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+	if (btrfs_bio(bio)->can_use_append && btrfs_dev_is_sequential(dev, physical)) {
 		u64 zone_start = round_down(physical, dev->fs_info->zone_size);
 
 		ASSERT(btrfs_dev_is_sequential(dev, physical));
 		bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+		bio->bi_opf &= ~REQ_OP_WRITE;
+		bio->bi_opf |= REQ_OP_ZONE_APPEND;
 	}
 	btrfs_debug(dev->fs_info,
 	"%s: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
@@ -747,7 +750,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 length = bio->bi_iter.bi_size;
 	u64 map_length = length;
-	bool use_append = btrfs_use_zone_append(bbio);
 	struct btrfs_io_context *bioc = NULL;
 	struct btrfs_io_stripe smap;
 	blk_status_t status;
@@ -775,8 +777,10 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 	if (bio_op(bio) == REQ_OP_WRITE && is_data_bbio(bbio))
 		bbio->orig_logical = logical;
 
+	bbio->can_use_append = btrfs_use_zone_append(bbio);
+
 	map_length = min(map_length, length);
-	if (use_append)
+	if (bbio->can_use_append)
 		map_length = btrfs_append_map_length(bbio, map_length);
 
 	if (map_length < length) {
@@ -805,11 +809,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 	}
 
 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-		if (use_append) {
-			bio->bi_opf &= ~REQ_OP_WRITE;
-			bio->bi_opf |= REQ_OP_ZONE_APPEND;
-		}
-
 		if (is_data_bbio(bbio) && bioc && bioc->use_rst) {
 			/*
 			 * No locking for the list update, as we only add to
@@ -836,7 +835,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 			status = errno_to_blk_status(ret);
 			if (status)
 				goto fail;
-		} else if (use_append ||
+		} else if (bbio->can_use_append ||
 			   (btrfs_is_zoned(fs_info) && inode &&
 			    inode->flags & BTRFS_INODE_NODATASUM)) {
 			ret = btrfs_alloc_dummy_sum(bbio);
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index 1be74209f0b8d..246c7519dff39 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -92,6 +92,9 @@ struct btrfs_bio {
 	/* Whether the csum generation for data write is async. */
 	bool async_csum;
 
+	/* Whether the bio is written using zone append. */
+	bool can_use_append;
+
 	/*
 	 * This member must come last, bio_alloc_bioset will allocate enough
 	 * bytes for entire btrfs_bio but relies on bio being last.