git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
btrfs: remove btrfs_bio::fs_info by extracting it from btrfs_bio::inode
Author:     Qu Wenruo <wqu@suse.com>
AuthorDate: Tue, 28 Oct 2025 22:05:33 +0000 (08:35 +1030)
Committer:  David Sterba <dsterba@suse.com>
CommitDate: Mon, 24 Nov 2025 21:40:16 +0000 (22:40 +0100)
Currently there is only one caller that doesn't populate
btrfs_bio::inode, and that's scrub.

The idea is that scrub doesn't want any automatic csum verification or
read-repair, as everything is handled by scrub itself.

However, that behavior is really no different from that of the metadata
inode, so we can reuse btree_inode as btrfs_bio::inode for scrub.

The only exception is in btrfs_submit_chunk(), where rst_search_commit_root
is set to true if a bbio comes from scrub or the data reloc inode.
This means we still need a way to distinguish scrub bios from regular
metadata bios, which is done with a new flag inside btrfs_bio.
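
As can be seen in the scrub.c hunk below, this is wrapped in a new
alloc_scrub_bbio() helper; its core, slightly condensed, looks like this:

	/*
	 * Scrub bios reuse the btree inode and carry the logical address as
	 * the file offset; is_scrub lets btrfs_submit_chunk() (and the repair
	 * write path) tell a scrub bio apart from a regular metadata bio.
	 */
	bbio = btrfs_bio_alloc(nr_vecs, opf, BTRFS_I(fs_info->btree_inode),
			       logical, end_io, private);
	bbio->is_scrub = true;
	bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;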

Now that btrfs_bio::inode is a mandatory parameter, fs_info can be
extracted from the inode, so btrfs_bio::fs_info can be removed, saving
8 bytes in the btrfs_bio structure.
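
For illustration, this is how the change looks from a caller's point of
view (a simplified sketch; end_io, private and the local variables are
placeholders, the signatures match the diff below):

	/* Before: fs_info was passed in, inode and offset set afterwards. */
	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info, end_io, private);
	bbio->inode = inode;
	bbio->file_offset = file_offset;

	/*
	 * After: inode and file offset are mandatory allocation parameters,
	 * and fs_info is derived from the inode whenever it is needed.
	 */
	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, file_offset,
			       end_io, private);
	fs_info = bbio->inode->root->fs_info;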

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/bio.c
fs/btrfs/bio.h
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/direct-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/btrfs/zoned.c

diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 21df48e6c4fa2091889cab6fae80f8a730e9380a..b85b6b21b5450b1eb9467387483aaa2d5c69c806 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -41,13 +41,17 @@ static bool bbio_has_ordered_extent(const struct btrfs_bio *bbio)
  * Initialize a btrfs_bio structure.  This skips the embedded bio itself as it
  * is already initialized by the block layer.
  */
-void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
                    btrfs_bio_end_io_t end_io, void *private)
 {
+       /* @inode parameter is mandatory. */
+       ASSERT(inode);
+
        memset(bbio, 0, offsetof(struct btrfs_bio, bio));
-       bbio->fs_info = fs_info;
+       bbio->inode = inode;
        bbio->end_io = end_io;
        bbio->private = private;
+       bbio->file_offset = file_offset;
        atomic_set(&bbio->pending_ios, 1);
        WRITE_ONCE(bbio->status, BLK_STS_OK);
 }
@@ -60,7 +64,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
  * a mempool.
  */
 struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
-                                 struct btrfs_fs_info *fs_info,
+                                 struct btrfs_inode *inode, u64 file_offset,
                                  btrfs_bio_end_io_t end_io, void *private)
 {
        struct btrfs_bio *bbio;
@@ -68,7 +72,7 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
 
        bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
        bbio = btrfs_bio(bio);
-       btrfs_bio_init(bbio, fs_info, end_io, private);
+       btrfs_bio_init(bbio, inode, file_offset, end_io, private);
        return bbio;
 }
 
@@ -85,9 +89,7 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
                return ERR_CAST(bio);
 
        bbio = btrfs_bio(bio);
-       btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
-       bbio->inode = orig_bbio->inode;
-       bbio->file_offset = orig_bbio->file_offset;
+       btrfs_bio_init(bbio, orig_bbio->inode, orig_bbio->file_offset, NULL, orig_bbio);
        orig_bbio->file_offset += map_length;
        if (bbio_has_ordered_extent(bbio)) {
                refcount_inc(&orig_bbio->ordered->refs);
@@ -244,9 +246,8 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
        bio_add_folio_nofail(repair_bio, folio, sectorsize, foff);
 
        repair_bbio = btrfs_bio(repair_bio);
-       btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
-       repair_bbio->inode = failed_bbio->inode;
-       repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;
+       btrfs_bio_init(repair_bbio, failed_bbio->inode, failed_bbio->file_offset + bio_offset,
+                      NULL, fbio);
 
        mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
        btrfs_debug(fs_info, "submitting repair read to mirror %d", mirror);
@@ -332,7 +333,7 @@ static void btrfs_simple_end_io(struct bio *bio)
 {
        struct btrfs_bio *bbio = btrfs_bio(bio);
        struct btrfs_device *dev = bio->bi_private;
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
 
        btrfs_bio_counter_dec(fs_info);
 
@@ -581,10 +582,11 @@ static void run_one_async_done(struct btrfs_work *work, bool do_free)
 
 static bool should_async_write(struct btrfs_bio *bbio)
 {
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        bool auto_csum_mode = true;
 
 #ifdef CONFIG_BTRFS_EXPERIMENTAL
-       struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);
 
        if (csum_mode == BTRFS_OFFLOAD_CSUM_FORCE_OFF)
@@ -594,7 +596,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
 #endif
 
        /* Submit synchronously if the checksum implementation is fast. */
-       if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &bbio->fs_info->flags))
+       if (auto_csum_mode && test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
                return false;
 
        /*
@@ -605,7 +607,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
                return false;
 
        /* Zoned devices require I/O to be submitted in order. */
-       if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))
+       if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(fs_info))
                return false;
 
        return true;
@@ -620,7 +622,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
                                struct btrfs_io_context *bioc,
                                struct btrfs_io_stripe *smap, int mirror_num)
 {
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        struct async_submit_bio *async;
 
        async = kmalloc(sizeof(*async), GFP_NOFS);
@@ -639,11 +641,12 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
 
 static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
 {
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        unsigned int nr_segs;
        int sector_offset;
 
-       map_length = min(map_length, bbio->fs_info->max_zone_append_size);
-       sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
+       map_length = min(map_length, fs_info->max_zone_append_size);
+       sector_offset = bio_split_rw_at(&bbio->bio, &fs_info->limits,
                                        &nr_segs, map_length);
        if (sector_offset) {
                /*
@@ -651,7 +654,7 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
                 * sectorsize and thus cause unaligned I/Os.  Fix that by
                 * always rounding down to the nearest boundary.
                 */
-               return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, bbio->fs_info->sectorsize);
+               return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, fs_info->sectorsize);
        }
        return map_length;
 }
@@ -659,7 +662,7 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
 static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 {
        struct btrfs_inode *inode = bbio->inode;
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct bio *bio = &bbio->bio;
        u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
        u64 length = bio->bi_iter.bi_size;
@@ -670,7 +673,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
        blk_status_t status;
        int ret;
 
-       if (!bbio->inode || btrfs_is_data_reloc_root(inode->root))
+       if (bbio->is_scrub || btrfs_is_data_reloc_root(inode->root))
                smap.rst_search_commit_root = true;
        else
                smap.rst_search_commit_root = false;
@@ -734,7 +737,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
                 * Csum items for reloc roots have already been cloned at this
                 * point, so they are handled as part of the no-checksum case.
                 */
-               if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
+               if (!(inode->flags & BTRFS_INODE_NODATASUM) &&
                    !test_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state) &&
                    !btrfs_is_data_reloc_root(inode->root)) {
                        if (should_async_write(bbio) &&
@@ -782,7 +785,7 @@ end_bbio:
 static void assert_bbio_alignment(struct btrfs_bio *bbio)
 {
 #ifdef CONFIG_BTRFS_ASSERT
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        struct bio_vec bvec;
        struct bvec_iter iter;
        const u32 blocksize = fs_info->sectorsize;
@@ -885,16 +888,16 @@ out_counter_dec:
  */
 void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace)
 {
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        u64 logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
        u64 length = bbio->bio.bi_iter.bi_size;
        struct btrfs_io_stripe smap = { 0 };
        int ret;
 
-       ASSERT(fs_info);
        ASSERT(mirror_num > 0);
        ASSERT(btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE);
-       ASSERT(!bbio->inode);
+       ASSERT(!is_data_inode(bbio->inode));
+       ASSERT(bbio->is_scrub);
 
        btrfs_bio_counter_inc_blocked(fs_info);
        ret = btrfs_map_repair_block(fs_info, &smap, logical, length, mirror_num);
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index 3cc0fe23898f7524e6671ba5292c710c28c31a82..5d20f959e12d3650fe39ab4cfb950d719f49fa49 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -27,7 +27,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
 struct btrfs_bio {
        /*
         * Inode and offset into it that this I/O operates on.
-        * Only set for data I/O.
+        *
+        * If the inode is a data one, csum verification and read-repair
+        * will be done automatically.
+        * If the inode is a metadata one, everything is handled by the caller.
         */
        struct btrfs_inode *inode;
        u64 file_offset;
@@ -69,14 +72,17 @@ struct btrfs_bio {
        atomic_t pending_ios;
        struct work_struct end_io_work;
 
-       /* File system that this I/O operates on. */
-       struct btrfs_fs_info *fs_info;
-
        /* Save the first error status of split bio. */
        blk_status_t status;
 
        /* Use the commit root to look up csums (data read bio only). */
        bool csum_search_commit_root;
+
+       /*
+        * Since scrub will reuse btree inode, we need this flag to distinguish
+        * scrub bios.
+        */
+       bool is_scrub;
        /*
         * This member must come last, bio_alloc_bioset will allocate enough
         * bytes for entire btrfs_bio but relies on bio being last.
@@ -92,10 +98,10 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
 int __init btrfs_bioset_init(void);
 void __cold btrfs_bioset_exit(void);
 
-void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
+void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, u64 file_offset,
                    btrfs_bio_end_io_t end_io, void *private);
 struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
-                                 struct btrfs_fs_info *fs_info,
+                                 struct btrfs_inode *inode, u64 file_offset,
                                  btrfs_bio_end_io_t end_io, void *private);
 void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status);
 
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bacad18357b3386cb38b8c0b3ddebbb5baf76255..8c3899832a1aa162c8bbb4b12b5a2c030dfa2790 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -67,9 +67,7 @@ static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
 
        bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
                                          GFP_NOFS, &btrfs_compressed_bioset));
-       btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
-       bbio->inode = inode;
-       bbio->file_offset = start;
+       btrfs_bio_init(bbio, inode, start, end_io, NULL);
        return to_compressed_bio(bbio);
 }
 
@@ -354,7 +352,7 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
 
 static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
 {
-       struct btrfs_fs_info *fs_info = cb->bbio.fs_info;
+       struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
        struct bio *bio = &cb->bbio.bio;
        u32 offset = 0;
 
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index c6812d5fcab7910eead5c964ffb84419fc2529d0..062ebd9c2d32d199277407ea900f3befef1c9e97 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include "bio.h"
 #include "fs.h"
+#include "btrfs_inode.h"
 
 struct address_space;
 struct inode;
@@ -74,7 +75,7 @@ struct compressed_bio {
 
 static inline struct btrfs_fs_info *cb_to_fs_info(const struct compressed_bio *cb)
 {
-       return cb->bbio.fs_info;
+       return cb->bbio.inode->root->fs_info;
 }
 
 /* @range_end must be exclusive. */
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index f225cc3fd3a1097caff821ef162c2a658590992a..962fccceffd637e424dc959213603ba64d8b9414 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -715,10 +715,8 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
                container_of(bbio, struct btrfs_dio_private, bbio);
        struct btrfs_dio_data *dio_data = iter->private;
 
-       btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
+       btrfs_bio_init(bbio, BTRFS_I(iter->inode), file_offset,
                       btrfs_dio_end_io, bio->bi_private);
-       bbio->inode = BTRFS_I(iter->inode);
-       bbio->file_offset = file_offset;
 
        dip->file_offset = file_offset;
        dip->bytes = bio->bi_iter.bi_size;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cb680cdeb77d200f246dd1632fc75bcba9e3c813..b25a2b45047e9e5d9787af83f59c6cb8936e26ee 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -517,7 +517,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 le
  */
 static void end_bbio_data_write(struct btrfs_bio *bbio)
 {
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        struct bio *bio = &bbio->bio;
        int error = blk_status_to_errno(bio->bi_status);
        struct folio_iter fi;
@@ -573,7 +573,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
  */
 static void end_bbio_data_read(struct btrfs_bio *bbio)
 {
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        struct bio *bio = &bbio->bio;
        struct folio_iter fi;
 
@@ -738,12 +738,10 @@ static void alloc_new_bio(struct btrfs_inode *inode,
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_bio *bbio;
 
-       bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
-                              bio_ctrl->end_io_func, NULL);
+       bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode,
+                              file_offset, bio_ctrl->end_io_func, NULL);
        bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
        bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint;
-       bbio->inode = inode;
-       bbio->file_offset = file_offset;
        bio_ctrl->bbio = bbio;
        bio_ctrl->len_to_oe_boundary = U32_MAX;
        bio_ctrl->next_file_offset = file_offset;
@@ -2224,12 +2222,11 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 
        bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
                               REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
-                              eb->fs_info, end_bbio_meta_write, eb);
+                              BTRFS_I(fs_info->btree_inode), eb->start,
+                              end_bbio_meta_write, eb);
        bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
        bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
        wbc_init_bio(wbc, &bbio->bio);
-       bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
-       bbio->file_offset = eb->start;
        for (int i = 0; i < num_extent_folios(eb); i++) {
                struct folio *folio = eb->folios[i];
                u64 range_start = max_t(u64, eb->start, folio_pos(folio));
@@ -3844,6 +3841,7 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
 int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
                                    const struct btrfs_tree_parent_check *check)
 {
+       struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_bio *bbio;
 
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
@@ -3877,11 +3875,9 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
        refcount_inc(&eb->refs);
 
        bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
-                              REQ_OP_READ | REQ_META, eb->fs_info,
-                              end_bbio_meta_read, eb);
+                              REQ_OP_READ | REQ_META, BTRFS_I(fs_info->btree_inode),
+                              eb->start, end_bbio_meta_read, eb);
        bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
-       bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
-       bbio->file_offset = eb->start;
        memcpy(&bbio->parent_check, check, sizeof(*check));
        for (int i = 0; i < num_extent_folios(eb); i++) {
                struct folio *folio = eb->folios[i];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3c8bcdcf525e5d6f9a73e34a870bc545a7437c1f..a2e8d52a2a87ae0c05e6a18698f709b28d43f758 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9404,7 +9404,6 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
                                          u64 disk_bytenr, u64 disk_io_size,
                                          struct page **pages, void *uring_ctx)
 {
-       struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_encoded_read_private *priv, sync_priv;
        struct completion sync_reads;
        unsigned long i = 0;
@@ -9429,10 +9428,9 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
        priv->status = 0;
        priv->uring_ctx = uring_ctx;
 
-       bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+       bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
                               btrfs_encoded_read_endio, priv);
        bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
-       bbio->inode = inode;
 
        do {
                size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
@@ -9441,10 +9439,9 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
                        refcount_inc(&priv->pending_refs);
                        btrfs_submit_bbio(bbio, 0);
 
-                       bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
+                       bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
                                               btrfs_encoded_read_endio, priv);
                        bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
-                       bbio->inode = inode;
                        continue;
                }
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 33c9cb91f0a948c8d3264bdd95bfa8aafc098cd5..3dbb02dbfffb6c6d0a982699b6df06e5241e9d0f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -927,10 +927,11 @@ static int calc_next_mirror(int mirror, int num_copies)
 static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *stripe,
                                 int sector_nr)
 {
+       struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
        void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
        int ret;
 
-       ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), bbio->fs_info->sectorsize,
+       ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), fs_info->sectorsize,
                           offset_in_page(kaddr));
        /*
         * Caller should ensure the bbio has enough size.
@@ -940,7 +941,21 @@ static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *st
         * to create the minimal amount of bio vectors, for fs block size < page
         * size cases.
         */
-       ASSERT(ret == bbio->fs_info->sectorsize);
+       ASSERT(ret == fs_info->sectorsize);
+}
+
+static struct btrfs_bio *alloc_scrub_bbio(struct btrfs_fs_info *fs_info,
+                                         unsigned int nr_vecs, blk_opf_t opf,
+                                         u64 logical,
+                                         btrfs_bio_end_io_t end_io, void *private)
+{
+       struct btrfs_bio *bbio;
+
+       bbio = btrfs_bio_alloc(nr_vecs, opf, BTRFS_I(fs_info->btree_inode),
+                              logical, end_io, private);
+       bbio->is_scrub = true;
+       bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+       return bbio;
 }
 
 static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
@@ -966,12 +981,10 @@ static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
                        bbio = NULL;
                }
 
-               if (!bbio) {
-                       bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
-                               fs_info, scrub_repair_read_endio, stripe);
-                       bbio->bio.bi_iter.bi_sector = (stripe->logical +
-                               (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
-               }
+               if (!bbio)
+                       bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+                                               stripe->logical + (i << fs_info->sectorsize_bits),
+                                               scrub_repair_read_endio, stripe);
 
                scrub_bio_add_sector(bbio, stripe, i);
        }
@@ -1350,13 +1363,10 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str
                        scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
                        bbio = NULL;
                }
-               if (!bbio) {
-                       bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
-                                              fs_info, scrub_write_endio, stripe);
-                       bbio->bio.bi_iter.bi_sector = (stripe->logical +
-                               (sector_nr << fs_info->sectorsize_bits)) >>
-                               SECTOR_SHIFT;
-               }
+               if (!bbio)
+                       bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_WRITE,
+                                       stripe->logical + (sector_nr << fs_info->sectorsize_bits),
+                                       scrub_write_endio, stripe);
                scrub_bio_add_sector(bbio, stripe, sector_nr);
        }
        if (bbio)
@@ -1847,9 +1857,8 @@ static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
                                continue;
                        }
 
-                       bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
-                                              fs_info, scrub_read_endio, stripe);
-                       bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+                       bbio = alloc_scrub_bbio(fs_info, stripe->nr_sectors, REQ_OP_READ,
+                                               logical, scrub_read_endio, stripe);
                }
 
                scrub_bio_add_sector(bbio, stripe, i);
@@ -1886,10 +1895,8 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
                return;
        }
 
-       bbio = btrfs_bio_alloc(BTRFS_STRIPE_LEN >> min_folio_shift, REQ_OP_READ, fs_info,
-                              scrub_read_endio, stripe);
-
-       bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
+       bbio = alloc_scrub_bbio(fs_info, BTRFS_STRIPE_LEN >> min_folio_shift, REQ_OP_READ,
+                               stripe->logical, scrub_read_endio, stripe);
        /* Read the whole range inside the chunk boundary. */
        for (unsigned int cur = 0; cur < nr_sectors; cur++)
                scrub_bio_add_sector(bbio, stripe, cur);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 9b2af6210867e7a9c62a0b3f660d5549b1cb5329..41a4a7d50bd3d8551af4e34007084bda91697a7a 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1808,14 +1808,14 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio)
 {
        u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
        struct btrfs_inode *inode = bbio->inode;
-       struct btrfs_fs_info *fs_info = bbio->fs_info;
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_block_group *cache;
        bool ret = false;
 
        if (!btrfs_is_zoned(fs_info))
                return false;
 
-       if (!inode || !is_data_inode(inode))
+       if (!is_data_inode(inode))
                return false;
 
        if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)