git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
xfs: fix number of GC bvecs
author: Christoph Hellwig <hch@lst.de>
Tue, 7 Apr 2026 14:05:24 +0000 (16:05 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 7 Apr 2026 14:22:24 +0000 (08:22 -0600)
GC scratch allocations can wrap around and use the same buffer twice, and
the current code fails to account for that.  So far this worked due to
rounding in the block layer, but changes to the bio allocator drop the
over-provisioning and generic/256 or generic/361 will now usually fail
when running against the current block tree.

Simplify the allocation to always pass the maximum value that is easier to
verify, as a saving of up to one bvec per allocation isn't worth the
effort to verify a complicated calculated value.

Fixes: 102f444b57b3 ("xfs: rework zone GC buffer management")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
Link: https://patch.msgid.link/20260407140538.633364-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/xfs/xfs_zone_gc.c

index 7efeecd2d85f5162d1dfab227e3a5118e15042c1..f279dcca53cc2a8309bd3dedc4fff8a0562d2f0c 100644 (file)
@@ -671,7 +671,6 @@ xfs_zone_gc_start_chunk(
        struct xfs_inode        *ip;
        struct bio              *bio;
        xfs_daddr_t             daddr;
-       unsigned int            len;
        bool                    is_seq;
 
        if (xfs_is_shutdown(mp))
@@ -686,15 +685,16 @@ xfs_zone_gc_start_chunk(
                return false;
        }
 
-       len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
-       bio = bio_alloc_bioset(bdev,
-                       min(howmany(len, XFS_GC_BUF_SIZE) + 1, XFS_GC_NR_BUFS),
-                       REQ_OP_READ, GFP_NOFS, &data->bio_set);
-
+       /*
+        * Scratch allocation can wrap around to the same buffer again,
+        * provision an extra bvec for that case.
+        */
+       bio = bio_alloc_bioset(bdev, XFS_GC_NR_BUFS + 1, REQ_OP_READ, GFP_NOFS,
+                       &data->bio_set);
        chunk = container_of(bio, struct xfs_gc_bio, bio);
        chunk->ip = ip;
        chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset);
-       chunk->len = len;
+       chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount);
        chunk->old_startblock =
                xfs_rgbno_to_rtb(iter->victim_rtg, irec.rm_startblock);
        chunk->new_daddr = daddr;
@@ -708,8 +708,9 @@ xfs_zone_gc_start_chunk(
        bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
        bio->bi_end_io = xfs_zone_gc_end_io;
        xfs_zone_gc_add_data(chunk);
-       data->scratch_head = (data->scratch_head + len) % data->scratch_size;
-       data->scratch_available -= len;
+       data->scratch_head =
+               (data->scratch_head + chunk->len) % data->scratch_size;
+       data->scratch_available -= chunk->len;
 
        XFS_STATS_INC(mp, xs_gc_read_calls);