git.ipfire.org Git - thirdparty/linux.git/commitdiff
iomap: store read/readahead bio generically
authorJoanne Koong <joannelkoong@gmail.com>
Fri, 26 Sep 2025 00:25:58 +0000 (17:25 -0700)
committerChristian Brauner <brauner@kernel.org>
Mon, 20 Oct 2025 18:21:25 +0000 (20:21 +0200)
Store the iomap_readpage_ctx bio generically as a "void *read_ctx".
This makes the read/readahead interface more generic, which allows it to
be used by filesystems that may not be block-based and may not have
CONFIG_BLOCK set.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Tested-by: syzbot@syzkaller.appspotmail.com
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/buffered-io.c

index f8b985bb5a6b5d19781885766ac48a2b572b9b37..b06b532033adf8292b80043085d19506fcf679bc 100644 (file)
@@ -363,13 +363,13 @@ static void iomap_read_end_io(struct bio *bio)
 struct iomap_readpage_ctx {
        struct folio            *cur_folio;
        bool                    cur_folio_in_bio;
-       struct bio              *bio;
+       void                    *read_ctx;
        struct readahead_control *rac;
 };
 
 static void iomap_bio_submit_read(struct iomap_readpage_ctx *ctx)
 {
-       struct bio *bio = ctx->bio;
+       struct bio *bio = ctx->read_ctx;
 
        if (bio)
                submit_bio(bio);
@@ -384,6 +384,7 @@ static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
        size_t poff = offset_in_folio(folio, pos);
        loff_t length = iomap_length(iter);
        sector_t sector;
+       struct bio *bio = ctx->read_ctx;
 
        ctx->cur_folio_in_bio = true;
        if (ifs) {
@@ -393,9 +394,8 @@ static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
        }
 
        sector = iomap_sector(iomap, pos);
-       if (!ctx->bio ||
-           bio_end_sector(ctx->bio) != sector ||
-           !bio_add_folio(ctx->bio, folio, plen, poff)) {
+       if (!bio || bio_end_sector(bio) != sector ||
+           !bio_add_folio(bio, folio, plen, poff)) {
                gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
                gfp_t orig_gfp = gfp;
                unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -404,22 +404,21 @@ static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
 
                if (ctx->rac) /* same as readahead_gfp_mask */
                        gfp |= __GFP_NORETRY | __GFP_NOWARN;
-               ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
-                                    REQ_OP_READ, gfp);
+               bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+                                    gfp);
                /*
                 * If the bio_alloc fails, try it again for a single page to
                 * avoid having to deal with partial page reads.  This emulates
                 * what do_mpage_read_folio does.
                 */
-               if (!ctx->bio) {
-                       ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
-                                            orig_gfp);
-               }
+               if (!bio)
+                       bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
                if (ctx->rac)
-                       ctx->bio->bi_opf |= REQ_RAHEAD;
-               ctx->bio->bi_iter.bi_sector = sector;
-               ctx->bio->bi_end_io = iomap_read_end_io;
-               bio_add_folio_nofail(ctx->bio, folio, plen, poff);
+                       bio->bi_opf |= REQ_RAHEAD;
+               bio->bi_iter.bi_sector = sector;
+               bio->bi_end_io = iomap_read_end_io;
+               bio_add_folio_nofail(bio, folio, plen, poff);
+               ctx->read_ctx = bio;
        }
 }