git.ipfire.org Git - thirdparty/linux.git/commitdiff
iomap: move buffered io bio logic into new file
author: Christoph Hellwig [1] <hch@lst.de>
Fri, 26 Sep 2025 00:26:05 +0000 (17:26 -0700)
committer: Christian Brauner <brauner@kernel.org>
Wed, 5 Nov 2025 11:57:23 +0000 (12:57 +0100)
Move bio logic in the buffered io code into its own file and remove
CONFIG_BLOCK gating for iomap read/readahead.

[1] https://lore.kernel.org/linux-fsdevel/aMK2GuumUf93ep99@infradead.org/

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/Makefile
fs/iomap/bio.c [new file with mode: 0644]
fs/iomap/buffered-io.c
fs/iomap/internal.h

index f7e1c8534c4641c2e66ed66a91b9e20935b004aa..a572b8808524aac943267127e1b6682111935c58 100644 (file)
@@ -14,5 +14,6 @@ iomap-y                               += trace.o \
 iomap-$(CONFIG_BLOCK)          += direct-io.o \
                                   ioend.o \
                                   fiemap.o \
-                                  seek.o
+                                  seek.o \
+                                  bio.o
 iomap-$(CONFIG_SWAP)           += swapfile.o
diff --git a/fs/iomap/bio.c b/fs/iomap/bio.c
new file mode 100644 (file)
index 0000000..fc045f2
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (C) 2016-2023 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+#include "trace.h"
+
+static void iomap_read_end_io(struct bio *bio)
+{
+       int error = blk_status_to_errno(bio->bi_status);
+       struct folio_iter fi;
+
+       bio_for_each_folio_all(fi, bio)
+               iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+       bio_put(bio);
+}
+
+static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+{
+       struct bio *bio = ctx->read_ctx;
+
+       if (bio)
+               submit_bio(bio);
+}
+
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+               struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+       struct folio *folio = ctx->cur_folio;
+       const struct iomap *iomap = &iter->iomap;
+       loff_t pos = iter->pos;
+       size_t poff = offset_in_folio(folio, pos);
+       loff_t length = iomap_length(iter);
+       sector_t sector;
+       struct bio *bio = ctx->read_ctx;
+
+       sector = iomap_sector(iomap, pos);
+       if (!bio || bio_end_sector(bio) != sector ||
+           !bio_add_folio(bio, folio, plen, poff)) {
+               gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+               gfp_t orig_gfp = gfp;
+               unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
+
+               if (bio)
+                       submit_bio(bio);
+
+               if (ctx->rac) /* same as readahead_gfp_mask */
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
+               bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+                                    gfp);
+               /*
+                * If the bio_alloc fails, try it again for a single page to
+                * avoid having to deal with partial page reads.  This emulates
+                * what do_mpage_read_folio does.
+                */
+               if (!bio)
+                       bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
+               if (ctx->rac)
+                       bio->bi_opf |= REQ_RAHEAD;
+               bio->bi_iter.bi_sector = sector;
+               bio->bi_end_io = iomap_read_end_io;
+               bio_add_folio_nofail(bio, folio, plen, poff);
+               ctx->read_ctx = bio;
+       }
+       return 0;
+}
+
+const struct iomap_read_ops iomap_bio_read_ops = {
+       .read_folio_range = iomap_bio_read_folio_range,
+       .submit_read = iomap_bio_submit_read,
+};
+EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
+
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+               struct folio *folio, loff_t pos, size_t len)
+{
+       const struct iomap *srcmap = iomap_iter_srcmap(iter);
+       struct bio_vec bvec;
+       struct bio bio;
+
+       bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
+       bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+       bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
+       return submit_bio_wait(&bio);
+}
index d7100a5f953a5ddf4d825eda17d5d60e37c031a0..0d88a4f3c79170fea92abdbce6b5642ba9ea3343 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/writeback.h>
 #include <linux/swap.h>
 #include <linux/migrate.h>
+#include "internal.h"
 #include "trace.h"
 
 #include "../internal.h"
@@ -327,7 +328,6 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
        return 0;
 }
 
-#ifdef CONFIG_BLOCK
 void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
                int error)
 {
@@ -351,71 +351,6 @@ void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
 
-static void iomap_read_end_io(struct bio *bio)
-{
-       int error = blk_status_to_errno(bio->bi_status);
-       struct folio_iter fi;
-
-       bio_for_each_folio_all(fi, bio)
-               iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
-       bio_put(bio);
-}
-
-static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
-{
-       struct bio *bio = ctx->read_ctx;
-
-       if (bio)
-               submit_bio(bio);
-}
-
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
-               struct iomap_read_folio_ctx *ctx, size_t plen)
-{
-       struct folio *folio = ctx->cur_folio;
-       const struct iomap *iomap = &iter->iomap;
-       loff_t pos = iter->pos;
-       size_t poff = offset_in_folio(folio, pos);
-       loff_t length = iomap_length(iter);
-       sector_t sector;
-       struct bio *bio = ctx->read_ctx;
-
-       sector = iomap_sector(iomap, pos);
-       if (!bio || bio_end_sector(bio) != sector ||
-           !bio_add_folio(bio, folio, plen, poff)) {
-               gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-               gfp_t orig_gfp = gfp;
-               unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
-               iomap_bio_submit_read(ctx);
-
-               if (ctx->rac) /* same as readahead_gfp_mask */
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN;
-               bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
-                                    gfp);
-               /*
-                * If the bio_alloc fails, try it again for a single page to
-                * avoid having to deal with partial page reads.  This emulates
-                * what do_mpage_read_folio does.
-                */
-               if (!bio)
-                       bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-               if (ctx->rac)
-                       bio->bi_opf |= REQ_RAHEAD;
-               bio->bi_iter.bi_sector = sector;
-               bio->bi_end_io = iomap_read_end_io;
-               bio_add_folio_nofail(bio, folio, plen, poff);
-               ctx->read_ctx = bio;
-       }
-       return 0;
-}
-
-const struct iomap_read_ops iomap_bio_read_ops = {
-       .read_folio_range       = iomap_bio_read_folio_range,
-       .submit_read            = iomap_bio_submit_read,
-};
-EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
-
 static void iomap_read_init(struct folio *folio)
 {
        struct iomap_folio_state *ifs = folio->private;
@@ -656,27 +591,6 @@ void iomap_readahead(const struct iomap_ops *ops,
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
-static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
-               struct folio *folio, loff_t pos, size_t len)
-{
-       const struct iomap *srcmap = iomap_iter_srcmap(iter);
-       struct bio_vec bvec;
-       struct bio bio;
-
-       bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
-       bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
-       bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
-       return submit_bio_wait(&bio);
-}
-#else
-static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
-               struct folio *folio, loff_t pos, size_t len)
-{
-       WARN_ON_ONCE(1);
-       return -EIO;
-}
-#endif /* CONFIG_BLOCK */
-
 /*
  * iomap_is_partially_uptodate checks whether blocks within a folio are
  * uptodate or not.
index d05cb3aed96e790dc1f8edefb2b9c88c79c4dee2..3a4e4aad2bd12865da97fa565ae2b7b9de5598ee 100644 (file)
@@ -6,4 +6,16 @@
 
 u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
 
+#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+               struct folio *folio, loff_t pos, size_t len);
+#else
+static inline int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
+               struct folio *folio, loff_t pos, size_t len)
+{
+       WARN_ON_ONCE(1);
+       return -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
 #endif /* _IOMAP_INTERNAL_H */