 	struct readahead_control *rac;
 };
 
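+/* Submit the read bio accumulated in @ctx, if one is pending. */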
+static void iomap_bio_submit_read(struct iomap_readpage_ctx *ctx)
+{
+	struct bio *bio = ctx->bio;
+
+	if (bio)
+		submit_bio(bio);
+}
+
 static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
 		struct iomap_readpage_ctx *ctx, loff_t pos, size_t plen)
 {
 		gfp_t orig_gfp = gfp;
 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
 
-		if (ctx->bio)
-			submit_bio(ctx->bio);
+		iomap_bio_submit_read(ctx);
 
 		if (ctx->rac) /* same as readahead_gfp_mask */
 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.status = iomap_read_folio_iter(&iter, &ctx);
 
-	if (ctx.bio) {
-		submit_bio(ctx.bio);
-		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
-	} else {
-		WARN_ON_ONCE(ctx.cur_folio_in_bio);
+	iomap_bio_submit_read(&ctx);
+
+	if (!ctx.cur_folio_in_bio)
 		folio_unlock(folio);
-	}
 
 	/*
 	 * Just like mpage_readahead and block_read_full_folio, we always
 	while (iomap_iter(&iter, ops) > 0)
 		iter.status = iomap_readahead_iter(&iter, &ctx);
 
-	if (ctx.bio)
-		submit_bio(ctx.bio);
-	if (ctx.cur_folio) {
-		if (!ctx.cur_folio_in_bio)
-			folio_unlock(ctx.cur_folio);
-	}
+	iomap_bio_submit_read(&ctx);
+
+	if (ctx.cur_folio && !ctx.cur_folio_in_bio)
+		folio_unlock(ctx.cur_folio);
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);