struct folio *folio = ctx->cur_folio;
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
- struct bio *bio = ctx->read_ctx;
+ struct bio *bio;
- if (bio)
- submit_bio(bio);
+ /* Submit the existing range if there was one. */
+ if (ctx->read_ctx)
+ ctx->ops->submit_read(iter, ctx);
/* Same as readahead_gfp_mask: */
if (ctx->rac)
bio_add_folio_nofail(bio, folio, plen,
offset_in_folio(folio, iter->pos));
ctx->read_ctx = bio;
+ ctx->read_ctx_file_offset = iter->pos;
}
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+/*
+ * Default bio-based implementation of ->read_folio_range: queue up a read
+ * of @plen bytes of ctx->cur_folio at iter->pos by (re)building a bio via
+ * iomap_read_alloc_bio().  Always returns 0; errors surface via bio
+ * completion, not this return value.
+ *
+ * Dropped 'static' and exported so filesystems supplying their own
+ * iomap_read_ops can still fall back to the generic bio path.
+ *
+ * NOTE(review): the local 'folio' is unused in the body as shown here —
+ * confirm against the full function, otherwise this trips
+ * -Wunused-variable and the declaration should be removed.
+ */
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	iomap_read_alloc_bio(iter, ctx, plen);
	return 0;
}
+EXPORT_SYMBOL_GPL(iomap_bio_read_folio_range);
+/*
+ * Generic bio-backed read ops.  Exported (GPL) so filesystems using the
+ * iomap buffered-read path without custom submission hooks can reference
+ * this table directly.  The hunk below only re-indents the initializers;
+ * the designated-initializer values are unchanged.
+ */
const struct iomap_read_ops iomap_bio_read_ops = {
-	.read_folio_range = iomap_bio_read_folio_range,
-	.submit_read = iomap_bio_submit_read,
+	.read_folio_range = iomap_bio_read_folio_range,
+	.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
struct folio *cur_folio;
struct readahead_control *rac;
void *read_ctx;
+ loff_t read_ctx_file_offset;
};
struct iomap_read_ops {
extern struct bio_set iomap_ioend_bioset;
#ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t plen);
+
extern const struct iomap_read_ops iomap_bio_read_ops;
static inline void iomap_bio_read_folio(struct folio *folio,