* For a multi-segment *iter, this function only adds pages from the next
* non-empty segment of the iov iterator.
*/
-static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask)
{
iov_iter_extraction_t extraction_flags = 0;
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
struct page **pages = (struct page **)bv;
ssize_t size;
unsigned int num_pages, i = 0;
- size_t offset, folio_offset, left, len;
+ size_t offset, folio_offset, left, len, trim;
int ret = 0;
/*
nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
- if (bio->bi_bdev) {
- size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
+ trim = size & len_align_mask;
+ if (trim) {
iov_iter_revert(iter, trim);
size -= trim;
}
}
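
Aside (not part of the patch): the trim works because len_align_mask is expected
to be a power-of-two block size minus one, so size & mask is size modulo that
block size, and a mask of 0 disables trimming entirely. A minimal userspace
sketch of the same arithmetic, with made-up values and a hypothetical
trim_to_mask() helper:

#include <assert.h>
#include <stddef.h>

/* Hypothetical helper mirroring the trim computation above. */
static size_t trim_to_mask(size_t size, unsigned len_align_mask)
{
	/* Bytes past the last aligned boundary; 0 when already aligned
	 * or when the caller passed a mask of 0 (no alignment needed). */
	return size & len_align_mask;
}

int main(void)
{
	assert(trim_to_mask(4096 + 100, 512 - 1) == 100);	/* 512B logical blocks */
	assert(trim_to_mask(4096, 512 - 1) == 0);		/* already aligned */
	assert(trim_to_mask(4096 + 100, 0) == 0);		/* mask 0: keep everything */
	return 0;
}
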
/**
- * bio_iov_iter_get_pages - add user or kernel pages to a bio
+ * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be added
+ * @len_align_mask: the mask to align each vector size to, 0 for any length
*
* This takes either an iterator pointing to user memory, or one pointing to
* kernel pages (BVEC iterator). If we're adding user pages, we pin them and
* map them into the kernel. The function tries, but does not guarantee, to pin
* as many pages as fit into the bio, or are requested in @iter, whichever is
* smaller. If MM encounters an error pinning the requested pages, it stops.
* Error is returned only if 0 pages could be pinned.
*/
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask)
{
int ret = 0;
if (iov_iter_extract_will_pin(iter))
bio_set_flag(bio, BIO_PAGE_PINNED);
do {
- ret = __bio_iov_iter_get_pages(bio, iter);
+ ret = __bio_iov_iter_get_pages(bio, iter, len_align_mask);
} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
return bio->bi_vcnt ? 0 : ret;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
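
Aside (not part of the patch): a caller that wants an alignment other than the
device logical block size can now pass its own mask. The sketch below is
hypothetical; example_fill_bio_4k() is not introduced by this series, and it
assumes the bio was already allocated with enough free bvecs for the iterator:

/* Hypothetical example: pin user pages into @bio, keeping the total
 * added length a multiple of 4KiB (mask SZ_4K - 1 == 0xfff). */
static int example_fill_bio_4k(struct bio *bio, struct iov_iter *iter)
{
	int ret;

	ret = bio_iov_iter_get_pages_aligned(bio, iter, SZ_4K - 1);
	if (ret)
		return ret;	/* no pages at all could be pinned */

	/* bio->bi_iter.bi_size now holds the 4KiB-aligned length added. */
	return 0;
}
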
static void submit_bio_wait_endio(struct bio *bio)
{
if (iocb->ki_flags & IOCB_ATOMIC)
bio.bi_opf |= REQ_ATOMIC;
- ret = bio_iov_iter_get_pages(&bio, iter);
+ ret = bio_iov_iter_get_bdev_pages(&bio, iter, bdev);
if (unlikely(ret))
goto out;
ret = bio.bi_iter.bi_size;
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(bio, iter);
+ ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
if (unlikely(ret)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
*/
bio_iov_bvec_set(bio, iter);
} else {
- ret = bio_iov_iter_get_pages(bio, iter);
+ ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
if (unlikely(ret))
goto out_bio_put;
}
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
size_t len, enum req_op op);
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
+int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask);
+
+static inline int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+ return bio_iov_iter_get_pages_aligned(bio, iter, 0);
+}
+
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}
+static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
+ struct iov_iter *iter, struct block_device *bdev)
+{
+ return bio_iov_iter_get_pages_aligned(bio, iter,
+ bdev_logical_block_size(bdev) - 1);
+}
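
Aside (not part of the patch): an end-to-end sketch of how the new bdev wrapper
composes with the existing bio API. example_sync_read() and its simplifications
(no REQ_* flags, no polling, no handling of a partially consumed iterator) are
illustrative assumptions, not code added by this series:

/* Hypothetical example: synchronously read iov_iter_count(iter) bytes
 * from @bdev starting at @sector, trimming the pinned length to the
 * device's logical block size via bio_iov_iter_get_bdev_pages(). */
static int example_sync_read(struct block_device *bdev, sector_t sector,
			     struct iov_iter *iter)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			REQ_OP_READ, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = sector;

	ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
	if (ret)
		goto out_put;

	ret = submit_bio_wait(bio);
	/* The read wrote into the pinned pages, so mark them dirty. */
	bio_release_pages(bio, true);
out_put:
	bio_put(bio);
	return ret;
}
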
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */