}
/**
- * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be added
* @len_align_mask: the mask to align the total size to, 0 for any length
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
*/
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
unsigned len_align_mask)
{
	int ret = 0;

	if (bio->bi_vcnt)
		return bio_iov_iter_align_down(bio, iter, len_align_mask);
	return ret;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
+EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
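
To make the @len_align_mask argument concrete, here is a small, self-contained illustration (my own sketch, not part of the patch): the mask is simply the required alignment minus one, the gathered length is rounded down against it, and whatever falls below the boundary stays in the iterator for a later call.

/*
 * Illustration only, not from the patch: the rounding that
 * @len_align_mask requests.  A 512-byte logical block device gives a
 * mask of 511, so 4097 gathered bytes are trimmed to 4096 and the
 * trailing byte is left in the iterator.
 */
#include <stdio.h>

int main(void)
{
	unsigned int len_align_mask = 512 - 1;	/* bdev_logical_block_size() - 1 */
	size_t gathered = 4097;			/* bytes added to the bio */
	size_t aligned = gathered & ~(size_t)len_align_mask;

	printf("gathered %zu -> aligned %zu (%zu bytes pushed back)\n",
	       gathered, aligned, gathered - aligned);
	return 0;
}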
static void submit_bio_wait_endio(struct bio *bio)
{
	/*
	 * No alignment requirements on our part to support arbitrary
	 * passthrough commands.
	 */
- ret = bio_iov_iter_get_pages_aligned(bio, iter, 0);
+ ret = bio_iov_iter_get_pages(bio, iter, 0);
if (ret)
goto out_put;
ret = blk_rq_append_bio(rq, bio);
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
size_t len, enum req_op op);
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
unsigned len_align_mask);
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
struct iov_iter *iter, struct block_device *bdev)
{
- return bio_iov_iter_get_pages_aligned(bio, iter,
+ return bio_iov_iter_get_pages(bio, iter,
bdev_logical_block_size(bdev) - 1);
}
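
Putting the hunks together, callers now pick the alignment policy at the call site: 0 for passthrough commands, the logical-block mask for regular block I/O via the bio_iov_iter_get_bdev_pages() helper. The sketch below is illustrative only; the two wrapper names are invented here and do not exist in the tree.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

/* Illustrative wrappers, not part of the patch. */
static int example_map_passthrough(struct bio *bio, struct iov_iter *iter)
{
	/* passthrough commands accept arbitrary lengths: no alignment mask */
	return bio_iov_iter_get_pages(bio, iter, 0);
}

static int example_map_block_io(struct bio *bio, struct iov_iter *iter,
				struct block_device *bdev)
{
	/* regular block I/O stays aligned to the device's logical block size */
	return bio_iov_iter_get_bdev_pages(bio, iter, bdev);
}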