// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
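/*
 * Descriptive note (added): this tracks a bounce-buffered ("copied") user
 * mapping.  It is stashed in bio->bi_private so that bio_uncopy_user() can
 * copy read data back to user space and free the bounce pages at completion.
 */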
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
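/*
 * Descriptive note (added): this is the bounce-buffer path.  Kernel pages
 * (freshly allocated, or taken from a caller-supplied rq_map_data pool)
 * are added to the bio; user data is copied in up front for writes, and
 * reads are copied back in bio_uncopy_user() at completion time.
 */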
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}
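/*
 * Descriptive note (added): this is the zero-copy path.  The user pages are
 * pinned and added to the bio directly, honoring the queue's hardware
 * sector and DMA alignment limits.
 */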
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

 out_unmap:
	bio_release_pages(bio, false);
	bio_uninit(bio);
	kfree(bio);
	return ret;
}
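/*
 * Descriptive note (added): vmalloc buffers can be cached through aliased
 * kernel mappings on some architectures, so reads into them need a cache
 * invalidate at completion (and writes a flush before submission, see
 * bio_map_kern()).
 */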
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
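/*
 * Illustrative usage sketch (added, not part of this file): how a
 * passthrough ioctl handler might use blk_rq_map_user().  The bio list
 * head is saved before execution because completion may change rq->bio.
 * "q", "ubuf" and "len" are assumed to come from the caller; error
 * handling is abbreviated.
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	struct bio *bio = NULL;
 *	int ret;
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(rq, false);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_mq_free_request(rq);
 *	return ret;
 */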
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		bio_uninit(next_bio);
		kfree(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    bios.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
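/*
 * Illustrative usage sketch (added, not part of this file): sending a
 * driver command with a kernel buffer.  blk_rq_map_kern() picks
 * bio_map_kern() or bio_copy_kern() automatically; on-stack buffers
 * always take the copy path, so a heap allocation is preferred.  "q" and
 * "buf_len" are assumptions supplied by the caller.
 *
 *	void *buf = kzalloc(buf_len, GFP_KERNEL);
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	int ret;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *	kfree(buf);
 */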