// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len; skip += len; n -= len;	\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++; skip = 0;				\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len; skip += len; n -= len;		\
		if (skip == p->bv_len) {			\
			skip = 0; p++;				\
		}						\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left; size_t offset;			\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len; n -= len;			\
			if (left || n == 0)			\
				goto __out;			\
			offset += len; len = PAGE_SIZE;		\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base; size_t len;		\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = iter_iov(i);	\
			void __user *base; size_t len;		\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - iter_iov(i);	\
			i->__iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base; size_t len;			\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base; size_t len;			\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base; size_t len;			\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyout_nofault(void __user *to, const void *from, size_t n)
{
	long res;

	if (should_fail_usercopy())
		return n;

	res = copy_to_user_nofault(to, from, n);

	return res < 0 ? n : res;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	size_t res = n;

	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	return res;
}

/**
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

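/*
 * Usage sketch (illustrative only, not part of the original file): a write
 * path that must copy from a user-backed iterator with page faults disabled
 * can pre-fault the source pages and retry on a short copy.  The identifiers
 * "buf" and "bytes" are hypothetical.
 *
 *	size_t copied;
 *
 *	do {
 *		if (fault_in_iov_iter_readable(iter, bytes) == bytes)
 *			return -EFAULT;		// nothing could be faulted in
 *		pagefault_disable();
 *		copied = copy_from_iter(buf, bytes, iter);
 *		pagefault_enable();
 *	} while (!copied);	// a zero-byte copy did not advance the iterator
 */
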
/**
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.copy_mc = false,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

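/*
 * Usage sketch (illustrative only, not part of the original file): wrapping
 * kernel copies of user-supplied struct iovec entries as a source iterator,
 * where ITER_SOURCE (== WRITE) marks the iterator as supplying the data.
 * "kvec_copy", "nr_segs" and "total_len" are hypothetical.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, ITER_SOURCE, kvec_copy, nr_segs, total_len);
 *	// iter now walks kvec_copy[0..nr_segs-1], total_len bytes in all
 */
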
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

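/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * read path filling whatever the caller supplied - iovec, kvec, bvec or
 * xarray - through the copy_to_iter() wrapper.  "hdr" is a hypothetical
 * kernel structure.
 *
 *	size_t copied = copy_to_iter(&hdr, sizeof(hdr), to);
 *
 *	if (copied != sizeof(hdr))
 *		return -EFAULT;		// short copy: a user page was unwritable
 */
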
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies.  Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
				 size_t size)
{
	if (iov_iter_is_copy_mc(i))
		return (void *)copy_mc_to_kernel(to, from, size);
	return memcpy(to, from, size);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy_from_iter(i, addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		iterate_and_advance(i, n, base, len, off,
			copyout_nofault(base, kaddr + offset + off, len),
			memcpy(base, kaddr + offset + off, len)
		)
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i)
{
	size_t n, copied = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	do {
		char *p;

		n = bytes - copied;
		if (PageHighMem(page)) {
			page += offset / PAGE_SIZE;
			offset %= PAGE_SIZE;
			n = min_t(size_t, n, PAGE_SIZE - offset);
		}

		p = kmap_atomic(page) + offset;
		iterate_and_advance(i, n, base, len, off,
			copyin(p + off, base, len),
			memcpy_from_iter(i, p + off, base, len)
		)
		kunmap_atomic(p);
		copied += n;
		offset += n;
	} while (PageHighMem(page) && copied != bytes && n > 0);

	return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

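/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that consumed "done" bytes of an iterator but then decides to retry can
 * undo the consumption with iov_iter_revert() (below).  The names "done"
 * and "need_retry" are hypothetical.
 *
 *	iov_iter_advance(iter, done);
 *	...
 *	if (need_retry)
 *		iov_iter_revert(iter, done);	// iter is as before the advance
 */
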
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.copy_mc = false,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

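/*
 * Usage sketch (illustrative only, not part of the original file): reading
 * into a kernel buffer through the same consumer that handles user reads,
 * by wrapping it in an ITER_KVEC destination iterator.  "kbuf" and "len"
 * are hypothetical.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	// pass &iter to e.g. a ->read_iter()-style consumer
 */
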
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.copy_mc = false,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

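/*
 * Usage sketch (illustrative only, not part of the original file):
 * describing a single page fragment as a source iterator, the way block
 * and network callers feed pages into iov_iter consumers.  The names are
 * hypothetical; bvec_set_page() is assumed available from <linux/bvec.h>.
 *
 *	struct bio_vec bv;
 *	struct iov_iter iter;
 *
 *	bvec_set_page(&bv, page, len, offset);
 *	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, len);
 */
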
/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.copy_mc = false,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

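/*
 * Usage sketch (illustrative only, not part of the original file): iterating
 * over an inode's page cache, as network filesystems do when transmitting
 * cached data.  The caller must keep the pages referenced or locked for the
 * duration; "mapping", "pos" and "count" are hypothetical.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, pos, count);
 */
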
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.copy_mc = false,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

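/*
 * Usage sketch (illustrative only, not part of the original file): draining
 * bytes a caller does not want, e.g. skipping an unwanted chunk of a stream
 * while still advancing protocol state.  "unwanted" is hypothetical.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, ITER_DEST, unwanted);
 *	// copy_to_iter(..., &iter) now succeeds while storing nothing
 */
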
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

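/*
 * Usage sketch (illustrative only, not part of the original file): a
 * direct-I/O path verifying every segment against the device's DMA
 * constraints before going zero-copy; bdev_dma_alignment() is assumed
 * available from <linux/blkdev.h>, and the 511 length mask enforces
 * 512-byte granularity.  The surrounding names are hypothetical.
 *
 *	if (!iov_iter_is_aligned(iter, bdev_dma_alignment(bdev), 511))
 *		return -EINVAL;		// caller might fall back to buffered I/O
 */
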
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;

		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;

		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;

			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

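/*
 * Usage sketch (illustrative only, not part of the original file): grabbing
 * referenced pages for a zero-copy transfer and releasing them afterwards.
 * The names are hypothetical; on failure the allocated array has already
 * been freed by iov_iter_get_pages_alloc2().
 *
 *	struct page **pages;
 *	size_t off;
 *	ssize_t n;
 *	int k;
 *
 *	n = iov_iter_get_pages_alloc2(iter, &pages, SIZE_MAX, &off);
 *	if (n <= 0)
 *		return n;
 *	// ... DMA to/from the pages, starting "off" bytes into pages[0] ...
 *	for (k = 0; k < DIV_ROUND_UP(off + n, PAGE_SIZE); k++)
 *		put_page(pages[k]);
 *	kvfree(pages);
 */
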
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;

	sum = *csum;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (unlikely(iov_iter_is_discard(i))) {
		// can't use csum_memcpy() for that one - data is not copied
		csstate->csum = csum_block_add(csstate->csum,
					       csum_partial(addr, bytes, 0),
					       csstate->off);
		csstate->off += bytes;
		return bytes;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	*iovp = NULL;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

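/*
 * Usage sketch (illustrative only, not part of the original file): the
 * usual readv()/writev() syscall pattern.  A small on-stack array avoids
 * an allocation for typical segment counts, and kfree() is safe either way
 * because *iovp is NULLed when the stack array was used.  "do_the_read" is
 * a hypothetical consumer.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(file, &iter);
 *	kfree(iov);
 */
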
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);

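/*
 * Usage sketch (illustrative only, not part of the original file): the plain
 * read(2)/write(2) counterpart of import_iovec(), wrapping one user buffer
 * as an ITER_UBUF.  "buf" and "len" are hypothetical.
 *
 *	struct iov_iter iter;
 *	int ret = import_ubuf(ITER_SOURCE, buf, len, &iter);
 *
 *	if (ret)
 *		return ret;	// -EFAULT if the range cannot be valid
 */
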
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to bring restored @i back to the same
 * state as when iov_iter_save_state() was called, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}

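/*
 * Usage sketch (illustrative only, not part of the original file): saving
 * iterator state before an attempt that may need to be replayed, the way
 * io_uring handles retried requests.  "do_transfer" is hypothetical.
 *
 *	struct iov_iter_state state;
 *	ssize_t ret;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_transfer(iter);	// may partially advance iter
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);
 */
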
/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page *page, **p;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	pgoff_t index = pos >> PAGE_SHIFT;
	XA_STATE(xas, i->xarray, index);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = find_subpage(page, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	size_t skip = i->iov_offset, offset, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->bvec->bv_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}

	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	offset = skip % PAGE_SIZE;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;
	for (k = 0; k < maxpages; k++)
		p[k] = page + k;

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->kvec->iov_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + size;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them.  This should only be used if the iterator is user-backed
 * (IOBUF/UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator.  The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
 *      merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
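
/*
 * Usage sketch (illustrative only, not part of the original file): extracting
 * pages for DMA and cleaning up according to iov_iter_extract_will_pin().
 * The names are hypothetical.
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_extract_pages(iter, &pages, len, maxpages, 0, &off);
 *	if (n < 0)
 *		return n;
 *	// ... perform the transfer ...
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, DIV_ROUND_UP(off + n, PAGE_SIZE));
 *	kvfree(pages);
 */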