/* lib/iov_iter.c */

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

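/*
 * Segment-walking helpers.  Each iterate_* macro below applies STEP to
 * successive (base, len) chunks described by __v until n bytes have been
 * visited.  For the user-space (iovec) flavour, STEP must evaluate to the
 * number of bytes it failed to process, in the style of copy_to_user(),
 * so a partial copy ends the walk early and n is trimmed to the number of
 * bytes actually handled.
 */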
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

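/*
 * Kernel-space (kvec) flavour: copies to or from kernel memory cannot
 * fault, so the value of STEP is discarded and the walk always consumes
 * the full n bytes.
 */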
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

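/*
 * bio_vec flavour: walks the bvec array with the block layer's bvec_iter
 * machinery, seeding bi_bvec_done with the initial skip.  As with kvec,
 * the value of STEP is discarded.
 */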
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

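/*
 * Dispatch on the iterator flavour and run the matching STEP expression
 * (I for iovec, B for bvec, K for kvec) over up to n bytes.  This walks
 * the segments without modifying the iov_iter itself; see
 * iterate_and_advance() below for the advancing variant.
 */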
#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		struct bio_vec v;			\
		struct bvec_iter __bi;			\
		iterate_bvec(i, n, v, __bi, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

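/*
 * Like iterate_all_kinds(), but consume what was processed: n is first
 * clamped to i->count, and afterwards count, iov_offset, nr_segs and the
 * segment pointer are updated to point just past the last byte visited.
 */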
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

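/*
 * Copy from a kernel page into user-space iovecs.  The fast path maps the
 * page with kmap_atomic() and uses __copy_to_user_inatomic() after faulting
 * the first chunk in; if the atomic copy still comes up short (the page may
 * have been unmapped again in the meantime), it falls back to a sleeping
 * kmap() plus __copy_to_user().  Advances the iterator by hand and returns
 * the number of bytes copied.
 */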
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

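/*
 * If the caller runs under set_fs(KERNEL_DS), the "user" pointers are
 * really kernel pointers, so the iterator is tagged ITER_KVEC; the cast
 * below is safe because struct iovec and struct kvec have identical
 * layout (see dup_iter()).
 */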
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

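/*
 * In the STEP expressions below, the idiom (from += v.iov_len) - v.iov_len
 * evaluates to the current cursor position while advancing the cursor past
 * the chunk, all in the single expression the iterate_* macros expect.
 */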
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

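/*
 * For kvec- and bvec-backed iterators the destination cannot fault, so the
 * page can simply be mapped with kmap_atomic() and fed to copy_to_iter();
 * user-backed iovecs take the fault-handling slow path above.
 */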
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

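/*
 * Copy into a page from atomic context (the mapping is taken with
 * kmap_atomic(), so pagefaults are disabled).  The iterator is not
 * advanced; on a short copy the caller is expected to fault pages in
 * and retry.
 */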
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

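/*
 * OR together the base address and length of every segment: the low bits
 * of the result give the worst-case alignment across the whole iterator,
 * which callers (e.g. direct-I/O setup) can test against a required
 * alignment mask.
 */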
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

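/*
 * Pin the pages backing the first segment of the iterator (up to maxsize
 * bytes and maxpages pages), storing the offset into the first page in
 * *start.  Returns the number of bytes covered by the pinned pages or a
 * negative errno; kvec-backed iterators have no pages to pin and get
 * -EFAULT.
 */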
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

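/*
 * Allocate an array of page pointers, falling back to vmalloc() for large
 * requests; the result is freed with kvfree() either way.
 */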
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

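/*
 * Copy from the iterator while folding a checksum of the copied data into
 * *csum.  Per-segment checksums are combined with csum_block_add(), which
 * accounts for each segment's byte offset so odd-length segments are
 * folded in correctly.  A faulting user-space copy ends the walk early.
 */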
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

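/*
 * Count how many pages the first i->count bytes of the iterator touch,
 * capped at maxpages.  For iovec and kvec segments this is derived from
 * the virtual addresses; each bio_vec segment counts as one page.
 */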
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

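/*
 * Clone an iterator, duplicating its segment array so the copy can be
 * advanced independently of the original.  Returns the new array, which
 * the caller must eventually kfree(), or NULL on allocation failure.
 */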
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

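/*
 * Copy in and validate a user iovec array, then initialize an iov_iter
 * over it.  On success *iov is either NULL (the caller-supplied fast
 * array sufficed) or a heap array the caller must kfree(); kfree(*iov)
 * is safe in both cases.
 */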
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);