/* lib/iov_iter.c (Linux 4.8-rc8) */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

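/*
 * The iterate_* macros below each walk one kind of segment array (struct
 * iovec, struct kvec or struct bio_vec), presenting the current segment as
 * __v and evaluating STEP on it.  For the user-space iovec flavour, STEP
 * evaluates to the number of bytes it could NOT process (as the
 * __copy_*_user primitives do), so a short copy stops the walk early; for
 * the kvec and bvec flavours the value of STEP is discarded.
 */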
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
	struct bvec_iter __start; \
	__start.bi_size = n; \
	__start.bi_bvec_done = skip; \
	__start.bi_idx = 0; \
	for_each_bvec(__v, i->bvec, __bi, __start) { \
		if (!__v.bv_len) \
			continue; \
		(void)(STEP); \
	} \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		struct bio_vec v; \
		struct bvec_iter __bi; \
		iterate_bvec(i, n, v, __bi, skip, (B)) \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
	} \
}

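/*
 * Like iterate_all_kinds(), but also advances the iterator: n is first
 * clamped to i->count, and afterwards i->count, i->iov_offset, the segment
 * pointer and i->nr_segs are updated to account for the bytes walked.
 */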
#define iterate_and_advance(i, n, v, I, B, K) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (i->count) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			const struct bio_vec *bvec = i->bvec; \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec; \
			skip = __bi.bi_bvec_done; \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
			if (skip == kvec->iov_len) { \
				kvec++; \
				skip = 0; \
			} \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
			if (skip == iov->iov_len) { \
				iov++; \
				skip = 0; \
			} \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} \
		i->count -= n; \
		i->iov_offset = skip; \
	} \
}

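/*
 * Copy from a page into a user-backed (iovec) iov_iter.  With CONFIG_HIGHMEM
 * the destination is pre-faulted so the page can be copied out under
 * kmap_atomic(); if that pass cannot finish (or on configurations where a
 * plain kmap() is cheap), the code falls back to kmap() + __copy_to_user().
 */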
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

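/*
 * Mirror image of copy_page_to_iter_iovec(): copy user data described by an
 * iovec-backed iterator into the given page, using the same atomic-first,
 * kmap() fallback strategy.
 */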
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

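/*
 * Example (illustrative only, identifiers hypothetical): a caller wrapping a
 * single user buffer so it can be filled from a page with copy_page_to_iter():
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_page_to_iter(page, 0, len, &iter);
 *
 * Note that when the caller runs under KERNEL_DS, iov_iter_init() silently
 * turns the iterator into an ITER_KVEC one.
 */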
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

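/*
 * copy_to_iter()/copy_from_iter() move bytes between the kernel buffer at
 * addr and whatever the iterator describes, picking __copy_*_user(), the
 * page-mapping helpers above or plain memcpy() per segment kind, and
 * advancing the iterator as they go.
 */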
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

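/*
 * The copy_page_{to,from}_iter() wrappers: for kernel-backed iterators
 * (ITER_BVEC/ITER_KVEC) the page is simply mapped and handed to
 * copy_{to,from}_iter(); user-backed iovecs go through the dedicated
 * routines above, which cope with faults mid-copy.
 */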
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

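/*
 * Unlike the copy_*_iter() helpers above, iov_iter_copy_from_user_atomic()
 * uses iterate_all_kinds() and therefore does not advance the iterator;
 * callers advance it themselves (e.g. with iov_iter_advance()) once they
 * know how much was actually consumed.
 */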
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

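/*
 * iov_iter_kvec() and iov_iter_bvec() set up iterators over kernel memory:
 * a kvec array or a bio_vec array respectively.  The direction passed in
 * must already carry the matching ITER_KVEC/ITER_BVEC flag.
 */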
void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

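/*
 * iov_iter_alignment() ORs together every segment's start address (or
 * offset) and length, so a caller can test the result against an alignment
 * mask (for example a block-size mask in a direct-I/O path) in one go.
 */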
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

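/*
 * iov_iter_get_pages() pins up to maxpages pages backing the start of the
 * iterator: user iovecs go through get_user_pages_fast(), a bvec segment
 * just takes an extra page reference, and kvec-backed iterators are
 * rejected with -EFAULT.  *start receives the offset into the first page,
 * and the return value is the number of bytes covered.
 */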
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

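/*
 * csum_and_copy_{from,to}_iter() combine the copy with an Internet-checksum
 * accumulation: each chunk's partial sum is folded into *csum with
 * csum_block_add() at its running offset.
 */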
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

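/*
 * dup_iter() copies the iterator and duplicates its segment array with
 * kmemdup() so the clone can be advanced independently of the original;
 * it returns the new array, or NULL on allocation failure.
 */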
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

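/*
 * Example (illustrative only, identifiers hypothetical): the usual calling
 * pattern for import_iovec() in a syscall, with a small on-stack fast array:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	(use &iter, then)
 *	kfree(iov);	(kfree(NULL) is a no-op, and *iov is set to NULL
 *			 when the on-stack array was used)
 *
 * Note that import_iovec() returns 0 on success (the total byte count ends
 * up in iter.count), not the number of bytes.
 */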
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);