/*
 * lib/iov_iter.c  (Linux 4.6-rc6, via cris-mirror.git)
 */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

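/*
 * The iterate_* macros below walk an iov_iter one contiguous chunk at a
 * time.  __p tracks the current segment, __v is a per-chunk view clipped
 * to min(remaining, segment size - skip), and STEP is an expression
 * evaluated once per chunk.  For user-space iovecs STEP yields the number
 * of bytes it failed to process, so a fault terminates the walk early;
 * kvec and bvec steps cannot fail and their value is discarded with a
 * (void) cast.
 */
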
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

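/*
 * iterate_all_kinds() dispatches on i->type and runs one of the step
 * expressions I (iovec), B (bvec) or K (kvec) over each chunk.  The
 * iterator itself is left untouched; only the local 'skip' and whatever
 * the step expressions accumulate change.
 */
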
#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

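/*
 * iterate_and_advance() performs the same walk but also commits the new
 * position: segment pointers are advanced past fully consumed entries,
 * i->nr_segs shrinks to match, and i->count/i->iov_offset are updated by
 * the amount actually processed.
 */
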
#define iterate_and_advance(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
		if (skip == kvec->iov_len) {		\
			kvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= kvec - i->kvec;		\
		i->kvec = kvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}

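/*
 * Slow path for user-space iovecs.  Try the cheap route first: prefault
 * the destination, map the page with kmap_atomic() and copy with the
 * _inatomic user-copy primitives, which must not sleep.  If a chunk
 * faults anyway, drop the atomic mapping and finish the remainder under
 * plain kmap()/__copy_to_user(), which can take the fault.
 */
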
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

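/*
 * Mirror image of copy_page_to_iter_iovec(): the same atomic-first,
 * kmap-fallback structure, with data flowing from user space into the
 * page instead.
 */
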
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

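/*
 * Typical use of iov_iter_fault_in_readable() is in a buffered-write
 * loop, roughly as in generic_perform_write() (a sketch, not verbatim):
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	... then copy under the page lock with pagefaults disabled ...
 *
 * Pre-faulting means the atomic copy under the page lock won't deadlock
 * by faulting on a page the writer itself holds locked.
 */
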
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

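/*
 * Example (a sketch; 'ubuf' and 'len' are hypothetical caller values):
 * wrapping a single user buffer for a read-style transfer.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *
 * Note the KERNEL_DS test above: under set_fs(KERNEL_DS) the "iovec"
 * actually describes kernel memory, and the iterator is silently retyped
 * as an ITER_KVEC.
 */
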
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

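/*
 * A subtlety of the copy helpers below: iterate_and_advance() assigns back
 * through its size argument, so after the walk 'bytes' holds the number of
 * bytes actually processed.  The bare 'return bytes' therefore reports
 * short copies (e.g. after an unresolved user page fault) correctly.  A
 * hypothetical caller pushing a kernel struct to the iterator:
 *
 *	if (copy_to_iter(&rec, sizeof(rec), iter) != sizeof(rec))
 *		return -EFAULT;
 */
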
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

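/*
 * The OR-accumulation above folds the low bits of every segment's base
 * address and length into one value, so the result has a bit set for the
 * worst (smallest) alignment found anywhere in the iterator.  A direct-I/O
 * path might gate on it like this (sketch; 'blocksize' is a power of two):
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		return -EINVAL;
 */
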
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

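/*
 * Note that iov_iter_get_pages() only looks at the first segment and pins
 * at most maxpages pages; it returns the number of bytes those pages
 * cover, with *start set to the offset into the first page.  Hypothetical
 * caller pattern:
 *
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(i, pages, maxsize,
 *				       ARRAY_SIZE(pages), &start);
 *	if (n > 0) {
 *		int npages = DIV_ROUND_UP(n + start, PAGE_SIZE);
 *		... use pages[0..npages-1], put_page() each when done ...
 *	}
 */
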
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

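/*
 * Typical import_iovec() calling convention (a sketch): point *iov at an
 * on-stack fast array; on success the function may have replaced it with
 * a kmalloc'd copy, so the caller frees unconditionally afterwards.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... do I/O with &iter ...
 *	kfree(iov);
 *
 * kfree(NULL) is a no-op, which is why *iov is NULLed both on failure and
 * when the fast array was used.
 */
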
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				      *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);