#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}
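
/*
 * Summary of the iteration machinery (descriptive comment, not in the
 * original file): iterate_all_kinds() below picks one of the three helpers
 * above based on i->type, binds the current segment to "v", and evaluates
 * the caller-supplied step expression (I for user iovecs, B for bio_vecs,
 * K for kvecs) once per segment touched.  iterate_and_advance() does the
 * same and then folds the consumed length back into i->count, i->iov_offset
 * and the segment pointer, so the iterator is left positioned just past the
 * data that was processed.
 */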

#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

#define iterate_and_advance(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
		if (skip == kvec->iov_len) {		\
			kvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= kvec - i->kvec;		\
		i->kvec = kvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
				       struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
					 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
							   v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
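
/*
 * Usage sketch (illustrative, not part of the original file): buffered-write
 * paths typically fault the user pages in up front, then do the atomic copy
 * and retry on a short copy, as in generic_perform_write().  "page",
 * "offset", "copied" and "status" below are hypothetical locals.
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	iov_iter_advance(i, copied);
 */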

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *from = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
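
/*
 * Usage sketch (illustrative, not part of the original file): copying a
 * kernel buffer out to a single user iovec, as a read()-style path might
 * do.  "ubuf", "kbuf" and "len" are hypothetical.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 *	if (copied != len)
 *		return -EFAULT;		(short copy: a user page faulted)
 */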

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
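
/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * has already consumed a header out of the iterator advances past it before
 * handing the remainder on to a copy routine.  "hdr_len", "payload" and
 * "payload_len" are hypothetical.
 *
 *	iov_iter_advance(iter, hdr_len);
 *	n = copy_to_iter(payload, payload_len, iter);
 */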

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
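
/*
 * Usage sketch (illustrative, not part of the original file): direct-I/O
 * code commonly uses the OR of all segment addresses and lengths returned
 * above to reject buffers that are not block aligned.  "bdev" and "iter"
 * are hypothetical; blksize_bits() and bdev_logical_block_size() are
 * existing block-layer helpers.
 *
 *	unsigned blkbits = blksize_bits(bdev_logical_block_size(bdev));
 *
 *	if (iov_iter_alignment(iter) & ((1 << blkbits) - 1))
 *		return -EINVAL;
 */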

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
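
/*
 * Usage sketch (illustrative, not part of the original file): pinning the
 * pages behind the front of an iterator, e.g. to map them into a bio.  The
 * caller owns the returned page references and must drop them with
 * put_page().  "iter" and "maxsize" are hypothetical.
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t bytes;
 *	int j;
 *
 *	bytes = iov_iter_get_pages(iter, pages, maxsize, 16, &off);
 *	if (bytes < 0)
 *		return bytes;
 *	for (j = 0; j < DIV_ROUND_UP(off + bytes, PAGE_SIZE); j++)
 *		put_page(pages[j]);
 */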

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	char *from = addr;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
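
/*
 * Usage sketch (illustrative, not part of the original file): a readv()-style
 * wrapper.  import_iovec() only falls back to a kmalloc'ed array when
 * nr_segs exceeds the on-stack one, and sets *iov to NULL when the stack
 * array was used, so the final kfree() is always safe.  "uvec", "nr_segs",
 * "file" and "do_read_iter" are hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read_iter(file, &iter);
 *	kfree(iov);
 *	return ret;
 */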

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}