#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                     \
        size_t wanted = n;                               \
        __p = i->iov;                                    \
        __v.iov_len = min(n, __p->iov_len - skip);       \
        if (likely(__v.iov_len)) {                       \
                __v.iov_base = __p->iov_base + skip;     \
                left = (STEP);                           \
                __v.iov_len -= left;                     \
                skip += __v.iov_len;                     \
                n -= __v.iov_len;                        \
        } else {                                         \
                left = 0;                                \
        }                                                \
        while (unlikely(!left && n)) {                   \
                __p++;                                   \
                __v.iov_len = min(n, __p->iov_len);      \
                if (unlikely(!__v.iov_len))              \
                        continue;                        \
                __v.iov_base = __p->iov_base;            \
                left = (STEP);                           \
                __v.iov_len -= left;                     \
                skip = __v.iov_len;                      \
                n -= __v.iov_len;                        \
        }                                                \
        n = wanted - n;                                  \
}
#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                               \
        __p = i->kvec;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);       \
        if (likely(__v.iov_len)) {                       \
                __v.iov_base = __p->iov_base + skip;     \
                (void)(STEP);                            \
                skip += __v.iov_len;                     \
                n -= __v.iov_len;                        \
        }                                                \
        while (unlikely(n)) {                            \
                __p++;                                   \
                __v.iov_len = min(n, __p->iov_len);      \
                if (unlikely(!__v.iov_len))              \
                        continue;                        \
                __v.iov_base = __p->iov_base;            \
                (void)(STEP);                            \
                skip = __v.iov_len;                      \
                n -= __v.iov_len;                        \
        }                                                \
        n = wanted;                                      \
}
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                        \
        __start.bi_size = n;                             \
        __start.bi_bvec_done = skip;                     \
        __start.bi_idx = 0;                              \
        for_each_bvec(__v, i->bvec, __bi, __start) {     \
                if (!__v.bv_len)                         \
                        continue;                        \
                (void)(STEP);                            \
        }                                                \
}
#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                struct bio_vec v;                               \
                struct bvec_iter __bi;                          \
                iterate_bvec(i, n, v, __bi, skip, (B))          \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
                iterate_kvec(i, n, v, kvec, skip, (K))          \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
        }                                                       \
}
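
/*
 * Added commentary (not from the original sources): iterate_all_kinds()
 * expands exactly one of the three STEP expressions per segment,
 * depending on the iterator flavour - I for user-space iovecs, B for
 * bio_vecs, K for kernel kvecs.  A sketch of a hypothetical byte
 * counter built on top of it (the iovec STEP must evaluate to the
 * number of bytes left uncopied, hence the trailing 0):
 *
 *	size_t seen = 0, n = i->count;
 *	iterate_all_kinds(i, n, v,
 *		(seen += v.iov_len, 0),
 *		seen += v.bv_len,
 *		seen += v.iov_len
 *	)
 */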
#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}
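
/*
 * Added commentary (not from the original sources): the copy routines
 * below lean on the expression (p += v.iov_len) - v.iov_len.  It
 * advances the running pointer by a whole segment while still
 * evaluating to the pre-increment value, so each STEP sees the start
 * of its own chunk:
 *
 *	char *p = buf;
 *	memcpy(dst, (p += 8) - 8, 8);
 *
 * after which p points at buf + 8 but the copy read from buf itself.
 */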
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_multipages_readable(v.iov_base,
                                        v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
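
/*
 * Usage sketch (illustrative, not part of this file): write paths
 * typically fault the source pages in up front so that the later
 * atomic copy cannot sleep.  Loosely modelled on the
 * generic_perform_write() pattern; the surrounding names are assumed:
 *
 *	if (iov_iter_fault_in_readable(i, bytes)) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */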
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
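
/*
 * Usage sketch (illustrative, not part of this file): wrapping a single
 * user-space buffer in an iov_iter for a read-style operation; "ubuf"
 * and "len" are hypothetical:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */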
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
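
/*
 * Usage sketch (illustrative, not part of this file): copying a kernel
 * buffer out to whatever the iterator describes, treating a short copy
 * as a fault; "kbuf" and "kbuf_len" are hypothetical:
 *
 *	size_t copied = copy_to_iter(kbuf, kbuf_len, &iter);
 *	if (copied != kbuf_len)
 *		return -EFAULT;
 */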
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
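
/*
 * Usage sketch (illustrative, not part of this file): a read path
 * holding a page of file data can hand "count" bytes of it to the
 * caller without caring what kind of iterator it was given:
 *
 *	size_t copied = copy_page_to_iter(page, offset_in_page, count, iter);
 *	if (copied < count)
 *		...short copy: the destination faulted or ran dry...
 */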
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
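
/*
 * Usage sketch (illustrative, not part of this file): a read that hits
 * a hole can satisfy the request with zeroes instead of page data;
 * "hole_len" is hypothetical:
 *
 *	size_t cleared = iov_iter_zero(min(count, hole_len), iter);
 */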
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
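
/*
 * Usage sketch (illustrative, not part of this file): if "n" bytes were
 * consumed by some out-of-band means (say, a DMA engine), the iterator
 * is kept in sync by advancing it manually:
 *
 *	iov_iter_advance(iter, n);
 */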
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
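
/*
 * Usage sketch (illustrative, not part of this file): direct-I/O code
 * commonly rejects iterators whose addresses or lengths are not block
 * aligned; "blocksize" is a hypothetical power of two:
 *
 *	if (iov_iter_alignment(iter) & (blocksize - 1))
 *		return -EINVAL;
 */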
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
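
/*
 * Usage sketch (illustrative, not part of this file): pinning up to one
 * page of the iterator for zero-copy I/O; the caller owns the page
 * reference on success and must drop it with put_page():
 *
 *	struct page *page;
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(iter, &page, PAGE_SIZE, 1, &start);
 *
 *	if (n > 0) {
 *		...use the n bytes at offset start within page...
 *		put_page(page);
 *	}
 */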
static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
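
/*
 * Usage sketch (illustrative, not part of this file): block layer
 * callers typically size a bio by the iterator's page count, capped at
 * BIO_MAX_PAGES:
 *
 *	int npages = iov_iter_npages(iter, BIO_MAX_PAGES);
 */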
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
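
/*
 * Usage sketch (illustrative, not part of this file): a readv-style
 * syscall funnels the user vector through import_iovec() and frees
 * whatever it allocated afterwards; "uvec" and "nr" are hypothetical:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, uvec, nr, UIO_FASTIOV, &iov, &iter);
 *
 *	if (ret < 0)
 *		return ret;
 *	...use &iter...
 *	kfree(iov);
 *
 * The kfree() is safe in both cases: *iov is set to NULL when the
 * on-stack array was used.
 */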
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif
int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);
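
/*
 * Usage sketch (illustrative, not part of this file): single-buffer
 * read()/write()-style paths can skip the full vector import; "ubuf"
 * and "len" are hypothetical:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
 *
 *	if (ret < 0)
 *		return ret;
 */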