#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

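/*
 * An iov_iter describes a possibly discontiguous I/O buffer: user-space
 * iovecs (the default), kernel kvecs (ITER_KVEC), page vectors
 * (ITER_BVEC) or a pipe (ITER_PIPE).  The iterate_* macros below let
 * each operation be written once, with one step expression per flavour.
 */
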
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

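/*
 * The three step arguments (I, B, K) of iterate_all_kinds() and
 * iterate_and_advance() are expressions evaluated once per segment, with
 * the current segment visible as "v".  For the user-space (I) case the
 * step must evaluate to the number of bytes it failed to process, so a
 * short __copy_to_user() terminates the walk early; the kernel-space
 * steps cannot fail.  See copy_to_iter() below for a typical invocation.
 */
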
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

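/*
 * pipe->buffers is always a power of two, so masking with (buffers - 1)
 * wraps an index around the circular buffer ring: with 16 buffers,
 * next_idx(15, pipe) yields 0.
 */
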
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

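/*
 * Usage sketch for iov_iter_fault_in_readable() (editor's illustration,
 * not part of the original file): generic_perform_write()-style callers
 * fault the user pages in before taking page locks, then copy with
 * pagefaults disabled:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */
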
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

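/*
 * Usage sketch for iov_iter_init() (editor's illustration; the buffer
 * names are hypothetical):
 *
 *	struct iovec vec[2] = {
 *		{ .iov_base = hdr_buf,  .iov_len = hdr_len  },
 *		{ .iov_base = body_buf, .iov_len = body_len },
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, vec, 2, hdr_len + body_len);
 *	// &iter can now be fed to copy_to_iter(), copy_page_to_iter(), ...
 */
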
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

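/*
 * Usage sketch for copy_to_iter() (editor's illustration; the foo_*
 * names are hypothetical): a ->read_iter() method copying a kernel
 * buffer out to whatever the iterator describes - iovec, kvec, bvec or
 * pipe alike:
 *
 *	static ssize_t foo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t n = copy_to_iter(foo_buf, foo_len, to);
 *		return n ? n : -EFAULT;
 *	}
 */
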
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

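/*
 * Note: unlike copy_from_iter(), which may consume part of the iterator
 * and report a short copy, the _full variant is all-or-nothing - on
 * failure it leaves the iterator unadvanced.  Typical use (illustrative):
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */
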
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

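/*
 * iov_iter_copy_from_user_atomic() must run with pagefaults disabled and
 * deliberately does not advance the iterator; a caller in the style of
 * generic_perform_write() advances by the amount actually accepted
 * (editor's illustration, names approximate):
 *
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *	iov_iter_advance(i, copied);
 */
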
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	int idx = i->idx;
	size_t off = i->iov_offset, orig_sz;

	if (unlikely(i->count < size))
		size = i->count;
	orig_sz = size;

	if (size) {
		if (off) /* make it relative to the beginning of buffer */
			size += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (size <= buf->len)
				break;
			size -= buf->len;
			idx = next_idx(idx, pipe);
		}
		buf->len = size;
		i->idx = idx;
		off = i->iov_offset = buf->offset + size;
	}
	if (off)
		idx = next_idx(idx, pipe);
	if (pipe->nrbufs) {
		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		/* [curbuf,unused) is in use.  Free [idx,unused) */
		while (idx != unused) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
	i->count -= orig_sz;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

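/*
 * Advancing past the end is safe: both pipe_advance() and
 * iterate_and_advance() clamp the step to i->count, so e.g.
 * iov_iter_advance(i, (size_t)-1) simply exhausts the iterator.
 */
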
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

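/*
 * Usage sketch for iov_iter_alignment() (editor's illustration): because
 * every base and length is OR-ed into the result, any misalignment shows
 * up as a set low bit, so direct-I/O code can simply test:
 *
 *	if (iov_iter_alignment(iter) & (bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;
 */
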
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

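/*
 * Usage sketch for iov_iter_get_pages() (editor's illustration): pin up
 * to 16 pages worth of the iterator for zero-copy I/O; *start receives
 * the offset into the first page, and each page must be put_page()d when
 * the I/O completes:
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t n = iov_iter_get_pages(i, pages, SIZE_MAX, 16, &start);
 *	if (n < 0)
 *		return n;
 *	// n bytes are now reachable, beginning at pages[0] + start
 */
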
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

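/*
 * Usage sketch for import_iovec() (editor's illustration; do_the_io() is
 * hypothetical): the usual readv(2)-style sequence, where kfree(*iov) is
 * safe whether or not the on-stack array ended up being used:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);
 */
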
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);