// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>

#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
/* copy to user space; returns the number of bytes *not* copied */
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
/* copy from user space; returns the number of bytes *not* copied */
static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
#ifdef PIPE_PARANOIA
/* consistency check: the iterator must sit right at the end of the pipe */
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;

	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
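
/*
 * Illustrative sketch, not part of the original file: a write path would
 * typically pre-fault the user pages backing an ITER_IOVEC before taking
 * locks that must not be held across a page fault.  The helper name
 * example_prefault_write() is an assumption made purely for illustration.
 */
static ssize_t __maybe_unused example_prefault_write(struct iov_iter *from)
{
	size_t bytes = iov_iter_count(from);

	/* bail out early if the user memory cannot be faulted in at all */
	if (unlikely(iov_iter_fault_in_readable(from, bytes)))
		return -EFAULT;

	/*
	 * ... proceed with the actual copy, e.g. copy_from_iter() into a
	 * kernel buffer; the copy itself advances the iterator.
	 */
	return bytes;
}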
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
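
/*
 * Illustrative sketch, not part of the original file: initialising a
 * one-segment ITER_IOVEC over a user buffer and copying kernel data into
 * it.  example_copy_reply() and its arguments are assumptions made purely
 * for illustration.
 */
static ssize_t __maybe_unused example_copy_reply(void __user *ubuf, size_t len,
						 const void *reply, size_t reply_len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter to;

	iov_iter_init(&to, READ, &iov, 1, len);
	/* returns the number of bytes actually copied, possibly short */
	return copy_to_iter(reply, reply_len, &to);
}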
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

/* find the first slot/offset where new data can be placed in the pipe */
static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}
/* allocate enough pipe buffers to hold @size bytes, starting at *idxp/*offp */
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
	return csum_block_add(sum, next, off);
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
				__wsum *csum, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, r;
	size_t off = 0;
	__wsum sum = *csum;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &r);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), r = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[idx].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->idx = idx;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
	}
	i->count -= bytes;
	*csum = sum;
	return bytes;
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off, xfer = 0;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
				chunk);
		i->idx = idx;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and a typical _copy_to_iter()
 * are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
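
/*
 * Illustrative sketch, not part of the original file: as the comment above
 * notes, _copy_to_iter_mcsafe() may return a short count for any iterator
 * type when poisoned memory is consumed, so a caller has to propagate the
 * shortfall rather than assume 'bytes' were transferred.  example_dax_read()
 * and its arguments are assumptions made purely for illustration.
 */
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static ssize_t __maybe_unused example_dax_read(void *kaddr, size_t bytes,
					       struct iov_iter *to)
{
	size_t done = _copy_to_iter_mcsafe(kaddr, bytes, to);

	/* a short copy here means the source contained poisoned memory */
	return done ? done : -EIO;
}
#endif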
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
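
/*
 * Illustrative sketch, not part of the original file: a pmem-style write
 * path would use _copy_from_iter_flushcache() so that data written to the
 * destination is not left dirty in the CPU cache.  example_pmem_write()
 * and its arguments are assumptions made purely for illustration.
 */
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static size_t __maybe_unused example_pmem_write(void *pmem_addr, size_t bytes,
						struct iov_iter *from)
{
	/* returns the number of bytes copied, possibly short on a fault */
	return _copy_from_iter_flushcache(pmem_addr, bytes, from);
}
#endif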
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
/* release every pipe buffer past the iterator's current position */
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
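
/*
 * Illustrative sketch, not part of the original file: wrapping a kernel
 * buffer in a single-segment ITER_KVEC, as network and filesystem callers
 * commonly do before handing it to an iter-based interface.
 * example_kvec_copy() and its arguments are assumptions made purely for
 * illustration.
 */
static size_t __maybe_unused example_kvec_copy(void *dst, size_t len,
					       const void *src, size_t src_len)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter to;

	iov_iter_kvec(&to, READ, &kv, 1, len);
	return copy_to_iter(src, src_len, &to);
}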
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
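
/*
 * Illustrative sketch, not part of the original file: draining data a
 * caller wants thrown away by "reading" it into a discard iterator.
 * example_drain() and the hypothetical read_actor callback it takes are
 * assumptions made purely for illustration.
 */
static ssize_t __maybe_unused example_drain(size_t count,
		ssize_t (*read_actor)(struct iov_iter *to))
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, count);
	return read_actor(&sink);	/* everything "read" is discarded */
}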
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return n;
}
EXPORT_SYMBOL(import_iovec);
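
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * convention for import_iovec() from a syscall-like path, honouring the
 * rule spelled out above that *iov may be kfree()d unconditionally.
 * example_writev() and its arguments are assumptions made purely for
 * illustration.
 */
static ssize_t __maybe_unused example_writev(const struct iovec __user *uvec,
					     unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... consume 'iter', e.g. with copy_from_iter() ... */

	kfree(iov);	/* safe whether or not the on-stack array was used */
	return ret;
}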
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

ssize_t compat_import_iovec(int type,
		const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return n;
}
#endif /* CONFIG_COMPAT */
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);