Linux 4.9.27
linux/fpc-iii.git: lib/iov_iter.c
blob a75ea633b5c46e42dfe6758011ad5c9665c3fd31
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		struct bio_vec v;			\
		struct bvec_iter __bi;			\
		iterate_bvec(i, n, v, __bi, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
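
/*
 * Illustrative usage sketch (not part of the original file): the classic
 * caller of iov_iter_fault_in_readable() is a generic_perform_write()-style
 * loop, which pre-faults user pages *before* taking page locks so that the
 * later atomic copy cannot deadlock on a page the kernel holds locked.
 * The function name and the per-iteration chunking are assumptions made
 * purely for illustration.
 */
#if 0
static ssize_t example_perform_write(struct iov_iter *i)
{
	ssize_t status = 0;

	while (iov_iter_count(i)) {
		size_t bytes = min_t(size_t, iov_iter_count(i), PAGE_SIZE);

		/* pre-fault so the atomic copy below cannot deadlock */
		if (iov_iter_fault_in_readable(i, bytes)) {
			status = -EFAULT;
			break;
		}
		/* ... lock a pagecache page, then
		 *     iov_iter_copy_from_user_atomic(), unlock ... */
	}
	return status;
}
#endif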

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
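
/*
 * Minimal usage sketch (an illustration, not code from this file): wrap a
 * single user buffer in an iovec, initialize an iterator over it in the
 * WRITE direction (data flows *from* userspace), and drain it into a
 * kernel buffer. The function and variable names are hypothetical.
 */
#if 0
static ssize_t example_copy_in(void __user *ubuf, size_t len, char *kbuf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_init(&iter, WRITE, &iov, 1, len);
	/* copy_from_iter() advances the iterator and returns bytes copied */
	return copy_from_iter(kbuf, len, &iter);
}
#endif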

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= (n - unroll);
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
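
/*
 * Sketch of the advance/revert pairing (an assumption for illustration,
 * not taken from this file): a caller optimistically advances past a
 * reserved range and gives back the unconsumed tail after a short
 * transfer, which is the pattern iov_iter_revert() exists to serve.
 */
#if 0
static void example_short_transfer(struct iov_iter *i, size_t reserved,
				   size_t used)
{
	iov_iter_advance(i, reserved);		/* claim the whole range */
	if (used < reserved)			/* return the unused tail */
		iov_iter_revert(i, reserved - used);
}
#endif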

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
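
/*
 * Usage sketch (hypothetical caller, for illustration only): pin the user
 * pages backing the next chunk of an iterator, the way direct-I/O paths
 * do, then drop the references with put_page() when the I/O is done. The
 * 16-page batch size is an arbitrary assumption.
 */
#if 0
static ssize_t example_pin_pages(struct iov_iter *i)
{
	struct page *pages[16];
	size_t offset;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE,
				   ARRAY_SIZE(pages), &offset);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... do I/O against pages[0..npages-1]; the data starts at
	 *     'offset' within pages[0] ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	return bytes;
}
#endif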

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
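
/*
 * Sketch of the calling convention the kernel-doc above describes (the
 * surrounding function and the vfs_iter_write() step are illustrative
 * assumptions): start with a small on-stack array, let import_iovec()
 * spill to the heap if needed, and kfree() unconditionally at the end —
 * *iov is either NULL (stack array sufficed) or the heap copy.
 */
#if 0
static ssize_t example_writev(struct file *file,
			      const struct iovec __user *uvec,
			      unsigned long nr_segs, loff_t *pos)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	ret = vfs_iter_write(file, &iter, pos);
	kfree(iov);	/* safe whether or not a heap copy was made */
	return ret;
}
#endif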

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
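
/*
 * Companion sketch (hypothetical, for illustration): the single-buffer
 * analogue of the import_iovec() pattern above, as a read(2)-style path
 * might use it; the vfs_iter_read() step stands in for whatever consumes
 * the iterator.
 */
#if 0
static ssize_t example_read(struct file *file, char __user *buf,
			    size_t len, loff_t *pos)
{
	struct iovec iov;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_single_range(READ, buf, len, &iov, &iter);
	if (ret)
		return ret;
	return vfs_iter_read(file, &iter, pos);
}
#endif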