/* lib/iov_iter.c */
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {		\
	if (likely(n)) {				\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}					\
	}						\
}

#define iterate_and_advance(i, n, v, I, B, K) {		\
	if (unlikely(i->count < n))			\
		n = i->count;				\
	if (i->count) {					\
		size_t skip = i->iov_offset;		\
		if (unlikely(i->type & ITER_BVEC)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;		\
			struct bvec_iter __bi;		\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;			\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {	\
				kvec++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= kvec - i->kvec;	\
			i->kvec = kvec;			\
		} else {				\
			const struct iovec *iov;	\
			struct iovec v;			\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {	\
				iov++;			\
				skip = 0;		\
			}				\
			i->nr_segs -= iov - i->iov;	\
			i->iov = iov;			\
		}					\
		i->count -= n;				\
		i->iov_offset = skip;			\
	}						\
}

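/*
 * Illustrative sketch of the iterate_all_kinds() contract (not part of
 * the original file; example_nonempty_segs() is a made-up name).  The I
 * step (user iovecs) must evaluate to the number of bytes it failed to
 * process (0 == whole chunk consumed), while the B and K steps (bvec and
 * kvec) are evaluated for side effects only.  Note that neither macro
 * knows anything about ITER_PIPE; pipe iterators are handled separately
 * by the callers below.
 */
static size_t __maybe_unused example_nonempty_segs(const struct iov_iter *i)
{
	size_t size = i->count;	/* the macros consume a local copy */
	size_t segs = 0;

	iterate_all_kinds(i, size, v,
		({ segs++; 0; }),	/* iovec step: report 0 bytes left */
		segs++,			/* bvec step */
		segs++			/* kvec step */
	)
	return segs;
}
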
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

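/*
 * Illustrative sketch (not part of the original file; example_fill_page()
 * is a made-up name): the classic buffered-write pattern that motivates
 * this helper.  Faulting the user pages in up front means the subsequent
 * atomic (page-fault-disabled) copy will usually succeed in one pass;
 * iov_iter_copy_from_user_atomic() is declared in linux/uio.h and defined
 * further down in this file.
 */
static size_t __maybe_unused example_fill_page(struct page *page,
		unsigned long offset, size_t bytes, struct iov_iter *i)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return 0;

	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);	/* the atomic copy does not advance */
	return copied;
}
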
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

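/*
 * Illustrative sketch (example_iter_from_user_buf() is a made-up name):
 * wrapping a single user buffer in an iov_iter, the way read(2)-style
 * paths do before handing the iterator to copy_to_iter() and friends.
 * The iovec must outlive the iterator, so both live in the caller.
 */
static size_t __maybe_unused example_iter_from_user_buf(void __user *buf,
		size_t len, struct iovec *iov, struct iov_iter *iter)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* READ: data will flow from the kernel into the user buffer */
	iov_iter_init(iter, READ, iov, 1, len);
	return iov_iter_count(iter);
}
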
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

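/*
 * Illustrative sketch (example_recv_header() and struct example_hdr are
 * made-up names): draining a fixed-size header out of an iterator, as a
 * write(2)-style path might do.  copy_from_iter() (the inline wrapper in
 * linux/uio.h around _copy_from_iter()) advances the iterator, so a short
 * return here leaves it positioned mid-header.
 */
struct example_hdr {
	__u32 magic;
	__u32 len;
};

static int __maybe_unused example_recv_header(struct example_hdr *hdr,
		struct iov_iter *from)
{
	if (copy_from_iter(hdr, sizeof(*hdr), from) != sizeof(*hdr))
		return -EFAULT;
	return 0;
}
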
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				   v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

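/*
 * Illustrative sketch (example_send_page() is a made-up name): the
 * read-side pattern used by ->read_iter() implementations -- copy part of
 * a page (e.g. a page-cache page) into whatever the iterator describes
 * (user iovecs, kernel kvecs, bvecs, or a pipe), accounting for a
 * possibly short copy.
 */
static ssize_t __maybe_unused example_send_page(struct page *page,
		size_t offset, size_t len, struct iov_iter *to)
{
	size_t copied = copy_page_to_iter(page, offset, len, to);

	/* copy_page_to_iter() already advanced *to by 'copied' */
	return copied ? copied : -EFAULT;
}
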
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

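/*
 * Illustrative sketch (example_write_all_or_nothing() is a made-up name,
 * and the example_consume callback stands in for any routine that copies
 * from the iterator as it goes): iov_iter_revert() restores an iterator
 * after a partial consumption, so a caller can fail the operation cleanly
 * without leaving the iterator half-advanced.
 */
static ssize_t __maybe_unused example_write_all_or_nothing(struct iov_iter *from,
		ssize_t (*example_consume)(struct iov_iter *))
{
	size_t count = iov_iter_count(from);
	ssize_t done = example_consume(from);

	if (done > 0 && done < (ssize_t)count) {
		/* undo the partial advance; all 'count' bytes remain */
		iov_iter_revert(from, done);
		return -EAGAIN;
	}
	return done;
}
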
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

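/*
 * Illustrative sketch (example_dio_aligned() and the blkbits parameter
 * are assumptions, not from this file): a direct-I/O path can use the
 * combined address/length alignment mask to reject misaligned requests
 * in one comparison.
 */
static bool __maybe_unused example_dio_aligned(const struct iov_iter *i,
		unsigned blkbits)
{
	unsigned long blocksize_mask = (1UL << blkbits) - 1;

	/* any low bit set means some segment base or length is odd */
	return (iov_iter_alignment(i) & blocksize_mask) == 0;
}
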
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

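/*
 * Illustrative sketch (example_pin_first_pages() is a made-up name):
 * grabbing references to the pages behind the first part of an iterator,
 * as zero-copy I/O paths do.  On success the caller owns one reference
 * per returned page; iov_iter_get_pages() does not advance the iterator,
 * so the example does that explicitly.
 */
static ssize_t __maybe_unused example_pin_first_pages(struct iov_iter *i,
		struct page **pages, unsigned maxpages)
{
	size_t start;	/* offset of the data within pages[0] */
	ssize_t bytes = iov_iter_get_pages(i, pages, iov_iter_count(i),
					   maxpages, &start);

	if (bytes > 0)
		iov_iter_advance(i, bytes);
	return bytes;
}
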
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

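/*
 * Illustrative sketch (example_vecs_needed() is a made-up name): sizing
 * an allocation before building a bio or scatter-gather list from an
 * iterator.  The answer is capped at maxpages, matching how block-layer
 * callers bound their per-request vector counts.
 */
static int __maybe_unused example_vecs_needed(const struct iov_iter *i,
		int maxpages)
{
	return iov_iter_npages(i, maxpages);
}
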
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

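/*
 * Illustrative sketch (example_save_iter() is a made-up name):
 * snapshotting an iterator for asynchronous completion.  The copy gets
 * its own kmemdup()ed segment array, so the submitter's on-stack iovecs
 * may go away; the duplicated array must eventually be kfree()d.
 */
static int __maybe_unused example_save_iter(struct iov_iter *dst,
		struct iov_iter *src, gfp_t gfp)
{
	return dup_iter(dst, src, gfp) ? 0 : -ENOMEM;
}
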
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

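/*
 * Illustrative sketch (example_vectored_write() is a made-up name): the
 * standard calling convention for import_iovec() in a writev(2)-style
 * path -- a small on-stack array for the common case, and a kfree()
 * that is safe whether or not a bigger array was allocated.
 */
static ssize_t __maybe_unused example_vectored_write(
		const struct iovec __user *uvec, unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = iov_iter_count(&iter);	/* a real caller would do I/O here */

	kfree(iov);	/* safe: NULL if iovstack was used */
	return ret;
}
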
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

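/*
 * Illustrative sketch (example_single_write() is a made-up name): the
 * write(2)-style single-buffer path.  The iovec must live as long as the
 * iterator does, which is why callers keep it alongside the iov_iter.
 */
static ssize_t __maybe_unused example_single_write(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret = import_single_range(WRITE, buf, len, &iov, &iter);

	if (ret)
		return ret;
	return iov_iter_count(&iter);	/* a real caller would do I/O here */
}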