lib/iov_iter.c

#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->kvec; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                (void)(STEP); \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                (void)(STEP); \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
        struct bvec_iter __start; \
        __start.bi_size = n; \
        __start.bi_bvec_done = skip; \
        __start.bi_idx = 0; \
        for_each_bvec(__v, i->bvec, __bi, __start) { \
                if (!__v.bv_len) \
                        continue; \
                (void)(STEP); \
        } \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
        if (likely(n)) { \
                size_t skip = i->iov_offset; \
                if (unlikely(i->type & ITER_BVEC)) { \
                        struct bio_vec v; \
                        struct bvec_iter __bi; \
                        iterate_bvec(i, n, v, __bi, skip, (B)) \
                } else if (unlikely(i->type & ITER_KVEC)) { \
                        const struct kvec *kvec; \
                        struct kvec v; \
                        iterate_kvec(i, n, v, kvec, skip, (K)) \
                } else { \
                        const struct iovec *iov; \
                        struct iovec v; \
                        iterate_iovec(i, n, v, iov, skip, (I)) \
                } \
        } \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
        if (unlikely(i->count < n)) \
                n = i->count; \
        if (i->count) { \
                size_t skip = i->iov_offset; \
                if (unlikely(i->type & ITER_BVEC)) { \
                        const struct bio_vec *bvec = i->bvec; \
                        struct bio_vec v; \
                        struct bvec_iter __bi; \
                        iterate_bvec(i, n, v, __bi, skip, (B)) \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
                        i->nr_segs -= i->bvec - bvec; \
                        skip = __bi.bi_bvec_done; \
                } else if (unlikely(i->type & ITER_KVEC)) { \
                        const struct kvec *kvec; \
                        struct kvec v; \
                        iterate_kvec(i, n, v, kvec, skip, (K)) \
                        if (skip == kvec->iov_len) { \
                                kvec++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= kvec - i->kvec; \
                        i->kvec = kvec; \
                } else { \
                        const struct iovec *iov; \
                        struct iovec v; \
                        iterate_iovec(i, n, v, iov, skip, (I)) \
                        if (skip == iov->iov_len) { \
                                iov++; \
                                skip = 0; \
                        } \
                        i->nr_segs -= iov - i->iov; \
                        i->iov = iov; \
                } \
                i->count -= n; \
                i->iov_offset = skip; \
        } \
}
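
/*
 * Illustrative sketch (not part of the original file): the three STEP
 * expressions passed to iterate_and_advance() are the per-segment actions
 * for user iovecs, bvecs and kvecs respectively, with 'v' as the current
 * segment view.  A hypothetical helper zeroing whatever the iterator
 * describes would look like this (it mirrors what iov_iter_zero() below
 * actually does):
 *
 *        static size_t zero_iter(size_t bytes, struct iov_iter *i)
 *        {
 *                iterate_and_advance(i, bytes, v,
 *                        __clear_user(v.iov_base, v.iov_len),
 *                        memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 *                        memset(v.iov_base, 0, v.iov_len)
 *                )
 *                return bytes;
 *        }
 */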

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        int idx = i->idx;
        int next = pipe->curbuf + pipe->nrbufs;
        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        size_t off;
        int idx;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        idx = i->idx;
        buf = &pipe->bufs[idx];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                idx = next_idx(idx, pipe);
                buf = &pipe->bufs[idx];
        }
        if (idx == pipe->curbuf && pipe->nrbufs)
                return 0;
        pipe->nrbufs++;
        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        buf->len = bytes;
        i->iov_offset = offset + bytes;
        i->idx = idx;
out:
        i->count -= bytes;
        return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
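
/*
 * Illustrative sketch (assumed names, not part of the original file):
 * building an iterator over two user segments and draining it into a
 * kernel buffer.  WRITE is the direction of the overall transfer, i.e.
 * userspace writing data into the kernel:
 *
 *        struct iovec iov[2] = {
 *                { .iov_base = ubuf0, .iov_len = len0 },
 *                { .iov_base = ubuf1, .iov_len = len1 },
 *        };
 *        struct iov_iter iter;
 *
 *        iov_iter_init(&iter, WRITE, iov, 2, len0 + len1);
 *        copied = copy_from_iter(kbuf, len0 + len1, &iter);
 */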

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
        size_t off = i->iov_offset;
        int idx = i->idx;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
                off = 0;
        }
        *idxp = idx;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t off;
        int idx;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &idx, &off);
        *idxp = idx;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[idx].len += size;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;
                pipe->nrbufs++;
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                left -= PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter(addr, bytes, i);
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
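
/*
 * Illustrative sketch: a ->read_iter()-style handler pushing a kernel
 * buffer out to whatever the iterator describes (user memory, kernel
 * pages or a pipe).  'data' and 'len' are assumed names; a short copy is
 * reported through the return value, not an error code:
 *
 *        static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *        {
 *                size_t copied = copy_to_iter(data, len, to);
 *
 *                return copied ? copied : -EFAULT;
 *        }
 */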

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;

        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user((to += v.iov_len) - v.iov_len,
                                     v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(copy_from_iter_full);
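
/*
 * Unlike copy_from_iter(), the _full variant is all-or-nothing: the
 * iterator is only advanced when every byte was copied, so callers that
 * need a complete structure can simply do (illustrative):
 *
 *        if (!copy_from_iter_full(&hdr, sizeof(hdr), i))
 *                return -EFAULT;
 */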

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                        return false;
                0;}),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (likely(!(i->type & ITER_PIPE)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;

        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(i->type & ITER_PIPE)) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
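
/*
 * Illustrative sketch of the buffered-write pattern this helper and
 * iov_iter_fault_in_readable() above are designed for: fault the user
 * pages in while sleeping is still allowed, then copy with page faults
 * disabled, and let the caller advance by however much actually made it.
 * 'page', 'offset' and 'bytes' are assumed to come from the caller:
 *
 *        if (iov_iter_fault_in_readable(i, bytes))
 *                return -EFAULT;
 *        // ... lock the destination page; faults are now forbidden ...
 *        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *        // ... unlock the page ...
 *        iov_iter_advance(i, copied);        // this helper does not advance
 */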

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (pipe->nrbufs) {
                size_t off = i->iov_offset;
                int idx = i->idx;
                int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
                if (off) {
                        pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
                        idx = next_idx(idx, pipe);
                        nrbufs++;
                }
                while (pipe->nrbufs > nrbufs) {
                        pipe_buf_release(pipe, &pipe->bufs[idx]);
                        idx = next_idx(idx, pipe);
                        pipe->nrbufs--;
                }
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                size_t off = i->iov_offset, left = size;
                int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
                i->idx = idx;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->type & ITER_PIPE)) {
                pipe_advance(i, size);
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        i->count += unroll;
        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;
                int idx = i->idx;
                size_t off = i->iov_offset;
                while (1) {
                        size_t n = off - pipe->bufs[idx].offset;
                        if (unroll < n) {
                                off -= (n - unroll);
                                break;
                        }
                        unroll -= n;
                        if (!unroll && idx == i->start_idx) {
                                off = 0;
                                break;
                        }
                        if (!idx--)
                                idx = pipe->buffers - 1;
                        off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
                }
                i->iov_offset = off;
                i->idx = idx;
                pipe_truncate(i);
                return;
        }
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (i->type & ITER_BVEC) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logics for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
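
/*
 * Illustrative sketch: iov_iter_revert() undoes an earlier advance, which
 * is what error/retry paths use when a lower layer consumed part of the
 * iterator but the operation has to be unwound.  do_io() is hypothetical:
 *
 *        size_t before = iov_iter_count(i);
 *        ret = do_io(i);                // may advance i
 *        if (ret < 0)
 *                iov_iter_revert(i, before - iov_iter_count(i));
 */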

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
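
/*
 * Illustrative sketch (assumed names): wrapping a kernel buffer so code
 * written against iov_iter works on kernel memory too.  Note that in this
 * version of the API the iterator type is OR'ed into the direction:
 *
 *        struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *        struct iov_iter iter;
 *
 *        iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
 */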

void iov_iter_pipe(struct iov_iter *i, int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != ITER_PIPE);
        WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = direction;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
        i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
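
/*
 * Since the result ORs together every base address and length, a caller
 * can test all segments against an alignment requirement in one go.
 * Illustrative direct-I/O style check against an assumed 512-byte
 * requirement:
 *
 *        if (iov_iter_alignment(iter) & 511)
 *                return -EINVAL;
 */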

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return ~0U;
        }

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[idx].page);
                idx = next_idx(idx, pipe);
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned npages;
        size_t capacity;
        int idx;

        if (!maxsize)
                return 0;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
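
/*
 * Illustrative sketch: pinning up to one page's worth of the iterator for
 * zero-copy I/O.  On success the return value is the number of usable
 * bytes starting at offset 'start' within pages[0]; the iterator itself
 * is not advanced for the iovec/bvec cases:
 *
 *        struct page *pages[1];
 *        size_t start;
 *        ssize_t n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &start);
 *
 *        if (n < 0)
 *                return n;
 *        // ... use pages[0] at offset 'start' for n bytes, then put_page() ...
 */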

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;
        size_t n;
        int idx;
        int npages;

        if (!maxsize)
                return 0;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        n = npages * PAGE_SIZE - *start;
        if (maxsize > n)
                maxsize = n;
        else
                npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
        p = get_pages_array(npages);
        if (!p)
                return -ENOMEM;
        n = __pipe_get_pages(i, maxsize, p, idx, start);
        if (n > 0)
                *pages = p;
        else
                kvfree(p);
        return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
                                  struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return false;
        }
        if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (err)
                        return false;
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
                0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        iov_iter_advance(i, bytes);
        return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);     /* for now */
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;
                size_t off;
                int idx;

                if (!sanity(i))
                        return 0;

                data_start(i, &idx, &off);
                /* some of this one + all after this one */
                npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
                if (npages >= maxpages)
                        return maxpages;
        } else iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
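
/*
 * Illustrative use: sizing an allocation before walking the iterator,
 * e.g. block-layer callers capping the estimate at BIO_MAX_PAGES when
 * deciding how many bio_vecs a request will need:
 *
 *        int nr = iov_iter_npages(iter, BIO_MAX_PAGES);
 */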

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (unlikely(new->type & ITER_PIPE)) {
                WARN_ON(1);
                return NULL;
        }
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
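
/*
 * Illustrative sketch of the usual calling pattern (cf. the readv/writev
 * paths): try a small on-stack array first, let import_iovec() allocate a
 * larger one only when needed, and unconditionally kfree() afterwards,
 * which is safe because *iov is NULL when the stack array was used:
 *
 *        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *        struct iov_iter iter;
 *        ssize_t ret;
 *
 *        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *                           &iov, &iter);
 *        if (ret < 0)
 *                return ret;
 *        // ... use &iter ...
 *        kfree(iov);
 */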

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);
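
/*
 * Illustrative sketch for the single-buffer case (plain read(2)/write(2)
 * style), with 'ubuf' and 'len' assumed to come from the syscall; note
 * that len is silently clamped to MAX_RW_COUNT:
 *
 *        struct iovec iov;
 *        struct iov_iter iter;
 *
 *        ret = import_single_range(READ, ubuf, len, &iov, &iter);
 *        if (ret)
 *                return ret;
 *        // &iter now describes the single user segment
 */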