#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
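
/*
 * Copy data from a kernel page into the userspace iovecs described by the
 * iterator.  The fast path maps the page with kmap_atomic() and uses
 * __copy_to_user_inatomic(); if the destination is not resident we fall
 * back to a sleeping kmap() and plain __copy_to_user().
 */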
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
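
/*
 * Mirror image of copy_page_to_iter_iovec(): copy userspace data described
 * by the iovecs into a kernel page, again trying the atomic path first and
 * reverting to kmap() when a fault must be taken.
 */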
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied.
 */
static size_t copy_from_user_atomic_iovec(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
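
/*
 * Advance an iovec-backed iterator by @bytes, stepping into the following
 * segments as needed and skipping any zero-length segments on the way.
 */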
static void advance_iovec(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & ITER_BVEC)) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
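
/*
 * OR together the addresses and lengths covered by the iterator so the
 * caller can check the alignment of the whole request in one go.
 */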
static unsigned long alignment_iovec(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
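
/*
 * Pin the user pages backing the first iovec segment (bounded by maxsize
 * and maxpages), store the offset into the first page in *start and
 * return the number of bytes covered by the pinned pages.
 */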
static ssize_t get_pages_iovec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
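
/*
 * Same as get_pages_iovec(), except that the page array is allocated here
 * (kmalloc with a vmalloc fallback) and returned via *pages.
 */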
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;
		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			- addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
	}
	return min(npages, maxpages);
}
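
/* Small kmap_atomic() helpers used by the bvec variants below. */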
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}
static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}
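
/*
 * bvec counterpart of copy_page_to_iter_iovec(): the destination segments
 * are kernel pages, so nothing can fault and memcpy_to_page() is enough.
 */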
static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	kaddr = kmap_atomic(page);
	from = kaddr + offset;
	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap_atomic(kaddr);
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}
static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;

	kaddr = kmap_atomic(page);
	to = kaddr + offset;
	copy = min(bytes, bvec->bv_len - skip);

	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
	to += copy;
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap_atomic(kaddr);
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted;
}
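
/*
 * Copy @bytes from the bvec segments into @page at @offset.  The source
 * pages are already in the kernel, so no fault handling is needed.
 */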
static size_t copy_from_user_bvec(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t left;
	const struct bio_vec *bvec;
	size_t base = i->iov_offset;

	kaddr = kmap_atomic(page);
	for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
		size_t copy = min(left, bvec->bv_len - base);

		memcpy_from_page(kaddr + offset, bvec->bv_page,
				 bvec->bv_offset + base, copy);
		offset += copy;
		left -= copy;
	}
	kunmap_atomic(kaddr);
	return bytes;
}
static void advance_bvec(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct bio_vec *bvec = i->bvec;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !bvec->bv_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the bvec array).
		 */
		while (bytes || unlikely(i->count && !bvec->bv_len)) {
			int copy;

			copy = min(bytes, bvec->bv_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (bvec->bv_len == base) {
				bvec++;
				nr_segs--;
				base = 0;
			}
		}
		i->bvec = bvec;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
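
/* bvec counterpart of alignment_iovec(). */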
static unsigned long alignment_bvec(const struct iov_iter *i)
{
	const struct bio_vec *bvec = i->bvec;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = bvec->bv_offset + i->iov_offset;
	n = bvec->bv_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++bvec)->bv_len) {
		res |= bvec->bv_offset | bvec->bv_len;
		size -= bvec->bv_len;
	}
	res |= bvec->bv_offset | size;
	return res;
}
static ssize_t get_pages_bvec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	/* can't be more than PAGE_SIZE */
	*start = bvec->bv_offset + i->iov_offset;

	get_page(*pages = bvec->bv_page);

	return len;
}
static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	*start = bvec->bv_offset + i->iov_offset;

	*pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
	if (!*pages)
		return -ENOMEM;

	get_page(**pages = bvec->bv_page);

	return len;
}
static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct bio_vec *bvec = i->bvec;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, bvec++) {
		size_t len = bvec->bv_len - offset;
		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages++;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
	}
	return min(npages, maxpages);
}
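
/*
 * The exported entry points below simply dispatch on ITER_BVEC to the
 * iovec- or bvec-based implementation above.
 */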
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	if (i->type & ITER_BVEC)
		return copy_from_user_bvec(page, i, offset, bytes);
	else
		return copy_from_user_atomic_iovec(page, i, offset, bytes);
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (i->type & ITER_BVEC)
		advance_bvec(i, size);
	else
		advance_iovec(i, size);
}
EXPORT_SYMBOL(iov_iter_advance);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return alignment_bvec(i);
	else
		return alignment_iovec(i);
}
EXPORT_SYMBOL(iov_iter_alignment);
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_bvec(i, pages, maxsize, maxpages, start);
	else
		return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_alloc_bvec(i, pages, maxsize, start);
	else
		return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (i->type & ITER_BVEC)
		return iov_iter_npages_bvec(i, maxpages);
	else
		return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);