// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>

static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = raw_copy_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
				 size_t len, void *from, void *priv2)
{
	ssize_t res;

	if (should_fail_usercopy())
		return len;

	from += progress;
	res = copy_to_user_nofault(iter_to, from, len);
	return res < 0 ? len : res;
}

static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{
	size_t res = len;

	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_from, len)) {
		to += progress;
		instrument_copy_from_user_before(to, iter_from, len);
		res = raw_copy_from_user(to, iter_from, len);
		instrument_copy_from_user_after(to, iter_from, len, res);
	}
	return res;
}

static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
		      size_t len, void *from, void *priv2)
{
	memcpy(iter_to, from + progress, len);
	return 0;
}

static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
			size_t len, void *to, void *priv2)
{
	memcpy(to + progress, iter_from, len);
	return 0;
}

/**
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size. For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
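
/*
 * Example (editor's sketch, not part of the kernel source): callers
 * typically attempt the copy with page faults disabled and only fall back
 * to faulting pages in when nothing could be copied; the retry policy
 * below is illustrative only:
 *
 *	while (total < len) {
 *		pagefault_disable();
 *		copied = copy_from_iter(buf + total, len - total, i);
 *		pagefault_enable();
 *		total += copied;
 *		if (copied)
 *			continue;
 *		if (fault_in_iov_iter_readable(i, len - total))
 *			return total ? total : -EFAULT;
 *	}
 */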
/**
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
		   const struct iovec *iov, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
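
/*
 * Example (editor's sketch): wrapping a caller-supplied iovec array for a
 * write-style operation, where user memory is the data source:
 *
 *	struct iovec iov[2];	// assumed filled in by the caller
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, ITER_SOURCE, iov, 2,
 *		      iov[0].iov_len + iov[1].iov_len);
 *	// iov_iter_count(&iter) now reports the total payload size.
 */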
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static __always_inline
size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
			    size_t len, void *from, void *priv2)
{
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = copy_mc_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	return copy_mc_to_kernel(iter_to, from + progress, len);
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies. Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter, memcpy_from_iter);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
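
/*
 * Example (editor's sketch): pulling data described by the iterator into a
 * kernel buffer via the copy_from_iter() wrapper; short-copy policy is
 * caller-specific:
 *
 *	size_t copied = copy_from_iter(kbuf, len, iter);
 *
 *	if (copied != len)
 *		return copied ? copied : -EFAULT;
 */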
static __always_inline
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
}

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_nocache,
				   memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static __always_inline
size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
				      size_t len, void *to, void *priv2)
{
	return __copy_from_user_flushcache(to + progress, iter_from, len);
}

static __always_inline
size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	memcpy_flushcache(to + progress, iter_from, len);
	return 0;
}

/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_flushcache,
				   memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		n = iterate_and_advance(i, n, kaddr + offset,
					copy_to_user_iter_nofault,
					memcpy_to_iter);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static __always_inline
size_t zero_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *priv, void *priv2)
{
	return clear_user(iter_to, len);
}

static __always_inline
size_t zero_to_iter(void *iter_to, size_t progress,
		    size_t len, void *priv, void *priv2)
{
	memset(iter_to, 0, len);
	return 0;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, NULL,
				   zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
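
/*
 * Example (editor's sketch): read() paths use iov_iter_zero() to fill a
 * hole without needing a source buffer; 'hole' is assumed computed by the
 * caller:
 *
 *	if (iov_iter_zero(hole, iter) != hole)
 *		return -EFAULT;	// a user page went away mid-copy
 */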
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i)
{
	size_t n, copied = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	do {
		char *p;

		n = bytes - copied;
		if (PageHighMem(page)) {
			page += offset / PAGE_SIZE;
			offset %= PAGE_SIZE;
			n = min_t(size_t, n, PAGE_SIZE - offset);
		}

		p = kmap_atomic(page) + offset;
		n = __copy_from_iter(p, n, i);
		kunmap_atomic(p);
		copied += n;
		offset += n;
	} while (PageHighMem(page) && copied != bytes && n > 0);

	return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

static void iov_iter_folioq_advance(struct iov_iter *i, size_t size)
{
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;

	if (!i->count)
		return;
	i->count -= size;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
	}

	size += i->iov_offset; /* From beginning of current segment. */
	do {
		size_t fsize = folioq_folio_size(folioq, slot);

		if (likely(size < fsize))
			break;
		size -= fsize;
		slot++;
		if (slot >= folioq_nr_slots(folioq) && folioq->next) {
			folioq = folioq->next;
			slot = 0;
		}
	} while (size);

	i->iov_offset = size;
	i->folioq_slot = slot;
	i->folioq = folioq;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_folioq(i)) {
		iov_iter_folioq_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
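
/*
 * Example (editor's sketch): iov_iter_advance() and iov_iter_revert() pair
 * up when a caller consumes part of the iterator and then must back out
 * after a failure; do_something_with() is hypothetical:
 *
 *	size_t before = iov_iter_count(iter);
 *	int ret = do_something_with(iter);	// may advance the iterator
 *
 *	if (ret < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 */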
static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll)
{
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;

	for (;;) {
		size_t fsize;

		if (slot == 0) {
			folioq = folioq->prev;
			slot = folioq_nr_slots(folioq);
		}
		slot--;

		fsize = folioq_folio_size(folioq, slot);
		if (unroll <= fsize) {
			i->iov_offset = fsize - unroll;
			break;
		}
		unroll -= fsize;
	}

	i->folioq_slot = slot;
	i->folioq = folioq;
}

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else if (iov_iter_is_folioq(i)) {
		i->iov_offset = 0;
		iov_iter_folioq_revert(i, unroll);
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	if (unlikely(iov_iter_is_folioq(i)))
		return !i->count ? 0 :
			umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
		   const struct kvec *kvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
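
/*
 * Example (editor's sketch): describing a kernel buffer so it can be passed
 * to code that consumes an iov_iter, e.g. a receive into kbuf:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = klen };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, klen);
 */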
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
		   const struct bio_vec *bvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

/**
 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @folioq: The starting point in the folio queue.
 * @first_slot: The first slot in the folio queue to use
 * @offset: The offset into the folio in the first slot to start at
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
			  const struct folio_queue *folioq, unsigned int first_slot,
			  unsigned int offset, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_FOLIOQ,
		.data_source = direction,
		.folioq = folioq,
		.folioq_slot = first_slot,
		.count = count,
		.iov_offset = offset,
	};
}
EXPORT_SYMBOL(iov_iter_folio_queue);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	const struct iovec *iov = iter_iov(i);
	size_t size = i->count;
	size_t skip = i->iov_offset;

	do {
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		iov++;
		size -= len;
		skip = 0;
	} while (size);

	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	const struct bio_vec *bvec = i->bvec;
	unsigned skip = i->iov_offset;
	size_t size = i->count;

	do {
		size_t len = bvec->bv_len;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
			return false;

		bvec++;
		size -= len;
		skip = 0;
	} while (size);

	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	/* With both xarray and folioq types, we're dealing with whole folios. */
	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}
	if (iov_iter_is_folioq(i)) {
		if (i->count & len_mask)
			return false;
		if (i->iov_offset & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
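
/*
 * Example (editor's sketch): a direct-I/O path rejecting requests whose
 * segments don't meet the device's block alignment; 'bdev' is assumed:
 *
 *	unsigned int bs = bdev_logical_block_size(bdev);
 *
 *	if (!iov_iter_is_aligned(iter, bs - 1, bs - 1))
 *		return -EINVAL;
 */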
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	const struct iovec *iov = iter_iov(i);
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;

	do {
		size_t len = iov->iov_len - skip;
		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
		}
		iov++;
		skip = 0;
	} while (size);
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	const struct bio_vec *bvec = i->bvec;
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;

	do {
		size_t len = bvec->bv_len - skip;
		res |= (unsigned long)bvec->bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		bvec++;
		size -= len;
		skip = 0;
	} while (size);

	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	/* With both xarray and folioq types, we're dealing with whole folios. */
	if (iov_iter_is_folioq(i))
		return i->iov_offset | i->count;
	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;
		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
				     struct page ***ppages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	const struct folio_queue *folioq = iter->folioq;
	struct page **pages;
	unsigned int slot = iter->folioq_slot;
	size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
		if (WARN_ON(iov_offset != 0))
			return -EIO;
	}

	maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages);
	if (!maxpages)
		return -ENOMEM;
	*_start_offset = iov_offset & ~PAGE_MASK;
	pages = *ppages;

	for (;;) {
		struct folio *folio = folioq_folio(folioq, slot);
		size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot);
		size_t part = PAGE_SIZE - offset % PAGE_SIZE;

		part = umin(part, umin(maxsize - extracted, fsize - offset));
		count -= part;
		iov_offset += part;
		extracted += part;

		*pages = folio_page(folio, offset / PAGE_SIZE);
		get_page(*pages);
		pages++;
		maxpages--;
		if (maxpages == 0 || extracted >= maxsize)
			break;

		if (iov_offset >= fsize) {
			iov_offset = 0;
			slot++;
			if (slot == folioq_nr_slots(folioq) && folioq->next) {
				folioq = folioq->next;
				slot = 0;
			}
		}
	}

	iter->count = count;
	iter->iov_offset = iov_offset;
	iter->folioq = folioq;
	iter->folioq_slot = slot;
	return extracted;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
					  struct page ***pages, size_t maxsize,
					  unsigned int maxpages, size_t *start)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_folioq(i))
		return iter_folioq_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
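
/*
 * Example (editor's sketch): the caller owns a reference on every page
 * returned and must drop them once the I/O completes:
 *
 *	ssize_t got = iov_iter_get_pages2(iter, pages, maxsize, maxpages, &off);
 *
 *	if (got > 0) {
 *		int n = DIV_ROUND_UP(off + got, PAGE_SIZE);
 *
 *		// ... do the I/O against pages[0..n-1] ...
 *		for (int k = 0; k < n; k++)
 *			put_page(pages[k]);
 *	}
 */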
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
				  struct page ***pages, size_t maxsize, size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_folioq(i)) {
		unsigned offset = i->iov_offset % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, u32 nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT;
	u32 i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	*iovp = NULL;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
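
/*
 * Example (editor's sketch): the canonical readv()-style call sequence;
 * kfree(iov) is always safe afterwards, per the comment above.
 * do_the_read() is hypothetical:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_read(&iter);
 *	kfree(iov);
 *	return ret;
 */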
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}

/*
 * Extract a list of contiguous pages from an ITER_FOLIOQ iterator. This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	const struct folio_queue *folioq = i->folioq;
	struct page **p;
	unsigned int nr = 0;
	size_t extracted = 0, offset, slot = i->folioq_slot;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
		if (WARN_ON(i->iov_offset != 0))
			return -EIO;
	}

	offset = i->iov_offset & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	for (;;) {
		struct folio *folio = folioq_folio(folioq, slot);
		size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
		size_t part = PAGE_SIZE - offset % PAGE_SIZE;

		if (offset < fsize) {
			part = umin(part, umin(maxsize - extracted, fsize - offset));
			i->count -= part;
			i->iov_offset += part;
			extracted += part;

			p[nr++] = folio_page(folio, offset / PAGE_SIZE);
		}

		if (nr >= maxpages || extracted >= maxsize)
			break;

		if (i->iov_offset >= fsize) {
			i->iov_offset = 0;
			slot++;
			if (slot == folioq_nr_slots(folioq) && folioq->next) {
				folioq = folioq->next;
				slot = 0;
			}
		}
	}

	i->folioq = folioq;
	i->folioq_slot = slot;
	return extracted;
}

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page *page, **p;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	pgoff_t index = pos >> PAGE_SHIFT;
	XA_STATE(xas, i->xarray, index);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = find_subpage(page, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	size_t skip = i->iov_offset, offset, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->bvec->bv_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}

	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	offset = skip % PAGE_SIZE;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;
	for (k = 0; k < maxpages; k++)
		p[k] = page + k;

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->kvec->iov_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + size;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them. This should only be used if the iterator is user-backed
 * (IOVEC/UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator. The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *     added to the pages, but refs will not be taken.
 *     iov_iter_extract_will_pin() will return true.
 *
 * (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the
 *     pages are merely listed; no extra refs or pins are obtained.
 *     iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 * (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_folioq(i))
		return iov_iter_extract_folioq_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
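
/*
 * Example (editor's sketch): cleanup after extraction depends on whether the
 * pages were pinned, which iov_iter_extract_will_pin() reports:
 *
 *	bool pin = iov_iter_extract_will_pin(iter);
 *	ssize_t got = iov_iter_extract_pages(iter, &pages, size, npages, 0, &off);
 *
 *	if (got > 0 && pin) {
 *		// ... once the transfer completes ...
 *		for (int k = 0; k < DIV_ROUND_UP(off + got, PAGE_SIZE); k++)
 *			unpin_user_page(pages[k]);
 *	}
 */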