// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
 * @orig: The original iterator
 * @orig_len: The amount of iterator to copy
 * @new: The iterator to be set up
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * build up a second iterator that refers to all of those bits.  This allows
 * the original iterator to be disposed of.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * On success, the number of elements in the bvec is returned and the original
 * iterator will have been advanced by the amount extracted.
 *
 * The iov_iter_extract_mode() function should be used to query how cleanup
 * should be performed.
 */
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags)
{
	struct bio_vec *bv = NULL;
	struct page **pages;
	unsigned int cur_npages;
	unsigned int max_pages;
	unsigned int npages = 0;
	unsigned int i;
	ssize_t ret;
	size_t count = orig_len, offset, len;
	size_t bv_size, pg_size;

	if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
		return -EIO;

	max_pages = iov_iter_npages(orig, INT_MAX);
	bv_size = array_size(max_pages, sizeof(*bv));
	bv = kvmalloc(bv_size, GFP_KERNEL);
	if (!bv)
		return -ENOMEM;

	/* Put the page list at the end of the bvec list storage.  bvec
	 * elements are larger than page pointers, so as long as we work
	 * 0->last, we should be fine.
	 */
	pg_size = array_size(max_pages, sizeof(*pages));
	pages = (void *)bv + bv_size - pg_size;

	while (count && npages < max_pages) {
		ret = iov_iter_extract_pages(orig, &pages, count,
					     max_pages - npages, extraction_flags,
					     &offset);
		if (ret < 0) {
			pr_err("Couldn't get user pages (rc=%zd)\n", ret);
			break;
		}

		if (ret > count) {
			pr_err("get_pages rc=%zd more than %zu\n", ret, count);
			break;
		}

		count -= ret;
		ret += offset;
		cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			pr_err("Out of bvec array capacity (%u vs %u)\n",
			       npages + cur_npages, max_pages);
			break;
		}

		/* Turn the batch of extracted pages into bvec elements; only
		 * the first page carries a non-zero offset.
		 */
		for (i = 0; i < cur_npages; i++) {
			len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
			bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
			ret -= len;
			offset = 0;
		}

		npages += cur_npages;
	}

	iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
	return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
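
/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * caller that pins the pages behind a user buffer so I/O can continue after
 * the caller's own iterator goes away.  The function name and the zero
 * extraction_flags are assumptions for illustration only.
 */
static ssize_t __maybe_unused netfs_example_pin_user_buffer(struct iov_iter *user_iter,
							    size_t len,
							    struct iov_iter *pinned)
{
	ssize_t nbv;

	/* Build a bvec-backed iterator over the extracted pages. */
	nbv = netfs_extract_user_iter(user_iter, len, pinned, 0);
	if (nbv < 0)
		return nbv;

	/* 'pinned' now carries nbv bvec elements covering up to 'len' bytes.
	 * The kvmalloc'd bvec array backing it must be kvfree'd once I/O has
	 * completed, and iov_iter_extract_mode() consulted for how to release
	 * the pages themselves.
	 */
	return nbv;
}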

/*
 * Select the span of a bvec iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  Returns the size of the span
 * in bytes.
 */
static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
			       size_t max_size, size_t max_segs)
{
	const struct bio_vec *bvecs = iter->bvec;
	unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
	size_t len, span = 0, n = iter->count;
	size_t skip = iter->iov_offset + start_offset;

	if (WARN_ON(!iov_iter_is_bvec(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;

	/* Step over any whole bvec elements that precede the start offset. */
	while (n && ix < nbv && skip) {
		len = bvecs[ix].bv_len;
		if (skip < len)
			break;
		skip -= len;
		ix++;
	}

	/* Accumulate whole or partial elements until either limit is hit. */
	while (n && ix < nbv) {
		len = min3(n, bvecs[ix].bv_len - skip, max_size);
		span += len;
		nsegs++;
		ix++;
		if (span >= max_size || nsegs >= max_segs)
			break;
		skip = 0;
	}

	return min(span, max_size);
}

/*
 * Select the span of an xarray iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  It is assumed that segments
 * can be larger than a page in size, provided they're physically contiguous.
 * Returns the size of the span in bytes.
 */
static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
				 size_t max_size, size_t max_segs)
{
	struct folio *folio;
	unsigned int nsegs = 0;
	loff_t pos = iter->xarray_start + iter->iov_offset;
	pgoff_t index = pos / PAGE_SIZE;
	size_t span = 0, n = iter->count;

	XA_STATE(xas, iter->xarray, index);

	if (WARN_ON(!iov_iter_is_xarray(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = min(max_size, n - start_offset);

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		size_t offset, flen, len;

		if (xas_retry(&xas, folio))
			continue;
		if (WARN_ON(xa_is_value(folio)))
			break;
		if (WARN_ON(folio_test_hugetlb(folio)))
			break;

		flen = folio_size(folio);
		offset = offset_in_folio(folio, pos);
		len = min(max_size, flen - offset);
		span += len;
		nsegs++;
		if (span >= max_size || nsegs >= max_segs)
			break;

		pos += len;
	}
	rcu_read_unlock();

	return min(span, max_size);
}

/*
 * Select the span of a folio queue iterator we're going to use.  Limit it by
 * both maximum size and maximum number of segments.  Returns the size of the
 * span in bytes.
 */
static size_t netfs_limit_folioq(const struct iov_iter *iter, size_t start_offset,
				 size_t max_size, size_t max_segs)
{
	const struct folio_queue *folioq = iter->folioq;
	unsigned int nsegs = 0;
	unsigned int slot = iter->folioq_slot;
	size_t span = 0, n = iter->count;

	if (WARN_ON(!iov_iter_is_folioq(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = umin(max_size, n - start_offset);

	/* The current slot may point past the end of its queue segment. */
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
	}

	start_offset += iter->iov_offset;
	do {
		size_t flen = folioq_folio_size(folioq, slot);

		if (start_offset < flen) {
			span += flen - start_offset;
			nsegs++;
			start_offset = 0;
		} else {
			start_offset -= flen;
		}
		if (span >= max_size || nsegs >= max_segs)
			break;

		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = folioq->next;
			slot = 0;
		}
	} while (folioq);

	return umin(span, max_size);
}
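
/*
 * Worked example for netfs_limit_folioq() (illustrative, not from the
 * original file): with a queue of two 4096-byte folios, folioq_slot = 0,
 * iov_offset = 1024, start_offset = 0, iter->count = 7168, max_size = 65536
 * and max_segs = 16, max_size is first clamped to 7168; the first folio then
 * contributes 4096 - 1024 = 3072 bytes and the second a full 4096, so the
 * span returned is 7168 bytes.
 */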

size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
			size_t max_size, size_t max_segs)
{
	if (iov_iter_is_folioq(iter))
		return netfs_limit_folioq(iter, start_offset, max_size, max_segs);
	if (iov_iter_is_bvec(iter))
		return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
	if (iov_iter_is_xarray(iter))
		return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
	BUG();
}
EXPORT_SYMBOL(netfs_limit_iter);
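
/*
 * Illustrative usage sketch (not part of the original file): size the next
 * chunk of a request from whatever buffer type the iterator wraps.  The
 * 1MiB/16-segment limits are made-up values for illustration.
 */
static size_t __maybe_unused netfs_example_size_chunk(const struct iov_iter *iter,
						      size_t done)
{
	/* How many bytes, starting 'done' bytes in, fit within both a 1MiB
	 * byte cap and a 16-segment cap for this iterator type?
	 */
	return netfs_limit_iter(iter, done, 1024 * 1024, 16);
}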