#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
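
/*
 * Illustrative sketch (not part of the original file): how a read-side
 * caller might drain one page into a user-supplied iov_iter.  Unlike the
 * copy-from-user helpers below, copy_page_to_iter() advances the iterator
 * itself, so no explicit iov_iter_advance() is needed.  The helper name
 * and parameters here are hypothetical.
 */
static size_t example_send_page(struct page *page, size_t offset,
                                struct iov_iter *iter)
{
        size_t want = min(iter->count, (size_t)(PAGE_SIZE - offset));

        /*
         * Returns the number of bytes actually copied; a short return
         * means a user page faulted and could not be faulted back in.
         */
        return copy_page_to_iter(page, offset, want, iter);
}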

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;

                if (unlikely(left))
                        break;
        }
        return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap_atomic(kaddr);

        return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
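
/*
 * Illustrative sketch (not part of the original file): the canonical
 * buffered-write step around the atomic copy.  __copy_from_user_inatomic()
 * must not fault, hence the pagefault_disable() bracket, and the copy does
 * not advance the iterator, so the caller advances by the amount actually
 * copied.  The helper name is hypothetical.
 */
static size_t example_fill_page(struct page *page, struct iov_iter *i,
                                unsigned long offset, size_t bytes)
{
        size_t copied;

        pagefault_disable();
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        iov_iter_advance(i, copied);
        return copied;
}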

/*
 * This has the same side effects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap(page);
        return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);
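
/*
 * Illustrative sketch (not part of the original file): falling back from
 * the atomic copy to this sleeping variant after a short copy.  Neither
 * helper advances the iterator, so the retry re-reads from the same
 * position; per the comment above, the page must be unlocked here.
 */
static size_t example_copy_with_fallback(struct page *page, struct iov_iter *i,
                                         unsigned long offset, size_t bytes)
{
        size_t copied;

        pagefault_disable();
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        if (unlikely(copied != bytes))
                copied = iov_iter_copy_from_user(page, i, offset, bytes);
        return copied;
}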

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
                while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;

                        copy = min(bytes, iov->iov_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
                                iov++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->iov = iov;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}
EXPORT_SYMBOL(iov_iter_advance);
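
/*
 * Illustrative sketch (not part of the original file): advancing a freshly
 * initialised iterator.  iov_iter_init() is assumed to be the helper from
 * <linux/uio.h> of this era; crossing a segment boundary moves i->iov
 * forward, shrinks i->nr_segs and resets i->iov_offset into the new segment.
 */
static void example_init_and_advance(struct iov_iter *i,
                                     const struct iovec *iov,
                                     unsigned long nr_segs, size_t count)
{
        iov_iter_init(i, iov, nr_segs, count, 0);

        /* Consume the first half; i->count drops by the same amount. */
        iov_iter_advance(i, count / 2);
}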

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes.  Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        char __user *buf = i->iov->iov_base + i->iov_offset;

        bytes = min(bytes, i->iov->iov_len - i->iov_offset);
        return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
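
/*
 * Illustrative sketch (not part of the original file): the prefault-retry
 * loop the comment above alludes to.  A successful prefault does not pin
 * the page, so the atomic copy can still come up empty; shrinking the
 * chunk and retrying guarantees forward progress.  Names are hypothetical.
 */
static size_t example_prefault_and_copy(struct page *page, struct iov_iter *i,
                                        unsigned long offset, size_t bytes)
{
        size_t copied = 0;

        while (bytes && !copied) {
                if (unlikely(iov_iter_fault_in_readable(i, bytes)))
                        break;          /* truly inaccessible: give up */

                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
                pagefault_enable();

                if (!copied)
                        bytes /= 2;     /* raced with reclaim: retry smaller */
        }
        return copied;
}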

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        const struct iovec *iov = i->iov;

        if (i->nr_segs == 1)
                return i->count;
        else
                return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
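
/*
 * Illustrative sketch (not part of the original file): sizing the next copy
 * so it stays within both the current page and the current iovec segment,
 * the pattern older write paths used before calling the copy helpers above.
 * The helper name is hypothetical.
 */
static size_t example_next_chunk(const struct iov_iter *i, unsigned long offset)
{
        size_t bytes = min_t(size_t, PAGE_SIZE - offset, i->count);

        /* Never cross the current iovec, so one __copy_from_user suffices. */
        return min(bytes, iov_iter_single_seg_count(i));
}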