/* mm/iov_iter.c */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
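/*
 * iterate_iovec walks a user-space iovec array, handing each non-empty
 * segment to STEP as __v.  STEP evaluates to the number of bytes it could
 * NOT process (e.g. the return value of __copy_to_user()), so a fault
 * terminates the walk early; on exit n holds the byte count actually done.
 */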
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}
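/*
 * The kvec and bvec variants below mirror iterate_iovec, but copies
 * to/from kernel memory cannot fault partway, so STEP's result is
 * discarded with (void) and there is no short-copy bookkeeping.
 */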
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->kvec; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                (void)(STEP); \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                (void)(STEP); \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted; \
}
#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->bvec; \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
        if (likely(__v.bv_len)) { \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset + skip; \
                (void)(STEP); \
                skip += __v.bv_len; \
                n -= __v.bv_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.bv_len = min_t(size_t, n, __p->bv_len); \
                if (unlikely(!__v.bv_len)) \
                        continue; \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset; \
                (void)(STEP); \
                skip = __v.bv_len; \
                n -= __v.bv_len; \
        } \
        n = wanted; \
}
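/*
 * Dispatch on the iterator flavour: ITER_BVEC segments run expression B,
 * ITER_KVEC segments run K, and plain user-space iovecs run I.
 */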
#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
        } \
}
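/*
 * Same dispatch as iterate_all_kinds, but the iterator is advanced past
 * the bytes consumed: the segment pointer, nr_segs, count and iov_offset
 * are all updated to reflect the walk.
 */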
#define iterate_and_advance(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
                if (skip == bvec->bv_len) { \
                        bvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= bvec - i->bvec; \
                i->bvec = bvec; \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                struct kvec v; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
                if (skip == kvec->iov_len) { \
                        kvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= kvec - i->kvec; \
                i->kvec = kvec; \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
                if (skip == iov->iov_len) { \
                        iov++; \
                        skip = 0; \
                } \
                i->nr_segs -= iov - i->iov; \
                i->iov = iov; \
        } \
        i->count -= n; \
        i->iov_offset = skip; \
}
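/*
 * Copy from a page into user-space iovecs.  The fast path pre-faults the
 * destination and copies under kmap_atomic(); if a fault occurs anyway,
 * it falls back to a sleeping kmap() and plain __copy_to_user().
 */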
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
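/*
 * Mirror image of copy_page_to_iter_iovec(): fill a page from user-space
 * iovecs, with the same atomic fast path and kmap() fallback.
 */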
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
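/*
 * Typical use (an illustrative sketch, not part of this file): a
 * read-side caller wraps the user's iovec array, then copies page
 * data out through the iterator:
 *
 *        struct iov_iter iter;
 *        size_t copied;
 *
 *        iov_iter_init(&iter, READ, iov, nr_segs, count);
 *        copied = copy_page_to_iter(page, offset, len, &iter);
 */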
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}
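/*
 * In the copy loops below, "(from += v.iov_len) - v.iov_len" advances the
 * cursor past the segment while still passing its old value to the copy,
 * so a single expression serves as both copy and pointer update.
 */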
size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *from = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
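/*
 * Page-based wrappers: kernel-backed iterators (bvec/kvec) can simply
 * kmap the page and reuse copy_to_iter()/copy_from_iter(); user-space
 * iovecs take the fault-aware slow paths above.
 */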
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
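/*
 * Atomic-context variant: the copy runs entirely under kmap_atomic(), may
 * be short if the source is not faulted in, and does not advance the
 * iterator; the caller is expected to advance by the amount that stuck.
 */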
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
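/*
 * Explicit kvec initialization: unlike iov_iter_init(), this does not
 * inspect the address limit; the caller passes ITER_KVEC directly.
 */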
void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *iov, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = (struct kvec *)iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
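/*
 * OR together every segment's base address and length; callers (direct
 * I/O, for instance) can then test the result against an alignment mask
 * in a single comparison.
 */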
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
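/*
 * Pin the user pages backing the first iovec segment (up to maxpages)
 * with get_user_pages_fast(); *start returns the offset into the first
 * page.  A bvec segment just takes a page reference; kvec memory has no
 * user pages behind it, hence -EFAULT.
 */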
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
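/*
 * Allocate the page-pointer array for iov_iter_get_pages_alloc(), falling
 * back to vmalloc() for large requests; the result pairs with kvfree().
 */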
static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
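/*
 * Networking helpers: copy data and accumulate its Internet checksum in
 * one pass, combining per-segment partial sums with csum_block_add() at
 * each segment's offset within the overall copy.
 */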
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
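/*
 * Count how many pages the remaining data spans, capped at maxpages; note
 * the early return from inside the iteration body once the cap is hit.
 */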
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);