// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	bmd->iter = *data;
	if (iter_is_iovec(data)) {
		memcpy(bmd->iov, iter_iov(data),
		       sizeof(struct iovec) * data->nr_segs);
		bmd->iter.__iov = bmd->iov;
	}
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if (iov_iter_rw(iter) == WRITE &&
	    (!map_data || !map_data->null_mapped)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else if (map_data && map_data->from_user) {
		struct iov_iter iter2 = *iter;

		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
		iter2.data_source = ITER_SOURCE;
		ret = bio_copy_from_iter(bio, &iter2);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		extraction_flags |= ITER_ALLOW_P2PDMA;
	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);

	while (iov_iter_count(iter)) {
		struct page *stack_pages[UIO_FASTIOV];
		struct page **pages = stack_pages;
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs > ARRAY_SIZE(stack_pages))
			pages = NULL;

		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
					       nr_vecs, extraction_flags, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page))
					break;

				if (same_page)
					bio_release_page(bio, page);
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			bio_release_page(bio, pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

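/*
 * Illustrative sketch (not part of this file): a passthrough caller with a
 * page-aligned, vmalloc'ed buffer that is not on the stack reaches
 * bio_map_kern() via blk_rq_map_kern() below.  The vmalloc case is why
 * bi_private is stashed above: flush_kernel_vmap_range() runs before
 * submission and bio_invalidate_vmalloc_pages() after a read completes.
 * Here q and len are assumed to come from the caller and error handling is
 * elided for brevity:
 *
 *	void *buf = vmalloc(len);
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (!IS_ERR(rq)) {
 *		if (!blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL))
 *			blk_execute_rq(rq, false);
 *		blk_mq_free_request(rq);
 *	}
 *	vfree(buf);
 */
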
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

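/*
 * Illustrative sketch (not part of this file): buffers that fail the
 * blk_rq_aligned() check or that live on the stack are bounced through
 * bio_copy_kern() by blk_rq_map_kern() below.  For a WRITE the memcpy()
 * above runs at mapping time; for a READ, bio_copy_kern_endio_read() copies
 * the bounce pages back into @data at completion, so the data is in place by
 * the time the synchronous execution returns.  Assuming q comes from the
 * caller and with error handling elided:
 *
 *	char sense[96];
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (!IS_ERR(rq)) {
 *		if (!blk_rq_map_kern(q, rq, sense, sizeof(sense), GFP_KERNEL))
 *			blk_execute_rq(rq, false);
 *		blk_mq_free_request(rq);
 *	}
 */
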
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

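/*
 * Illustrative sketch (not part of this file): a caller that builds its own
 * bio, with an operation matching the request and a page it already owns,
 * can attach it to a passthrough request like this.  rq, page, len and
 * offset are assumed to exist in the caller; on failure the bio is still
 * owned by the caller and must be put:
 *
 *	struct bio *bio = bio_alloc(NULL, 1, REQ_OP_DRV_OUT, GFP_KERNEL);
 *
 *	__bio_add_page(bio, page, len, offset);
 *	if (blk_rq_append_bio(rq, bio) < 0)
 *		bio_put(bio);
 */
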
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}

	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

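/*
 * Illustrative sketch (not part of this file): a caller that already holds
 * pinned pages described by a bio_vec array can build an ITER_BVEC iterator
 * and let blk_rq_map_user_iov() take this zero-copy path.  bvecs, nr_segs,
 * total_len, rq and ret are assumed to exist in the caller:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, ITER_SOURCE, bvecs, nr_segs, total_len);
 *	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
 */
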
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!user_backed_iter(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

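/*
 * Illustrative sketch (not part of this file): importing a user iovec and
 * mapping it for a device-to-user transfer, mirroring what
 * blk_rq_map_user_io() does below.  uvec, nr_segs and rq are assumed to come
 * from the caller; teardown happens later via blk_rq_unmap_user() on the
 * rq->bio pointer saved before the request is executed (see
 * blk_rq_unmap_user() below):
 *
 *	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 */
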
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iov_iter i;
	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		if (bio_integrity(bio))
			bio_integrity_unmap_user(bio);

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

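/*
 * Illustrative sketch (not part of this file): because completion can advance
 * rq->bio, callers save the bio pointer right after mapping and pass that
 * saved pointer back here once the I/O is done.  rq, ubuf and len are assumed
 * to come from the caller:
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(rq->q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);
 *	blk_mq_free_request(rq);
 */
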
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
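
/*
 * Illustrative sketch (not part of this file): a driver issuing a passthrough
 * command with a kmalloc'ed payload, which takes the direct bio_map_kern()
 * path when the buffer satisfies the alignment check above.  q and len are
 * assumed to come from the caller and error handling is elided:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	int ret;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *	kfree(buf);
 */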