// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	bmd->iter = *data;
	if (iter_is_iovec(data)) {
		memcpy(bmd->iov, iter_iov(data),
		       sizeof(struct iovec) * data->nr_segs);
		bmd->iter.__iov = bmd->iov;
	}
	return bmd;
}
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if (iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else if (map_data && map_data->from_user) {
		struct iov_iter iter2 = *iter;

		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
		iter2.data_source = ITER_SOURCE;
		ret = bio_copy_from_iter(bio, &iter2);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}
static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		extraction_flags |= ITER_ALLOW_P2PDMA;
	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);

	while (iov_iter_count(iter)) {
		struct page *stack_pages[UIO_FASTIOV];
		struct page **pages = stack_pages;
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs > ARRAY_SIZE(stack_pages))
			pages = NULL;

		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
					       nr_vecs, extraction_flags, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page))
					break;

				if (same_page)
					bio_release_page(bio, page);
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			bio_release_page(bio, pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

 out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	const struct queue_limits *lim = &rq->q->limits;
	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
	unsigned int nsegs;
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
		return -EINVAL;

	/* reuse the bvecs from the iterator instead of allocating new ones */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_iov_bvec_set(bio, (struct iov_iter *)iter);

	/* check that the data layout matches the hardware restrictions */
	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
	if (ret) {
		/* if we would have to split the bio, copy instead */
		if (ret > 0)
			ret = -EREMOTEIO;
		blk_mq_map_bio_put(bio);
		return ret;
	}

	blk_rq_bio_prep(rq, bio, nsegs);
	return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!user_backed_iter(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iov_iter i;
	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		if (bio_integrity(bio))
			bio_integrity_unmap_user(bio);

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    bios.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);