// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
/* bookkeeping for a copied (bounced) user mapping */
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio, *bounce_bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto cleanup;

	/*
	 * We link the bounce buffer in and could have to traverse it later, so
	 * we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio, *bounce_bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	/*
	 * Subtle: if we end up needing to bounce a bio, it would normally
	 * disappear when its bi_end_io is run.  However, we need the original
	 * bio for the unmap, so grab an extra reference to it
	 */
	bio_get(bio);

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto out_put_orig;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

out_put_orig:
	bio_put(bio);
out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
}
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
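
/*
 * Illustrative sketch (not compiled, not part of this file): a caller that
 * has already built a bio hands it to a passthrough request through a
 * temporary pointer, because blk_rq_append_bio() may replace *bio with a
 * bounce bio.  The helper name is an example only; error handling is trimmed.
 */
#if 0
static int example_append_prebuilt_bio(struct request *rq, struct bio *bio)
{
	struct bio *tmp = bio;	/* may be swapped for a bounce bio */
	int ret;

	ret = blk_rq_append_bio(rq, &tmp);
	if (ret)
		return ret;	/* bio exceeds the request's merge limits */

	/* on success the request owns tmp (the original or a bounce bio) */
	return 0;
}
#endif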
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the callers responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
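
/*
 * Illustrative sketch (not compiled, not part of this file): mapping a user
 * iovec array into a passthrough request.  The helper name, variable names
 * and the WRITE direction (data going to the device) are assumptions of the
 * example; the direction passed to import_iovec() must match the request's
 * data direction.  A matching blk_rq_unmap_user() is still required after
 * the I/O completes.
 */
#if 0
static int example_map_user_iov(struct request_queue *q, struct request *rq,
				const struct iovec __user *uvec, int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(WRITE, uvec, nr_segs, 0, &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* the iovec array is not needed once mapping is done */
	return ret;
}
#endif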
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
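
/*
 * Illustrative sketch (not compiled, not part of this file):
 * blk_rq_map_user() is the single-buffer convenience wrapper; the call below
 * maps @ubuf/@len for @rq and is equivalent to building a one-segment
 * iov_iter with import_single_range() and calling blk_rq_map_user_iov().
 * The helper name is an example only.
 */
#if 0
static int example_map_flat_buffer(struct request_queue *q, struct request *rq,
				   void __user *ubuf, unsigned long len)
{
	/* NULL map_data: pages are allocated or pinned by the block layer */
	return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}
#endif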
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		if (bio->bi_private) {
			ret2 = bio_uncopy_user(mapped_bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_unmap_user(mapped_bio);
		}

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
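
/*
 * Illustrative sketch (not compiled, not part of this file): the unmap must
 * be given the original rq->bio saved *before* the request is executed,
 * because completion may change rq->bio.  The request setup, the REQ_OP_DRV_IN
 * direction and the helper name are assumptions of the example.
 */
#if 0
static int example_user_roundtrip(struct request_queue *q, void __user *ubuf,
				  unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;			/* save for the unmap below */
	blk_execute_rq(q, NULL, rq, 0);	/* synchronous execution */
	ret = blk_rq_unmap_user(bio);	/* copy back / release pages */
out_put:
	blk_put_request(rq);
	return ret;
}
#endif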
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
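
/*
 * Illustrative sketch (not compiled, not part of this file): issuing a
 * driver-private command backed by a kernel buffer.  Whether the buffer is
 * mapped directly or copied into bounce pages is decided inside
 * blk_rq_map_kern() (alignment and on-stack checks).  The request setup, the
 * REQ_OP_DRV_OUT direction and the helper name are assumptions of the example.
 */
#if 0
static int example_map_kern(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}
#endif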