// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
        struct bio *orig_bio = *bio;

        blk_queue_bounce(rq->q, bio);

        if (!rq->bio) {
                /* first bio: initialize the request from it */
                blk_rq_bio_prep(rq->q, rq, *bio);
        } else {
                if (!ll_back_merge_fn(rq->q, rq, *bio)) {
                        /* drop the bounce bio allocated above, if any */
                        if (orig_bio != *bio) {
                                bio_put(*bio);
                                *bio = orig_bio;
                        }
                        return -EINVAL;
                }

                /* merge allowed: chain the bio and account for its size */
                rq->biotail->bi_next = *bio;
                rq->biotail = *bio;
                rq->__data_len += (*bio)->bi_iter.bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}
static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        orig_bio = bio;

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
                __blk_rq_unmap_user(orig_bio);
                return ret;
        }
        bio_get(bio);

        return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:         request queue where request should be inserted
 * @rq:        request to map data to
 * @map_data:  pointer to the rq_map_data holding pages (if necessary)
 * @iter:      iovec iterator
 * @gfp_mask:  memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;

        if (!iter_is_iovec(iter))
                goto fail;

        /* decide whether a bounce copy is needed instead of direct mapping */
        if (map_data)
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->rq_flags |= RQF_COPY_USER;
        return 0;

unmap_rq:
        blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:         request queue where request should be inserted
 * @rq:        request to fill
 * @kbuf:      the kernel buffer
 * @len:       length of the kernel buffer
 * @gfp_mask:  memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio, *orig_bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        /* bounce if the buffer is misaligned or lives on the stack */
        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;

        orig_bio = bio;
        ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(orig_bio);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);