/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
/*
 * Undo one mapped bio: a directly mapped bio is unpinned, a bio that went
 * through a kernel bounce buffer is copied back and freed.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
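
/*
 * Example (illustrative sketch, assuming a kernel that provides
 * import_iovec()): a caller such as an SG_IO-style ioctl handler might
 * import a user iovec array and hand the resulting iterator to
 * blk_rq_map_user_iov().  The identifiers "uvecs", "nr_segs" and "xfer_len"
 * are placeholders for whatever the caller received from userspace.
 *
 *	struct iov_iter iter;
 *	struct iovec *iov = NULL;
 *	ssize_t ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uvecs, nr_segs, 0, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	iov_iter_truncate(&iter, xfer_len);
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 *
 * A matching blk_rq_unmap_user(rq->bio) must still follow once the request
 * has completed, as documented above.
 */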
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;

	iov.iov_base = ubuf;
	iov.iov_len = len;
	iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
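
/*
 * Example (illustrative sketch): a typical passthrough mapping pairs
 * blk_rq_map_user() with blk_rq_unmap_user(), saving the original rq->bio
 * across execution because completion may change it.  The identifiers
 * "disk", "ubuf" and "len" are placeholders supplied by the caller, and the
 * usual command setup (rq->cmd, rq->cmd_len, ...) is omitted.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 *	return ret;
 */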
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
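
/*
 * Example (illustrative sketch): because blk_rq_map_kern() appends one bio
 * per call, a driver can map several kernel buffers into one passthrough
 * request.  "hdr_buf"/"data_buf" and their lengths are placeholders for the
 * caller's own buffers.
 *
 *	ret = blk_rq_map_kern(q, rq, hdr_buf, hdr_len, GFP_KERNEL);
 *	if (!ret)
 *		ret = blk_rq_map_kern(q, rq, data_buf, data_len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 * Appending the second buffer only succeeds if ll_back_merge_fn() allows the
 * new bio to be merged given the queue's segment and size limits; otherwise
 * blk_rq_append_bio() fails and -EINVAL is returned ("request is too big").
 */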