/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
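/*
 * Illustrative usage sketch (not part of this file): how a passthrough
 * ioctl handler might feed a user iovec array into blk_rq_map_user_iov().
 * Request setup and submission are elided; import_iovec() comes from
 * <linux/uio.h> and kfree() would need <linux/slab.h>.  The helper name
 * is made up for the example.
 */
#if 0
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	/* Copy in the iovec array and build an iov_iter over it. */
	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
	if (bytes < 0)
		return bytes;

	/* Map (or copy) the user pages into the request. */
	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);

	kfree(iov);	/* import_iovec() may have allocated the array */
	return ret;
}
#endif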
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
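/*
 * Illustrative usage sketch (not part of this file): mapping a single user
 * buffer for a passthrough request and unmapping it after completion, as the
 * kernel-doc above requires.  Request setup and submission are elided; the
 * helper name is made up for the example.
 */
#if 0
static int example_rw_user_buf(struct request_queue *q, struct request *rq,
			       void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Remember rq->bio now: I/O completion may change rq->bio, but
	 * blk_rq_unmap_user() needs the bio as it was after mapping.
	 */
	bio = rq->bio;

	/* ... submit the request and wait for completion ... */

	return blk_rq_unmap_user(bio);
}
#endif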
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
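/*
 * Illustrative usage sketch (not part of this file): attaching a kernel
 * buffer to a passthrough request with blk_rq_map_kern().  Misaligned or
 * on-stack buffers are copied into a bounce bio automatically, as
 * implemented above.  Request submission is elided; the helper name is
 * made up for the example.
 */
#if 0
static int example_map_kernel_buf(struct request_queue *q, struct request *rq,
				  void *kbuf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... submit the request and wait for completion ... */
	return 0;
}
#endif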