/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};
/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
						  BIOSET_NEED_RESCUER));
	if (!client->bios)
		goto bad;

	return client;

   bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
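/*
 * Worked example of the packing above, assuming a 64-bit build where
 * DM_IO_MAX_REGIONS == 64: a 64-byte-aligned io pointer has its low six
 * bits clear, so for region 5 bi_private holds (io | 0x5).  Masking
 * with -64 (i.e. ~0x3f) recovers the pointer; masking with 63 (0x3f)
 * recovers the region number.
 */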
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}
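/*
 * Note the ordering above: error_bits, callback and context are copied
 * out and the io is returned to the mempool *before* the callback runs,
 * so the callback is free to immediately issue further io that
 * allocates from the same pool.
 */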
static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}
static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
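/*
 * Consumers walk a dpages with the following pattern (a sketch of what
 * do_region() below does):
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	// consume up to 'len' bytes at page + offset, e.g. bio_add_page()
 *	dp->next_page(dp);
 */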
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}
static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}
static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}
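/*
 * Note: context_bi.bi_sector is repurposed above as scratch space to
 * carry the current bvec's length from bio_get_page() to
 * bio_next_page(); it does not hold a sector number in this context.
 */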
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
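/*
 * The km_* helpers mirror the vm_* helpers; the only real difference is
 * the page lookup: kmalloc'd memory is physically contiguous, so
 * virt_to_page() suffices, whereas vmalloc'd memory needs the
 * page-table walk done by vmalloc_to_page().
 */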
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}
	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized-bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		case REQ_OP_WRITE_SAME:
			num_bvecs = 1;
			break;
		default:
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio_set_dev(bio, where->bdev);
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
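/*
 * The outer do/while above means one region may map to several bios:
 * whenever bio_add_page() fails (the bio is full) or a discard /
 * write-zeroes / write-same request exceeds special_cmd_max_sectors,
 * the loop submits what it has and starts another bio for the
 * remainder.
 */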
static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
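/*
 * The io->count protocol: the caller seeds count to 1 before calling
 * dispatch_io(), each bio submitted by do_region() takes another
 * reference, and each endio() drops one via dec_count().  The final
 * dec_count() above releases the seed reference, so complete_io() runs
 * exactly once, only after every bio has completed.
 */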
struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}
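/*
 * Why the !op_is_write() rejection in sync_io() and async_io(): all
 * regions of a request share a single dpages (rewound per region by
 * dispatch_io()), which is sensible for writes (the same data fanned
 * out to several devices, as in mirroring), but multi-region reads
 * would all land in the same pages and clobber each other.
 */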
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
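/*
 * Usage sketch (hypothetical caller, not part of this file): read eight
 * sectors synchronously into a kmalloc'd buffer, using a client created
 * by dm_io_client_create().
 *
 *	struct dm_io_region region = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_op		= REQ_OP_READ,
 *		.bi_op_flags	= 0,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buffer,
 *		.notify.fn	= NULL,		// NULL selects the sync path
 *		.client		= ioc,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 */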
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}
void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}