// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __aligned(DM_IO_MAX_REGIONS);

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned int min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*
 *-------------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *-------------------------------------------------------------------
 */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned int region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned int *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
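
/*
 * Worked example (illustrative only, assuming a 64-bit build where
 * DM_IO_MAX_REGIONS == BITS_PER_LONG == 64): __aligned(64) keeps the low
 * six bits of every 'struct io' address zero, so for io == 0x...f40 and
 * region == 5 the packed bi_private value is 0x...f45, and:
 *
 *	val & -(unsigned long)64   == 0x...f40   (the io pointer)
 *	val & (64 - 1)             == 5          (the region number)
 */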

/*
 *--------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *--------------------------------------------------------------
 */
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned int region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}
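
/*
 * Note on the counting scheme: async_io() starts io->count at 1,
 * do_region() takes one reference per bio it submits, and each endio()
 * drops one via dec_count().  The initial reference is dropped at the
 * end of dispatch_io(), so complete_io() cannot run before every region
 * has been dispatched.
 */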

/*
 *--------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *--------------------------------------------------------------
 */
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned int *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned int context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
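
/*
 * Illustrative sketch of how a dpages object is consumed (this is the
 * pattern do_region() below follows, not an additional API):
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *		- current page, and how many bytes remain in it
 *	...add up to 'len' bytes at page + offset to a bio...
 *	dp->next_page(dp);
 *		- advance to the next page
 */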

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned int *offset)
{
	unsigned int o = dp->context_u;
	struct page_list *pl = dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned int *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned int *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned int *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
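
/*
 * Design note: the VMA and kernel-memory variants differ only in the page
 * lookup.  vmalloc'd buffers are virtually but not physically contiguous,
 * so vm_get_page() must translate each page with vmalloc_to_page(), while
 * directly mapped kmalloc'd buffers can use the cheaper virt_to_page().
 */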

/*
 *---------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------
 */
static void do_region(const blk_opf_t opf, unsigned int region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io, unsigned short ioprio)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned int offset;
	unsigned int num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)) + 1);
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		bio->bi_ioprio = ioprio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else {
			while (remaining) {
				/*
				 * Try and add as many pages as possible.
				 */
				dp->get_page(dp, &page, &len, &offset);
				len = min(len, to_bytes(remaining));
				if (!bio_add_page(bio, page, len, offset))
					break;

				offset = 0;
				remaining -= to_sector(len);
				dp->next_page(dp);
			}
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
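
/*
 * Note that do_region() may issue several bios for a single region: the
 * loop above opens a new bio whenever bio_add_page() reports the current
 * one full, and discard/write-zeroes requests are split to the device's
 * special_cmd_max_sectors limit.
 */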

static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, unsigned short ioprio)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io, ioprio);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static void async_io(struct dm_io_client *client, unsigned int num_regions,
		     struct dm_io_region *where, blk_opf_t opf,
		     struct dpages *dp, io_notify_fn fn, void *context,
		     unsigned short ioprio)
{
	struct io *io;

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, ioprio);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits, unsigned short ioprio)
{
	struct sync_io sio;

	init_completion(&sio.wait);

	async_io(client, num_regions, where, opf | REQ_SYNC, dp,
		 sync_io_complete, &sio, ioprio);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}
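
/*
 * The synchronous path is simply the asynchronous path plus a completion:
 * REQ_SYNC is OR'd into the request so the block layer treats it as
 * synchronous, and wait_for_completion_io() accounts the sleep as iowait.
 */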

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits,
	  unsigned short ioprio)
{
	int r;
	struct dpages dp;

	if (num_regions > 1 && !op_is_write(io_req->bi_opf)) {
		WARN_ON(1);
		return -EIO;
	}

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits, ioprio);

	async_io(io_req->client, num_regions, where, io_req->bi_opf, &dp,
		 io_req->notify.fn, io_req->notify.context, ioprio);
	return 0;
}
EXPORT_SYMBOL(dm_io);
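
/*
 * Usage sketch (illustrative only; 'bdev', 'buf' and 'client' are
 * hypothetical caller-provided values).  A synchronous 8-sector read into
 * a kmalloc'd buffer might look like:
 *
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_opf = REQ_OP_READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits, IOPRIO_DEFAULT);
 *
 * Leaving notify.fn NULL selects sync_io(); setting notify.fn and
 * notify.context routes the request through async_io() and returns
 * immediately.
 */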

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}