/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"
/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};
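
/*
 * Note: the tio above is not allocated separately. dm_mq_init_request_queue()
 * below sets tag_set->cmd_size to sizeof(struct dm_rq_target_io) (plus any
 * target-specific per-io data), so each blk-mq request carries its tio as
 * per-request driver data; see blk_mq_rq_to_pdu() in tio_from_request().
 */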

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
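
/*
 * A minimal sketch of the clamping these getters rely on (assuming the
 * __dm_get_module_param() helper in dm.c): a zero module parameter falls back
 * to the supplied default and anything above the ceiling is capped, e.g.
 *
 *	dm_mq_queue_depth = 0       -> dm_get_blk_mq_queue_depth() == DM_MQ_QUEUE_DEPTH
 *	dm_mq_queue_depth = 1 << 20 -> dm_get_blk_mq_queue_depth() == BLK_MQ_MAX_DEPTH
 */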

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}

static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);

	return r;
}
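
/*
 * BLK_STS_RESOURCE and BLK_STS_DEV_RESOURCE are intentionally left unhandled
 * here and returned to the caller: map_request() uses them to unprep the
 * clone and requeue the original request instead of completing it with an
 * error.
 */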

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
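
/*
 * Completion flow with these ops: dm_complete_request() calls
 * blk_mq_complete_request(), which invokes .complete (dm_softirq_done) on the
 * original request; dm_softirq_done() then finishes the clone via dm_done().
 */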

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");