/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

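/*
 * Queue start/stop helpers used when suspending/resuming the device: the
 * dm_old_* variants operate on a .request_fn queue, the dm_mq_* variants
 * on a blk-mq queue.
 */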
static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q)) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return;
        }

        queue_flag_set(QUEUE_FLAG_STOPPED, q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* Avoid that requeuing could restart the queue. */
        blk_mq_cancel_requeue_work(q);
        blk_mq_stop_hw_queues(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
                                                gfp_t gfp_mask)
{
        return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
        mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error occurred, just let clone->end_io() handle
                 * the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something wrong is happening.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

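/*
 * The per-request tio lives in the blk-mq PDU for dm-mq devices and in
 * rq->special for .request_fn devices.
 */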
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

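/* Account the completed request in dm-stats, if statistics are enabled. */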
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);

                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!q->mq_ops && run_queue) {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_run_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

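/*
 * Release a clone and its resources; how the clone is freed depends on
 * whether it was allocated by the target (blk-mq stacking) or from the
 * old .request_fn mempool.
 */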
static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

        /*
         * It is possible for a clone_old_rq() allocated clone to
         * get passed in -- it may not yet have a request_queue.
         * This is known to occur if the error target replaces
         * a multipath target that has a request_fn queue stacked
         * on blk-mq queue(s).
         */
        if (clone->q && clone->q->mq_ops)
                /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_old_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                if (rq->sense)
                        /*
                         * We are using the sense buffer of the original
                         * request.
                         * So setting the length of the sense data is enough.
                         */
                        rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

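/*
 * Undo the prep done by dm_old_prep_fn()/map_request() so the original
 * request can be requeued.
 */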
static void dm_unprep_request(struct request *rq)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!rq->q->mq_ops) {
                rq->special = NULL;
                rq->cmd_flags &= ~REQ_DONTPREP;
        }

        if (clone)
                free_rq_clone(clone);
        else if (!tio->md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_mq_delay_kick_requeue_list(q, msecs);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        dm_unprep_request(rq);

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

        rq_completed(md, rw, false);
}

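/*
 * Complete or requeue the clone based on the target's rq_end_io()
 * return value.
 */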
static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                rq_end_stats(tio->md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops) {
                        blk_end_request_all(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                        free_old_rq_tio(tio);
                } else {
                        blk_mq_end_request(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                }
                return;
        }

        if (rq->cmd_flags & REQ_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (!clone->q->mq_ops) {
                /*
                 * This is just for cleaning up the information of the queue
                 * in which the clone was dispatched.
                 * The clone is *NOT* actually freed here because it is
                 * allocated from dm's own mempool (REQ_ALLOCED isn't set).
                 */
                __blk_put_request(clone->q, clone);
        }

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, deadlock could occur because:
         *     - another request may be submitted by the upper level driver
         *       of the stacking during the completion
         *     - the submission which requires queue lock may be done
         *       against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->cmd_flags |= REQ_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}

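/*
 * Per-bio constructor used by blk_rq_prep_clone(): hook end_clone_bio()
 * into each cloned bio so partial completions can be tracked.
 */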
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

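/*
 * Prepare a clone of the original request: copy over the command and
 * sense buffer and wire up end_clone_request() as the completion.
 */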
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->cmd = rq->cmd;
        clone->cmd_len = rq->cmd_len;
        clone->sense = rq->sense;
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        /*
         * Create clone for use with .request_fn request_queue
         */
        struct request *clone;

        clone = alloc_old_clone_request(md, gfp_mask);
        if (!clone)
                return NULL;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
                free_old_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

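/* Initialize the per-request tio before it is mapped. */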
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                kthread_init_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
                                               struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

        tio = alloc_old_rq_tio(md, gfp_mask);
        if (!tio)
                return NULL;

        init_tio(tio, rq, md);

        table = dm_get_live_table(md, &srcu_idx);
        /*
         * Must clone a request if this .request_fn DM device
         * is stacked on .request_fn device(s).
         */
        if (!dm_table_all_blk_mq_devices(table)) {
                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                        dm_put_live_table(md, srcu_idx);
                        free_old_rq_tio(tio);
                        return NULL;
                }
        }
        dm_put_live_table(md, srcu_idx);

        return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
        if (!tio)
                return BLKPREP_DEFER;

        rq->special = tio;
        rq->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
                if (r == DM_MAPIO_DELAY_REQUEUE)
                        return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r == DM_MAPIO_REMAPPED &&
                    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
        }

        return r;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);

                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

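/* kthread worker callback for the .request_fn path; maps one request. */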
static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                if (unlikely(!map)) {
                        dm_put_live_table(md, srcu_idx);
                        return;
                }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock not to increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
        /* Fully initialize the queue */
        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);

        /* Initialize the request-based DM worker thread */
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task)) {
                int error = PTR_ERR(md->kworker_task);

                md->kworker_task = NULL;
                return error;
        }

        elv_register_queue(md->queue);

        return 0;
}

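/* blk-mq .init_request callback: set up the per-request tio in the PDU. */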
static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        struct mapped_device *md = data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

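/* blk-mq .queue_rq callback: map and dispatch one request. */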
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * On suspend dm_stop_queue() handles stopping the blk-mq
         * request_queue BUT: even though the hw_queues are marked
         * BLK_MQ_S_STOPPED at that point there is still a race that
         * is allowing block/blk-mq.c to call ->queue_rq against a
         * hctx that it really shouldn't. The following check guards
         * against this rarity (albeit _not_ race-free).
         */
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return BLK_MQ_RQ_QUEUE_BUSY;

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

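/*
 * Fully initialize a blk-mq request-based queue (dm-mq).
 */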
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        err = blk_mq_register_dev(disk_to_dev(md->disk), q);
        if (err)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(q);
out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");