// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
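
/*
 * Note: the bit set above is consumed on the completion side.
 * blk_mq_sched_restart() (blk-mq-sched.h) tests it when a request is freed
 * and, if it was set, calls __blk_mq_sched_restart() below to re-run the
 * queue.
 */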

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the
	 * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
	 * Its pair is the barrier in blk_mq_dispatch_rq_list(). Without it,
	 * the dispatch code might not see SCHED_RESTART while a new request
	 * added to hctx->dispatch is missed by the check in
	 * blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}
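
/*
 * A sketch of the interleaving the barrier pairing above guards against
 * (assumed from the comment, not part of the original file):
 *
 *	restart path			dispatch path
 *	------------			-------------
 *	clear_bit(SCHED_RESTART)	list_add(rq, &hctx->dispatch)
 *	smp_mb()			smp_mb() (blk_mq_dispatch_rq_list())
 *	check hctx->dispatch		test_bit(SCHED_RESTART)
 *
 * With both barriers in place, at least one side observes the other's
 * store: either the restart path sees the new request and runs the queue,
 * or the dispatch path sees SCHED_RESTART already cleared and arranges a
 * re-run itself.
 */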

static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}
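
/*
 * Note: list_sort() is a stable merge sort, so comparing the mq_hctx
 * pointers is enough to group requests from the same hardware queue
 * together while preserving submission order within each group.
 */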

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}
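
/*
 * Example (illustrative): with rq_list = [A1, A2, B1, A3], where the letter
 * names the owning hctx, one call moves [A1, A2] to hctx_list and dispatches
 * them (count == 2), leaving [B1, A3] on rq_list for the caller's next
 * iteration.
 */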

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch batches of requests from the same hctx at
		 * a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}
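
/*
 * For reference: the .has_work and .dispatch_request hooks used above are
 * supplied by the elevator's ops table. A sketch of how mq-deadline wires
 * them up (abridged; see block/mq-deadline.c for the real table):
 *
 *	static struct elevator_type mq_deadline = {
 *		.ops = {
 *			.has_work		= dd_has_work,
 *			.dispatch_request	= dd_dispatch_request,
 *			...
 *		},
 *		.elevator_name = "mq-deadline",
 *	};
 */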

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}
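
/*
 * Example: with nr_ctx == 4 and ctx at index_hw 3, the increment wraps to
 * index 0, so repeated calls cycle through all software queues mapped to
 * this hardware queue.
 */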

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}
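
/*
 * hctx->dispatch_from persists the round-robin cursor across calls, so a
 * software queue passed over on this run is the first candidate on the
 * next one; READ_ONCE()/WRITE_ONCE() keep the lockless accesses to the
 * cursor well-defined.
 */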

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue requests one by one from the sw queue if the queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}
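
/*
 * Summary of the dispatch sources above: (1) requests left over on
 * hctx->dispatch always go first; (2) with an elevator attached, pull from
 * the scheduler via blk_mq_do_dispatch_sched(); (3) without one, drain the
 * software queues -- one at a time when the device is busy, or all at once
 * when it is keeping up.
 */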

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}
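
/*
 * blk_bio_list_merge() walks the software-queue list newest-first, trying a
 * back or front merge of the bio against each request and giving up after
 * the stop count mentioned above, so the common-case cost stays small.
 */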

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
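
/*
 * elv_attempt_insert_merge() tries to merge @rq with a request already held
 * by the elevator; any requests made redundant by a successful merge are
 * collected on @free for the caller to release once locks are dropped.
 */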

static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}
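
/*
 * Note: MAX_SCHED_RQ (defined in blk-mq.h, 16 * BLKDEV_DEFAULT_RQ at the
 * time of writing) is what allows nr_requests to be raised later without
 * reallocating the shared map.
 */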

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}