/*
 * block/blk-mq.c - Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
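
/*
 * Worked example (added for illustration): for a 4096-byte read,
 * ilog2(4096) == 12, so bucket = 0 + 2 * (12 - 9) = 6; a 4096-byte
 * write (ddir == 1) lands in bucket 7. Reads and writes thus alternate
 * buckets, with each read/write pair covering double the size of the
 * previous one, starting at 512 bytes (ilog2 == 9).
 */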
/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
			!list_empty_careful(&hctx->dispatch) ||
			blk_mq_sched_has_work(hctx);
}
/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}
void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
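
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that must change queue data structures brackets the update with a
 * freeze/unfreeze pair, so the change runs with no request in flight:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue depth, scheduler state, etc. ...
 *	blk_mq_unfreeze_queue(q);
 */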
/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls are
 * not prevented unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	__blk_mq_stop_hw_queues(q, true);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(&hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
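
/*
 * Usage sketch (illustrative, not part of the original file): unlike a
 * freeze, quiescing does not drain allocated requests; it only waits for
 * in-flight queue_rq() calls. Since this implementation stops the
 * hardware queues, a caller typically restarts them when done:
 *
 *	blk_mq_quiesce_queue(q);
 *	... reconfigure dispatch-affecting state ...
 *	blk_mq_start_stopped_hw_queues(q, true);
 */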
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op)
{
	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[op_is_sync(op)]++;
}
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

		rq = tags->static_rqs[tag];

		if (data->flags & BLK_MQ_REQ_INTERNAL) {
			rq->tag = -1;
			rq->internal_tag = tag;
		} else {
			if (blk_mq_tag_busy(data->hctx)) {
				rq->rq_flags = RQF_MQ_INFLIGHT;
				atomic_inc(&data->hctx->nr_active);
			}
			rq->tag = tag;
			rq->internal_tag = -1;
			data->hctx->tags->rqs[rq->tag] = rq;
		}

		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
		return rq;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
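
/*
 * Usage sketch (illustrative; REQ_OP_DRV_IN chosen as an example opcode,
 * and error handling abbreviated): a driver-private command is allocated,
 * executed synchronously, and freed:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */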
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq)
{
	const int sched_tag = rq->internal_tag;
	struct request_queue *q = rq->q;

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	wbt_done(q->rq_wb, &rq->issue_stat);
	rq->rq_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_finish_request(hctx, ctx, rq);
}

void blk_mq_finish_request(struct request *rq)
{
	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_finish_request);

void blk_mq_free_request(struct request *rq)
{
	blk_mq_sched_put_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
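
/*
 * Completion flow sketch (illustrative; my_complete() and my_rq_status()
 * are hypothetical driver helpers): an IRQ handler calls
 * blk_mq_complete_request(), which invokes q->softirq_done_fn (the
 * driver's ->complete handler), which finally ends the request:
 *
 *	static void my_complete(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, my_rq_status(rq));
 *	}
 */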
static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);
int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before setting the started
	 * flag and clearing the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);
/*
 * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be a race with the timeout handler.
 * But given that rq->deadline has just been set in .queue_rq() in this
 * situation, the race won't happen in practice, because rq->timeout
 * should be large enough to cover the window between blk_mq_start_request()
 * being called from .queue_rq() and REQ_ATOM_STARTED being cleared here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
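
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that cannot service an already-started request, e.g. after a transient
 * device error, hands it back for a later retry and kicks the requeue
 * list in one call:
 *
 *	blk_mq_requeue_request(rq, true);
 */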
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
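
/*
 * Usage sketch (illustrative; "cqe" is a hypothetical completion entry):
 * drivers that complete requests by tag, e.g. from a hardware completion
 * ring, translate the tag back to its request:
 *
 *	rq = blk_mq_tag_to_rq(hctx->tags, cqe->tag);
 *	if (rq)
 *		blk_mq_complete_request(rq);
 */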
struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	/*
	 * The rq being checked may already have been freed and reallocated
	 * here. We avoid this race by checking rq->deadline and the
	 * REQ_ATOM_COMPLETE flag together:
	 *
	 * - if rq->deadline is observed as the new value because of
	 *   reuse, the rq won't be timed out because of timing.
	 * - if rq->deadline is observed as the previous value, the
	 *   REQ_ATOM_COMPLETE flag won't be cleared in the reuse path,
	 *   because we put a barrier between setting rq->deadline
	 *   and clearing the flag in blk_mq_start_request(), so
	 *   this rq won't be timed out either.
	 */
	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}
static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}
/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}
struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}
static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
				    struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}
/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}
static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->task_list);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			if (!queued && reorder_tags_to_front(list))
				continue;

			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed.
			 */
			if (!blk_mq_dispatch_wait_add(hctx))
				break;

			/*
			 * It's possible that a tag was freed in the window
			 * between the allocation failure and adding the
			 * hardware queue to the wait queue.
			 */
			if (!blk_mq_get_driver_tag(rq, &hctx, false))
				break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			struct request *nxt;

			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			errors++;
			blk_mq_end_request(rq, -EIO);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		/*
		 * If an I/O scheduler has been configured and we got a driver
		 * tag for the next request already, free it again.
		 */
		rq = list_first_entry(list, struct request, queuelist);
		blk_mq_put_driver_tag(rq);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If TAG_WAITING is set that means that an I/O scheduler has
		 * been configured and another thread is waiting for a driver
		 * tag. To guarantee fairness, do not rerun this hardware queue
		 * but let the other thread grab the driver tag.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		might_sleep();

		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
	}
}
/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx) ||
		     !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
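
/*
 * Usage sketch (illustrative, not part of the original file): after a
 * driver frees a resource that dispatch may have been waiting on, it
 * kicks the queue. async == true defers the run to kblockd, which is
 * required from contexts that must not block or that run on a CPU
 * outside hctx->cpumask:
 *
 *	blk_mq_run_hw_queue(hctx, true);
 */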
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!blk_mq_hctx_has_pending(hctx) ||
		    blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);
/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);
static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&hctx->run_work);
	else
		cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_stop_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		__blk_mq_stop_hw_queue(hctx, sync);
}

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	__blk_mq_stop_hw_queues(q, false);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue. The exception is if
	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
	 * the STOPPED bit and run it.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
			return;

		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	}

	__blk_mq_run_hw_queue(hctx);
}
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	/*
	 * Stop the hw queue, then modify currently delayed work.
	 * This should prevent us from running the queue prematurely.
	 * Mark the queue as auto-clearing STOPPED when it runs.
	 */
	blk_mq_stop_hw_queue(hctx);
	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					&hctx->run_work,
					msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);
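
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that knows the device will stay busy for a while can park the hardware
 * queue and have it rearmed after a delay instead of being rerun
 * immediately, e.g. to retry roughly three seconds later:
 *
 *	blk_mq_delay_queue(hctx, 3000);
 */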
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_account_io_start(rq, true);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_finish_request(hctx, ctx, rq);
		return true;
	}
}
static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
					struct request *rq,
					blk_qc_t *cookie, bool may_sleep)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	int ret;
	bool run_queue = true;

	if (blk_mq_hctx_stopped(hctx)) {
		run_queue = false;
		goto insert;
	}

	if (q->elevator)
		goto insert;

	if (!blk_mq_get_driver_tag(rq, NULL, false))
		goto insert;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK) {
		*cookie = new_cookie;
		return;
	}

	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		*cookie = BLK_QC_T_NONE;
		blk_mq_end_request(rq, -EIO);
		return;
	}

	__blk_mq_requeue_request(rq);
insert:
	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
}
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
		rcu_read_unlock();
	} else {
		unsigned int srcu_idx;

		might_sleep();

		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
	}
}
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	blk_qc_t cookie;
	unsigned int wb_acct;

	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio, q->bio_split);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;

	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

	wb_acct = wbt_wait(q->rq_wb, bio, NULL);

	trace_block_getrq(q, bio, bio->bi_opf);

	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
	if (unlikely(!rq)) {
		__wbt_done(q->rq_wb, wb_acct);
		return BLK_QC_T_NONE;
	}

	wbt_track(&rq->issue_stat, wb_acct);

	cookie = request_to_qc_t(data.hctx, rq);

	plug = current->plug;
	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		if (q->elevator) {
			blk_mq_sched_insert_request(rq, false, true, true,
					true);
		} else {
			blk_insert_flush(rq);
			blk_mq_run_hw_queue(data.hctx, true);
		}
	} else if (plug && q->nr_hw_queues == 1) {
		struct request *last = NULL;

		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);

		/*
		 * @request_count may become stale because of schedule
		 * out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list will have one request at most.
		 * The plug list might get flushed before this. If that happens,
		 * the plug list is empty, and same_queue_rq is invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if (q->nr_hw_queues > 1 && is_sync) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else if (q->elevator) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_run_hw_queue(data.hctx, true);
	} else
		blk_mq_put_ctx(data.ctx);

	return cookie;
}
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}
void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}
static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (set->ops->init_request) {
				if (set->ops->init_request(set, rq, hctx_idx,
						node)) {
					tags->static_rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}
/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}
/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(&hctx->queue_rq_srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
		goto exit_hctx;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto sched_exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
				   node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(&hctx->queue_rq_srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

 free_fq:
	kfree(hctx->fq);
 sched_exit_hctx:
	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}
static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't online, the cpu is mapped to the first hctx */
		if (!cpu_online(i))
			continue;

		hctx = blk_mq_map_queue(q, i);

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}
static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
				set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}
static void blk_mq_map_swqueue(struct request_queue *q,
			       const struct cpumask *online_mask)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
	for_each_possible_cpu(i) {
		/* If the cpu isn't online, the cpu is mapped to the first hctx */
		if (!cpumask_test_cpu(i, online_mask))
			continue;

		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
			/*
			 * If tags initialization fails for some hctx,
			 * that hctx won't be brought online. In this
			 * case, remap the current ctx to hctx[0] which
			 * is guaranteed to always have tags allocated
			 */
			q->mq_map[i] = 0;
		}

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0. We need it as a
			 * fallback in case a new remap fails
			 * allocation
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}
static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);
	INIT_LIST_HEAD(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);

	synchronize_rcu();
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}
2171 /* This is the actual release handler for mq, but we invoke it from
2172 * the request queue's release handler to avoid use-after-free
2173 * headaches; q->mq_kobj shouldn't have been introduced, but we
2174 * can't group the ctx/hctx kobjects without it. */
2176 void blk_mq_release(struct request_queue *q)
2178 struct blk_mq_hw_ctx *hctx;
2179 unsigned int i;
2181 /* hctx kobj stays in hctx */
2182 queue_for_each_hw_ctx(q, hctx, i) {
2183 if (!hctx)
2184 continue;
2185 kobject_put(&hctx->kobj);
2188 q->mq_map = NULL;
2190 kfree(q->queue_hw_ctx);
2193 /* Release .mq_kobj and the sw queues' kobjects now because
2194 * both share their lifetime with the request queue. */
2196 blk_mq_sysfs_deinit(q);
2198 free_percpu(q->queue_ctx);
2201 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2203 struct request_queue *uninit_q, *q;
2205 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2206 if (!uninit_q)
2207 return ERR_PTR(-ENOMEM);
2209 q = blk_mq_init_allocated_queue(set, uninit_q);
2210 if (IS_ERR(q))
2211 blk_cleanup_queue(uninit_q);
2213 return q;
2215 EXPORT_SYMBOL(blk_mq_init_queue);
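/*
 * Illustrative usage sketch (hypothetical driver code, not from this
 * file): a driver allocates its tag set first, then builds the queue
 * from it. On failure an ERR_PTR is returned, never NULL, and the
 * half-initialized queue has already been cleaned up above.
 *
 *	struct my_dev {				// hypothetical driver state
 *		struct blk_mq_tag_set tag_set;
 *		struct request_queue *q;
 *	};
 *
 *	static int my_dev_create_queue(struct my_dev *dev)
 *	{
 *		dev->q = blk_mq_init_queue(&dev->tag_set);
 *		if (IS_ERR(dev->q))
 *			return PTR_ERR(dev->q);
 *		return 0;
 *	}
 */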
2217 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2218 struct request_queue *q)
2220 int i, j;
2221 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2223 blk_mq_sysfs_unregister(q);
2224 for (i = 0; i < set->nr_hw_queues; i++) {
2225 int node;
2227 if (hctxs[i])
2228 continue;
2230 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2231 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2232 GFP_KERNEL, node);
2233 if (!hctxs[i])
2234 break;
2236 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2237 node)) {
2238 kfree(hctxs[i]);
2239 hctxs[i] = NULL;
2240 break;
2243 atomic_set(&hctxs[i]->nr_active, 0);
2244 hctxs[i]->numa_node = node;
2245 hctxs[i]->queue_num = i;
2247 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2248 free_cpumask_var(hctxs[i]->cpumask);
2249 kfree(hctxs[i]);
2250 hctxs[i] = NULL;
2251 break;
2253 blk_mq_hctx_kobj_init(hctxs[i]);
2255 for (j = i; j < q->nr_hw_queues; j++) {
2256 struct blk_mq_hw_ctx *hctx = hctxs[j];
2258 if (hctx) {
2259 if (hctx->tags)
2260 blk_mq_free_map_and_requests(set, j);
2261 blk_mq_exit_hctx(q, set, hctx, j);
2262 kobject_put(&hctx->kobj);
2263 hctxs[j] = NULL;
2267 q->nr_hw_queues = i;
2268 blk_mq_sysfs_register(q);
2271 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2272 struct request_queue *q)
2274 /* mark the queue as mq asap */
2275 q->mq_ops = set->ops;
2277 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2278 blk_mq_poll_stats_bkt,
2279 BLK_MQ_POLL_STATS_BKTS, q);
2280 if (!q->poll_cb)
2281 goto err_exit;
2283 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2284 if (!q->queue_ctx)
2285 goto err_exit;
2287 /* init q->mq_kobj and sw queues' kobjects */
2288 blk_mq_sysfs_init(q);
2290 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2291 GFP_KERNEL, set->numa_node);
2292 if (!q->queue_hw_ctx)
2293 goto err_percpu;
2295 q->mq_map = set->mq_map;
2297 blk_mq_realloc_hw_ctxs(set, q);
2298 if (!q->nr_hw_queues)
2299 goto err_hctxs;
2301 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2302 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2304 q->nr_queues = nr_cpu_ids;
2306 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2308 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2309 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2311 q->sg_reserved_size = INT_MAX;
2313 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2314 INIT_LIST_HEAD(&q->requeue_list);
2315 spin_lock_init(&q->requeue_lock);
2317 blk_queue_make_request(q, blk_mq_make_request);
2320 /* Do this after blk_queue_make_request() overrides it... */
2322 q->nr_requests = set->queue_depth;
2325 /* Default to classic polling */
2327 q->poll_nsec = -1;
2329 if (set->ops->complete)
2330 blk_queue_softirq_done(q, set->ops->complete);
2332 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2334 get_online_cpus();
2335 mutex_lock(&all_q_mutex);
2337 list_add_tail(&q->all_q_node, &all_q_list);
2338 blk_mq_add_queue_tag_set(set, q);
2339 blk_mq_map_swqueue(q, cpu_online_mask);
2341 mutex_unlock(&all_q_mutex);
2342 put_online_cpus();
2344 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2345 int ret;
2347 ret = blk_mq_sched_init(q);
2348 if (ret)
2349 return ERR_PTR(ret);
2352 return q;
2354 err_hctxs:
2355 kfree(q->queue_hw_ctx);
2356 err_percpu:
2357 free_percpu(q->queue_ctx);
2358 err_exit:
2359 q->mq_ops = NULL;
2360 return ERR_PTR(-ENOMEM);
2362 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2364 void blk_mq_free_queue(struct request_queue *q)
2366 struct blk_mq_tag_set *set = q->tag_set;
2368 mutex_lock(&all_q_mutex);
2369 list_del_init(&q->all_q_node);
2370 mutex_unlock(&all_q_mutex);
2372 blk_mq_del_queue_tag_set(q);
2374 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2377 /* Basically redo blk_mq_init_queue with the queue frozen */
2378 static void blk_mq_queue_reinit(struct request_queue *q,
2379 const struct cpumask *online_mask)
2381 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2383 blk_mq_debugfs_unregister_hctxs(q);
2384 blk_mq_sysfs_unregister(q);
2387 /* Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2388 * we should change hctx->numa_node according to the new topology (this
2389 * involves freeing and re-allocating memory; is it worth doing?) */
2392 blk_mq_map_swqueue(q, online_mask);
2394 blk_mq_sysfs_register(q);
2395 blk_mq_debugfs_register_hctxs(q);
2399 /* New online cpumask which is going to be set in this hotplug event.
2400 * Declare this cpumask as global because cpu-hotplug operations are
2401 * invoked one by one and dynamically allocating it could fail. */
2403 static struct cpumask cpuhp_online_new;
2405 static void blk_mq_queue_reinit_work(void)
2407 struct request_queue *q;
2409 mutex_lock(&all_q_mutex);
2411 /* We need to freeze and reinit all existing queues. Freezing
2412 * involves a synchronous wait for an RCU grace period, and doing it
2413 * one by one may take a long time. Start freezing all queues in
2414 * one swoop and then wait for the completions so that freezing can
2415 * take place in parallel. */
2417 list_for_each_entry(q, &all_q_list, all_q_node)
2418 blk_freeze_queue_start(q);
2419 list_for_each_entry(q, &all_q_list, all_q_node)
2420 blk_mq_freeze_queue_wait(q);
2422 list_for_each_entry(q, &all_q_list, all_q_node)
2423 blk_mq_queue_reinit(q, &cpuhp_online_new);
2425 list_for_each_entry(q, &all_q_list, all_q_node)
2426 blk_mq_unfreeze_queue(q);
2428 mutex_unlock(&all_q_mutex);
2431 static int blk_mq_queue_reinit_dead(unsigned int cpu)
2433 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2434 blk_mq_queue_reinit_work();
2435 return 0;
2439 /* Before a hotadded cpu starts handling requests, new mappings must be
2440 * established. Otherwise, requests in its hw queue might never be
2441 * dispatched.
2443 * For example, suppose there is a single hw queue (hctx) and two CPU
2444 * queues (ctx0 for CPU0, and ctx1 for CPU1).
2446 * Now CPU1 has just been onlined, and a request is inserted into ctx1->rq_list,
2447 * setting bit0 in the pending bitmap because ctx1->index_hw is still zero.
2449 * Then, while running the hw queue, blk_mq_flush_busy_ctxs() finds bit0 set
2450 * in the pending bitmap and tries to retrieve requests from hctx->ctxs[0]->rq_list.
2451 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2452 * ignored. */
2454 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2456 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2457 cpumask_set_cpu(cpu, &cpuhp_online_new);
2458 blk_mq_queue_reinit_work();
2459 return 0;
2462 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2464 int i;
2466 for (i = 0; i < set->nr_hw_queues; i++)
2467 if (!__blk_mq_alloc_rq_map(set, i))
2468 goto out_unwind;
2470 return 0;
2472 out_unwind:
2473 while (--i >= 0)
2474 blk_mq_free_rq_map(set->tags[i]);
2476 return -ENOMEM;
2480 /* Allocate the request maps associated with this tag_set. Note that this
2481 * may reduce the depth asked for, if memory is tight. set->queue_depth
2482 * will be updated to reflect the allocated depth. */
2484 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2486 unsigned int depth;
2487 int err;
2489 depth = set->queue_depth;
2490 do {
2491 err = __blk_mq_alloc_rq_maps(set);
2492 if (!err)
2493 break;
2495 set->queue_depth >>= 1;
2496 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2497 err = -ENOMEM;
2498 break;
2500 } while (set->queue_depth);
2502 if (!set->queue_depth || err) {
2503 pr_err("blk-mq: failed to allocate request map\n");
2504 return -ENOMEM;
2507 if (depth != set->queue_depth)
2508 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2509 depth, set->queue_depth);
2511 return 0;
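/*
 * Worked example for the loop above (illustrative numbers): if a driver
 * asks for queue_depth = 1024 and the allocations fail twice before
 * succeeding, the attempts go 1024 -> 512 -> 256; set->queue_depth ends
 * up at 256 and "reduced tag depth (1024 -> 256)" is logged. The loop
 * gives up once the depth would drop below
 * set->reserved_tags + BLK_MQ_TAG_MIN.
 */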
2514 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2516 if (set->ops->map_queues)
2517 return set->ops->map_queues(set);
2518 else
2519 return blk_mq_map_queues(set);
2523 /* Alloc a tag set to be associated with one or more request queues.
2524 * May fail with EINVAL for various error conditions. May adjust the
2525 * requested depth down if it is too large. In that case, the adjusted
2526 * value will be stored in set->queue_depth. */
2528 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2530 int ret;
2532 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2534 if (!set->nr_hw_queues)
2535 return -EINVAL;
2536 if (!set->queue_depth)
2537 return -EINVAL;
2538 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2539 return -EINVAL;
2541 if (!set->ops->queue_rq)
2542 return -EINVAL;
2544 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2545 pr_info("blk-mq: reduced tag depth to %u\n",
2546 BLK_MQ_MAX_DEPTH);
2547 set->queue_depth = BLK_MQ_MAX_DEPTH;
2551 /* If a crashdump is active, then we are potentially in a very
2552 * memory-constrained environment. Limit us to 1 queue and
2553 * 64 tags to prevent using too much memory. */
2555 if (is_kdump_kernel()) {
2556 set->nr_hw_queues = 1;
2557 set->queue_depth = min(64U, set->queue_depth);
2560 /* There is no use for more h/w queues than CPUs. */
2562 if (set->nr_hw_queues > nr_cpu_ids)
2563 set->nr_hw_queues = nr_cpu_ids;
2565 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2566 GFP_KERNEL, set->numa_node);
2567 if (!set->tags)
2568 return -ENOMEM;
2570 ret = -ENOMEM;
2571 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2572 GFP_KERNEL, set->numa_node);
2573 if (!set->mq_map)
2574 goto out_free_tags;
2576 ret = blk_mq_update_queue_map(set);
2577 if (ret)
2578 goto out_free_mq_map;
2580 ret = blk_mq_alloc_rq_maps(set);
2581 if (ret)
2582 goto out_free_mq_map;
2584 mutex_init(&set->tag_list_lock);
2585 INIT_LIST_HEAD(&set->tag_list);
2587 return 0;
2589 out_free_mq_map:
2590 kfree(set->mq_map);
2591 set->mq_map = NULL;
2592 out_free_tags:
2593 kfree(set->tags);
2594 set->tags = NULL;
2595 return ret;
2597 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
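/*
 * Illustrative sketch (hypothetical names and values, not from this
 * file): the minimum a caller must fill in before blk_mq_alloc_tag_set()
 * is an ops table with at least .queue_rq, plus nr_hw_queues and
 * queue_depth; anything missing fails the -EINVAL checks above.
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,		// hypothetical handler
 *	};
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 4;
 *	set->queue_depth	= 128;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);  // per-request driver payload
 *	ret = blk_mq_alloc_tag_set(set);
 */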
2599 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2601 int i;
2603 for (i = 0; i < nr_cpu_ids; i++)
2604 blk_mq_free_map_and_requests(set, i);
2606 kfree(set->mq_map);
2607 set->mq_map = NULL;
2609 kfree(set->tags);
2610 set->tags = NULL;
2612 EXPORT_SYMBOL(blk_mq_free_tag_set);
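/*
 * Note: blk_mq_update_nr_requests() below is typically reached from a
 * write to the queue's nr_requests sysfs attribute. The queue is frozen
 * for the duration, so in-flight requests drain before the depth change
 * takes effect.
 */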
2614 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2616 struct blk_mq_tag_set *set = q->tag_set;
2617 struct blk_mq_hw_ctx *hctx;
2618 int i, ret;
2620 if (!set)
2621 return -EINVAL;
2623 blk_mq_freeze_queue(q);
2625 ret = 0;
2626 queue_for_each_hw_ctx(q, hctx, i) {
2627 if (!hctx->tags)
2628 continue;
2630 /* If we're using an MQ scheduler, just update the scheduler
2631 * queue depth. This is similar to what the old code would do. */
2633 if (!hctx->sched_tags) {
2634 ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2635 min(nr, set->queue_depth),
2636 false);
2637 } else {
2638 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2639 nr, true);
2641 if (ret)
2642 break;
2645 if (!ret)
2646 q->nr_requests = nr;
2648 blk_mq_unfreeze_queue(q);
2650 return ret;
2653 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2654 int nr_hw_queues)
2656 struct request_queue *q;
2658 lockdep_assert_held(&set->tag_list_lock);
2660 if (nr_hw_queues > nr_cpu_ids)
2661 nr_hw_queues = nr_cpu_ids;
2662 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2663 return;
2665 list_for_each_entry(q, &set->tag_list, tag_set_list)
2666 blk_mq_freeze_queue(q);
2668 set->nr_hw_queues = nr_hw_queues;
2669 blk_mq_update_queue_map(set);
2670 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2671 blk_mq_realloc_hw_ctxs(set, q);
2672 blk_mq_queue_reinit(q, cpu_online_mask);
2675 list_for_each_entry(q, &set->tag_list, tag_set_list)
2676 blk_mq_unfreeze_queue(q);
2679 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2681 mutex_lock(&set->tag_list_lock);
2682 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2683 mutex_unlock(&set->tag_list_lock);
2685 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
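/*
 * Illustrative sketch (hypothetical driver code): a driver that discovers
 * a different number of usable hardware queues, e.g. after a controller
 * reset, can resize every queue sharing the set in one call:
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, dev->nr_io_queues);
 *
 * The value is clamped to nr_cpu_ids, and the call is a no-op when it
 * already matches set->nr_hw_queues, per __blk_mq_update_nr_hw_queues().
 */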
2687 /* Enable polling stats and return whether they were already enabled. */
2688 static bool blk_poll_stats_enable(struct request_queue *q)
2690 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2691 test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2692 return true;
2693 blk_stat_add_callback(q, q->poll_cb);
2694 return false;
2697 static void blk_mq_poll_stats_start(struct request_queue *q)
2700 /* We don't arm the callback if polling stats are not enabled or the
2701 * callback is already active. */
2703 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2704 blk_stat_is_active(q->poll_cb))
2705 return;
2707 blk_stat_activate_msecs(q->poll_cb, 100);
2710 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2712 struct request_queue *q = cb->data;
2713 int bucket;
2715 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2716 if (cb->stat[bucket].nr_samples)
2717 q->poll_stat[bucket] = cb->stat[bucket];
2721 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2722 struct blk_mq_hw_ctx *hctx,
2723 struct request *rq)
2725 unsigned long ret = 0;
2726 int bucket;
2729 /* If stats collection isn't on, don't sleep, but turn it on for
2730 * future users. */
2732 if (!blk_poll_stats_enable(q))
2733 return 0;
2736 /* As an optimistic guess, use half of the mean service time
2737 * for this type of request. We can (and should) make this smarter.
2738 * For instance, if the completion latencies are tight, we can
2739 * get closer than just half the mean. This is especially
2740 * important on devices where the completion latencies are longer
2741 * than ~10 usec. We do use the stats for the relevant IO size,
2742 * if available, which does lead to better estimates. */
2744 bucket = blk_mq_poll_stats_bkt(rq);
2745 if (bucket < 0)
2746 return ret;
2748 if (q->poll_stat[bucket].nr_samples)
2749 ret = (q->poll_stat[bucket].mean + 1) / 2;
2751 return ret;
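/*
 * Worked example (illustrative numbers): if the stats bucket for this
 * request reports a mean completion time of 18000 ns, the returned sleep
 * target is (18000 + 1) / 2 = 9000 ns, i.e. we sleep for roughly half
 * the expected service time and busy-poll for the remainder.
 */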
2754 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2755 struct blk_mq_hw_ctx *hctx,
2756 struct request *rq)
2758 struct hrtimer_sleeper hs;
2759 enum hrtimer_mode mode;
2760 unsigned int nsecs;
2761 ktime_t kt;
2763 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2764 return false;
2767 /* poll_nsec can be:
2769 * -1: don't ever hybrid sleep
2770 * 0: use half of the previous average
2771 * >0: use this specific value */
2773 if (q->poll_nsec == -1)
2774 return false;
2775 else if (q->poll_nsec > 0)
2776 nsecs = q->poll_nsec;
2777 else
2778 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2780 if (!nsecs)
2781 return false;
2783 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2786 /* This will be replaced with the stats tracking code, using
2787 * 'avg_completion_time / 2' as the pre-sleep target. */
2789 kt = nsecs;
2791 mode = HRTIMER_MODE_REL;
2792 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2793 hrtimer_set_expires(&hs.timer, kt);
2795 hrtimer_init_sleeper(&hs, current);
2796 do {
2797 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2798 break;
2799 set_current_state(TASK_UNINTERRUPTIBLE);
2800 hrtimer_start_expires(&hs.timer, mode);
2801 if (hs.task)
2802 io_schedule();
2803 hrtimer_cancel(&hs.timer);
2804 mode = HRTIMER_MODE_ABS;
2805 } while (hs.task && !signal_pending(current));
2807 __set_current_state(TASK_RUNNING);
2808 destroy_hrtimer_on_stack(&hs.timer);
2809 return true;
2812 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2814 struct request_queue *q = hctx->queue;
2815 long state;
2818 /* If we sleep, have the caller restart the poll loop to reset
2819 * the state. As with the other success return cases, the
2820 * caller is responsible for checking if the IO completed. If
2821 * the IO isn't complete, we'll get called again and will go
2822 * straight to the busy poll loop. */
2824 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2825 return true;
2827 hctx->poll_considered++;
2829 state = current->state;
2830 while (!need_resched()) {
2831 int ret;
2833 hctx->poll_invoked++;
2835 ret = q->mq_ops->poll(hctx, rq->tag);
2836 if (ret > 0) {
2837 hctx->poll_success++;
2838 set_current_state(TASK_RUNNING);
2839 return true;
2842 if (signal_pending_state(state, current))
2843 set_current_state(TASK_RUNNING);
2845 if (current->state == TASK_RUNNING)
2846 return true;
2847 if (ret < 0)
2848 break;
2849 cpu_relax();
2852 return false;
2855 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2857 struct blk_mq_hw_ctx *hctx;
2858 struct blk_plug *plug;
2859 struct request *rq;
2861 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2862 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2863 return false;
2865 plug = current->plug;
2866 if (plug)
2867 blk_flush_plug_list(plug, false);
2869 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2870 if (!blk_qc_t_is_internal(cookie))
2871 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2872 else {
2873 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2875 /* With scheduling, if the request has completed, we'll
2876 * get a NULL return here, as we clear the sched tag when
2877 * that happens. The request still remains valid, as always,
2878 * so we should be safe with just the NULL check. */
2880 if (!rq)
2881 return false;
2884 return __blk_mq_poll(hctx, rq);
2886 EXPORT_SYMBOL_GPL(blk_mq_poll);
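/*
 * Illustrative caller sketch (hypothetical code, not from this file): a
 * synchronous submitter keeps the cookie returned at submission time and
 * spins on blk_mq_poll() until its completion fires:
 *
 *	blk_qc_t cookie = submit_bio(bio);	// cookie encodes hctx + tag
 *	while (!my_io_done(...)) {		// my_io_done() is hypothetical
 *		if (!blk_mq_poll(q, cookie))
 *			break;			// no progress; wait for IRQ completion
 *	}
 *
 * A false return means polling isn't possible (no ->poll, invalid cookie,
 * QUEUE_FLAG_POLL cleared) or made no progress before a reschedule was
 * needed, so callers must be prepared to wait for the normal completion
 * path.
 */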
2888 void blk_mq_disable_hotplug(void)
2890 mutex_lock(&all_q_mutex);
2893 void blk_mq_enable_hotplug(void)
2895 mutex_unlock(&all_q_mutex);
2898 static int __init blk_mq_init(void)
2900 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2901 blk_mq_hctx_notify_dead);
2903 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2904 blk_mq_queue_reinit_prepare,
2905 blk_mq_queue_reinit_dead);
2906 return 0;
2908 subsys_initcall(blk_mq_init);