/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
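/*
 * Worked example (editorial note, not part of the original source): with the
 * default q->nr_requests of 128 (BLKDEV_MAX_RQ), the arithmetic above gives
 *
 *	nr_congestion_on  = 128 - 128/8 + 1          = 113
 *	nr_congestion_off = 128 - 128/8 - 128/16 - 1 = 103
 *
 * so a request list is flagged congested once 113 requests are allocated,
 * and the flag is only cleared again when the count falls below 103, which
 * gives the congestion state some hysteresis.
 */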
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG,	"dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};
blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
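/*
 * Illustrative round trip through the table above (editorial sketch, not
 * part of the original source):
 *
 *	errno_to_blk_status(-ENOSPC)        returns BLK_STS_NOSPC
 *	blk_status_to_errno(BLK_STS_NOSPC)  returns -ENOSPC
 *
 * An errno with no entry in blk_errors[], e.g. -EINVAL, falls through the
 * lookup loop and is reported as the catch-all BLK_STS_IOERR.
 */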
static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);
static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
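/*
 * Usage sketch (hypothetical driver, not from this file): a legacy
 * request_fn that runs out of device resources can ask for a delayed
 * restart instead of busy-polling:
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		if (!mydrv_hw_has_room()) {
 *			blk_delay_queue(q, 3);		(retry in ~3ms)
 *			return;
 *		}
 *		...dispatch requests...
 *	}
 *
 * mydrv_hw_has_room() is a made-up helper; the point is only that
 * blk_delay_queue() is called with q->queue_lock held, as asserted above.
 */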
/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
 **/
void blk_start_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON(!in_interrupt() && !irqs_disabled());
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
/**
 * blk_stop_queue - stop a queue
 * @q:	The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations.
 **/
void blk_stop_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
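/*
 * Usage sketch (hypothetical driver, not from this file): the stop/start
 * pair implements the 'queue full' protocol described above. The request_fn
 * stops the queue when the hardware is saturated and the completion handler
 * restarts it:
 *
 *	request_fn:	if (mydrv_hw_queue_full(dev))
 *				blk_stop_queue(q);
 *
 *	completion IRQ:	spin_lock_irqsave(q->queue_lock, flags);
 *			blk_start_queue(q);
 *			spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * mydrv_hw_queue_full() is an illustrative name; both calls require
 * q->queue_lock to be held, per the lockdep assertions above.
 */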
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
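/*
 * Editorial summary of the run-queue variants defined above (not part of
 * the original source):
 *
 *	__blk_run_queue_uncond()  caller holds q->queue_lock; runs even a
 *	                          stopped queue (skips only dead queues)
 *	__blk_run_queue()         caller holds q->queue_lock; returns early
 *	                          if the queue is stopped
 *	blk_run_queue()           takes q->queue_lock itself, then calls
 *	                          __blk_run_queue()
 *	blk_run_queue_async()     defers the run to kblockd via delay_work,
 *	                          for contexts that must not recurse into
 *	                          the driver
 */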
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}
void blk_drain_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);
}
/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() is %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent q->request_fn() from being invoked after draining has
	 * finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/*
	 * Make sure all in-progress dispatches have completed, because
	 * blk_freeze_queue() can only complete all requests, and dispatch
	 * may still be in progress since we dispatch requests from more
	 * than one context.
	 *
	 * We rely on the driver to deal with the race in case queue
	 * initialization isn't done.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}
int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
int blk_queue_enter(struct request_queue *q, bool nowait)
{
	while (true) {
		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (nowait)
			return -EBUSY;

		/*
		 * This smp_rmb() pairs with the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against
		 * reading .mq_freeze_depth or the queue dying flag,
		 * otherwise the following wait may never return if the
		 * two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   !atomic_read(&q->mq_freeze_depth) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
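/*
 * Usage sketch (editorial, not from this file): submitters bracket work
 * against the queue with blk_queue_enter()/blk_queue_exit() so that it
 * cannot race with queue freezing or teardown:
 *
 *	if (blk_queue_enter(q, false))
 *		return -ENODEV;		(queue is dying; -EBUSY if nowait)
 *	...issue I/O against q...
 *	blk_queue_exit(q);
 *
 * While such a reference is held, q->q_usage_counter stays live and a
 * freeze waits for the matching blk_queue_exit().
 */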
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	kblockd_schedule_work(&q->timeout_work);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->io_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
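/*
 * Usage sketch (hypothetical driver, not from this file), following the
 * kernel-doc above:
 *
 *	static DEFINE_SPINLOCK(mydrv_lock);
 *	struct request_queue *q;
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);		(paired teardown at module unload)
 *
 * mydrv_request_fn and mydrv_lock are illustrative names only.
 */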
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (lock)
		q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	/* Protect q->elevator from elevator_change */
	mutex_lock(&q->sysfs_lock);

	/* init elevator */
	if (elevator_init(q, NULL)) {
		mutex_unlock(&q->sysfs_lock);
		goto out_exit_flush_rq;
	}

	mutex_unlock(&q->sysfs_lock);
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	q->fq = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}
/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}
/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}
static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}
/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_list *rl, bool sync,
		req_flags_t rq_flags)
{
	struct request_queue *q = rl->q;

	q->nr_rqs[sync]--;
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		q->nr_rqs_elvpriv--;

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct request_list *rl;
	int on_thresh, off_thresh;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);
	on_thresh = queue_congestion_on_threshold(q);
	off_thresh = queue_congestion_off_threshold(q);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_SYNC);
		else if (rl->count[BLK_RW_SYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_SYNC);

		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
			blk_set_congested(rl, BLK_RW_ASYNC);
		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
			blk_clear_congested(rl, BLK_RW_ASYNC);

		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return 0;
}
/**
 * __get_request - get a free request
 * @rl: request list to allocate from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  This function may fail under memory
 * pressure or if @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * In the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. Another possible fix would be to split the rq mempool into
	 * READ and WRITE.
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return ERR_PTR(-ENOMEM);
}
/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held.
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
		struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching.
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}
static struct request *blk_old_get_request(struct request_queue *q,
					   unsigned int op, gfp_t gfp_mask)
{
	struct request *rq;

	WARN_ON_ONCE(q->mq_ops);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, gfp_mask);
	if (IS_ERR(rq)) {
		spin_unlock_irq(q->queue_lock);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				gfp_t gfp_mask)
{
	struct request *req;

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op,
			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
				0 : BLK_MQ_REQ_NOWAIT);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, gfp_mask);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request);
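/*
 * Usage sketch (editorial, not from this file): callers allocate a request
 * for passthrough-style submission and release it with blk_put_request():
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...set up and issue rq...
 *	blk_put_request(rq);
 *
 * With GFP_KERNEL (__GFP_DIRECT_RECLAIM set) the allocation may sleep but
 * fails only if the queue is dying; without it the blk-mq path passes
 * BLK_MQ_REQ_NOWAIT, as the code above shows.
 */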
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}
/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);
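/*
 * Worked example (editorial, not part of the original source): per
 * part_round_stats_single() above, an interval of delta jiffies with 4
 * requests in flight adds 4 * delta to time_in_queue but only delta to
 * io_ticks.  Over a 100ms window that is 400ms of queue time against 100ms
 * of busy time, which is how average queue depth (>1) and utilisation
 * (<=100%) can both be derived from the same pair of counters.
 */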
#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif
void __blk_put_request(struct request_queue *q, struct request *req)
{
	req_flags_t rq_flags = req->rq_flags;

	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	lockdep_assert_held(q->queue_lock);

	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	wbt_done(q->rq_wb, &req->issue_stat);

	/*
	 * Request may not have originated from ll_rw_blk. If not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rq_flags & RQF_ALLOCED) {
		struct request_list *rl = blk_rq_rl(req);
		bool sync = op_is_sync(req->cmd_flags);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, sync, rq_flags);
		blk_put_rl(rl);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		return false;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only the blk-mq multiple-hardware-queues case
			 * checks the rq in the same queue; there should be
			 * only one such rq in a queue.
			 */
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}
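/*
 * Usage sketch (editorial, not from this file): plugging is driven by the
 * submitter, typically as
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	...submit a batch of bios...
 *	blk_finish_plug(&plug);
 *
 * Bios submitted inside the plugged section land on current->plug, which is
 * exactly the list blk_attempt_plug_merge() above scans for merge candidates
 * before any queue lock is taken.
 */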
unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
	struct io_context *ioc = rq_ioc(bio);

	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->__sector = bio->bi_iter.bi_sector;
	if (ioprio_valid(bio_prio(bio)))
		req->ioprio = bio_prio(bio);
	else if (ioc)
		req->ioprio = ioc->ioprio;
	else
		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	req->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct blk_plug *plug;
	int where = ELEVATOR_INSERT_SORT;
	struct request *req, *free;
	unsigned int request_count = 0;
	unsigned int wb_acct;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (op_is_flush(bio->bi_opf)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	switch (elv_merge(q, &req, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!bio_attempt_back_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_back_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
		goto out_unlock;
	case ELEVATOR_FRONT_MERGE:
		if (!bio_attempt_front_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_front_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
		goto out_unlock;
	default:
		break;
	}

get_rq:
	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);

	/*
	 * Grab a free request. This might sleep but cannot fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		__wbt_done(q->rq_wb, wb_acct);
		if (PTR_ERR(req) == -ENOMEM)
			bio->bi_status = BLK_STS_RESOURCE;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		goto out_unlock;
	}

	wbt_track(&req->issue_stat, wb_acct);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	blk_init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * off a plug trace.
		 *
		 * @request_count may become stale because of schedule
		 * out, so check plug list again.
		 */
		if (!request_count || list_empty(&plug->list))
			trace_block_plug(q);
		else {
			struct request *last = list_entry_rq(plug->list.prev);
			if (request_count >= BLK_MAX_REQUEST_COUNT ||
			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}

	return BLK_QC_T_NONE;
}
static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)get_capacity(bio->bi_disk));
}
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */
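/*
 * Editorial note (not part of the original source): the fault attribute
 * above uses the standard fault-injection framework, so - assuming the
 * usual Documentation/fault-injection interface - it is driven by the
 * boot parameter
 *
 *	fail_make_request=<interval>,<probability>,<space>,<times>
 *
 * plus the per-partition make_it_fail toggle consulted in
 * should_fail_request(), exposed in sysfs under
 * /sys/block/<disk>/<partition>/make_it_fail.
 */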
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = 0;

	/*
	 * Zone reset does not include bi_size so bio_sectors() is always 0.
	 * Include a test for the reset op code and perform the remap if needed.
	 */
	if (!bio->bi_partno ||
	    (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
		return 0;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
		bio->bi_iter.bi_sector += p->start_sect;
		bio->bi_partno = 0;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	} else {
		printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
		ret = -EIO;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = get_capacity(bio->bi_disk);
	if (maxsector) {
		sector_t sector = bio->bi_iter.bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}
2042 static noinline_for_stack bool
2043 generic_make_request_checks(struct bio *bio)
2045 struct request_queue *q;
2046 int nr_sectors = bio_sectors(bio);
2047 blk_status_t status = BLK_STS_IOERR;
2048 char b[BDEVNAME_SIZE];
2050 might_sleep();
2052 if (bio_check_eod(bio, nr_sectors))
2053 goto end_io;
2055 q = bio->bi_disk->queue;
2056 if (unlikely(!q)) {
2057 printk(KERN_ERR
2058 "generic_make_request: Trying to access "
2059 "nonexistent block-device %s (%Lu)\n",
2060 bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
2061 goto end_io;
2065 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
2066 * if the queue is not a request-based queue.
2069 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
2070 goto not_supported;
2072 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
2073 goto end_io;
2075 if (blk_partition_remap(bio))
2076 goto end_io;
2078 if (bio_check_eod(bio, nr_sectors))
2079 goto end_io;
2082 * Filter flush bios early so that make_request based
2083 * drivers without flush support don't have to worry
2084 * about them.
2086 if (op_is_flush(bio->bi_opf) &&
2087 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
2088 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
2089 if (!nr_sectors) {
2090 status = BLK_STS_OK;
2091 goto end_io;
2095 switch (bio_op(bio)) {
2096 case REQ_OP_DISCARD:
2097 if (!blk_queue_discard(q))
2098 goto not_supported;
2099 break;
2100 case REQ_OP_SECURE_ERASE:
2101 if (!blk_queue_secure_erase(q))
2102 goto not_supported;
2103 break;
2104 case REQ_OP_WRITE_SAME:
2105 if (!q->limits.max_write_same_sectors)
2106 goto not_supported;
2107 break;
2108 case REQ_OP_ZONE_REPORT:
2109 case REQ_OP_ZONE_RESET:
2110 if (!blk_queue_is_zoned(q))
2111 goto not_supported;
2112 break;
2113 case REQ_OP_WRITE_ZEROES:
2114 if (!q->limits.max_write_zeroes_sectors)
2115 goto not_supported;
2116 break;
2117 default:
2118 break;
2122 * Various block parts want %current->io_context and lazy ioc
2123 * allocation ends up trading a lot of pain for a small amount of
2124 * memory. Just allocate it upfront. This may fail and the
2125 * block layer knows how to live with it.
2127 create_io_context(GFP_ATOMIC, q->node);
2129 if (!blkcg_bio_issue_check(q, bio))
2130 return false;
2132 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
2133 trace_block_bio_queue(q, bio);
2134 /* Now that enqueuing has been traced, we need to trace
2135 * completion as well.
2137 bio_set_flag(bio, BIO_TRACE_COMPLETION);
2139 return true;
2141 not_supported:
2142 status = BLK_STS_NOTSUPP;
2143 end_io:
2144 bio->bi_status = status;
2145 bio_endio(bio);
2146 return false;
2150 * generic_make_request - hand a buffer to its device driver for I/O
2151 * @bio: The bio describing the location in memory and on the device.
2153 * generic_make_request() is used to make I/O requests of block
2154 * devices. It is passed a &struct bio, which describes the I/O that needs
2155 * to be done.
2157 * generic_make_request() does not return any status. The
2158 * success/failure status of the request, along with notification of
2159 * completion, is delivered asynchronously through the bio->bi_end_io
2160 * function described (one day) elsewhere.
2162 * The caller of generic_make_request must make sure that bi_io_vec
2163 * is set to describe the memory buffer, that bi_dev and bi_sector are
2164 * set to describe the device address, and that
2165 * bi_end_io and optionally bi_private are set to describe how
2166 * completion notification should be signaled.
2168 * generic_make_request and the drivers it calls may use bi_next if this
2169 * bio happens to be merged with someone else, and may resubmit the bio to
2170 * a lower device by calling into generic_make_request recursively, which
2171 * means the bio should NOT be touched after the call to ->make_request_fn.
2173 blk_qc_t generic_make_request(struct bio *bio)
2176 * bio_list_on_stack[0] contains bios submitted by the current
2177 * make_request_fn.
2178 * bio_list_on_stack[1] contains bios that were submitted before
2179 * the current make_request_fn, but that haven't been processed
2180 * yet.
2182 struct bio_list bio_list_on_stack[2];
2183 blk_qc_t ret = BLK_QC_T_NONE;
2185 if (!generic_make_request_checks(bio))
2186 goto out;
2189 * We only want one ->make_request_fn to be active at a time, else
2190 * stack usage with stacked devices could be a problem. So use
2191 * current->bio_list to keep a list of requests submitted by a
2192 * make_request_fn function. current->bio_list is also used as a
2193 * flag to say if generic_make_request is currently active in this
2194 * task or not. If it is NULL, then no make_request is active. If
2195 * it is non-NULL, then a make_request is active, and new requests
2196 * should be added at the tail.
2198 if (current->bio_list) {
2199 bio_list_add(&current->bio_list[0], bio);
2200 goto out;
2203 /* The following loop may be a bit non-obvious, and so deserves some
2204 * explanation.
2205 * Before entering the loop, bio->bi_next is NULL (as all callers
2206 * ensure that) so we have a list with a single bio.
2207 * We pretend that we have just taken it off a longer list, so
2208 * we assign bio_list to a pointer to the bio_list_on_stack,
2209 * thus initialising the bio_list of new bios to be
2210 * added. ->make_request() may indeed add some more bios
2211 * through a recursive call to generic_make_request. If it
2212 * did, we find a non-NULL value in bio_list and re-enter the loop
2213 * from the top. In this case we really did just take the bio
2214 * off the top of the list (no pretending) and so remove it from
2215 * bio_list, and call into ->make_request() again.
2217 BUG_ON(bio->bi_next);
2218 bio_list_init(&bio_list_on_stack[0]);
2219 current->bio_list = bio_list_on_stack;
2220 do {
2221 struct request_queue *q = bio->bi_disk->queue;
2223 if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
2224 struct bio_list lower, same;
2226 /* Create a fresh bio_list for all subordinate requests */
2227 bio_list_on_stack[1] = bio_list_on_stack[0];
2228 bio_list_init(&bio_list_on_stack[0]);
2229 ret = q->make_request_fn(q, bio);
2231 blk_queue_exit(q);
2233 /* sort new bios into those for a lower level
2234 * and those for the same level
2236 bio_list_init(&lower);
2237 bio_list_init(&same);
2238 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
2239 if (q == bio->bi_disk->queue)
2240 bio_list_add(&same, bio);
2241 else
2242 bio_list_add(&lower, bio);
2243 /* now assemble so we handle the lowest level first */
2244 bio_list_merge(&bio_list_on_stack[0], &lower);
2245 bio_list_merge(&bio_list_on_stack[0], &same);
2246 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
2247 } else {
2248 if (unlikely(!blk_queue_dying(q) &&
2249 (bio->bi_opf & REQ_NOWAIT)))
2250 bio_wouldblock_error(bio);
2251 else
2252 bio_io_error(bio);
2254 bio = bio_list_pop(&bio_list_on_stack[0]);
2255 } while (bio);
2256 current->bio_list = NULL; /* deactivate */
2258 out:
2259 return ret;
2261 EXPORT_SYMBOL(generic_make_request);
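/*
 * Minimal sketch of the recursion-avoidance contract above (illustrative
 * only; stack_make_request() and lower_bdev are hypothetical names): a
 * stacking driver remaps a bio and resubmits it.  The nested call merely
 * queues the bio on current->bio_list; the loop above then dispatches it
 * iteratively instead of growing the stack.
 *
 *	static blk_qc_t stack_make_request(struct request_queue *q,
 *					   struct bio *bio)
 *	{
 *		bio_set_dev(bio, lower_bdev);		// redirect to lower device
 *		return generic_make_request(bio);	// queued, not recursed
 *	}
 */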
2264 * submit_bio - submit a bio to the block device layer for I/O
2265 * @bio: The &struct bio which describes the I/O
2267 * submit_bio() is very similar in purpose to generic_make_request(), and
2268 * uses that function to do most of the work. Both are fairly rough
2269 * interfaces; @bio must be set up and ready for I/O.
2272 blk_qc_t submit_bio(struct bio *bio)
2275 * If it's a regular read/write or a barrier with data attached,
2276 * go through the normal accounting stuff before submission.
2278 if (bio_has_data(bio)) {
2279 unsigned int count;
2281 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
2282 count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
2283 else
2284 count = bio_sectors(bio);
2286 if (op_is_write(bio_op(bio))) {
2287 count_vm_events(PGPGOUT, count);
2288 } else {
2289 task_io_account_read(bio->bi_iter.bi_size);
2290 count_vm_events(PGPGIN, count);
2293 if (unlikely(block_dump)) {
2294 char b[BDEVNAME_SIZE];
2295 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
2296 current->comm, task_pid_nr(current),
2297 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
2298 (unsigned long long)bio->bi_iter.bi_sector,
2299 bio_devname(bio, b), count);
2303 return generic_make_request(bio);
2305 EXPORT_SYMBOL(submit_bio);
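/*
 * Example of preparing a bio for submit_bio() (a sketch; my_end_io, bdev,
 * page and sector are assumed to exist in the caller):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_end_io = my_end_io;	// completion is delivered asynchronously
 *	submit_bio(bio);
 */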
2308 * blk_cloned_rq_check_limits - Helper function to check a cloned request
2309 * for the new queue limits
2310 * @q: the queue
2311 * @rq: the request being checked
2313 * Description:
2314 * @rq may have been made based on weaker limitations of upper-level queues
2315 * in request stacking drivers, and it may violate the limitation of @q.
2316 * Since the block layer and the underlying device driver trust @rq
2317 * after it is inserted to @q, it should be checked against @q before
2318 * the insertion using this generic function.
2320 * Request stacking drivers like request-based dm may change the queue
2321 * limits when retrying requests on other queues. Those requests need
2322 * to be checked against the new queue limits again during dispatch.
2324 static int blk_cloned_rq_check_limits(struct request_queue *q,
2325 struct request *rq)
2327 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
2328 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2329 return -EIO;
2333 * The queue's settings related to segment counting, like q->bounce_pfn,
2334 * may differ from those of other stacking queues.
2335 * Recalculate them to check the request correctly against this
2336 * queue's limits.
2338 blk_recalc_rq_segments(rq);
2339 if (rq->nr_phys_segments > queue_max_segments(q)) {
2340 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2341 return -EIO;
2344 return 0;
2348 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2349 * @q: the queue to submit the request
2350 * @rq: the request being queued
2352 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2354 unsigned long flags;
2355 int where = ELEVATOR_INSERT_BACK;
2357 if (blk_cloned_rq_check_limits(q, rq))
2358 return BLK_STS_IOERR;
2360 if (rq->rq_disk &&
2361 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2362 return BLK_STS_IOERR;
2364 if (q->mq_ops) {
2365 if (blk_queue_io_stat(q))
2366 blk_account_io_start(rq, true);
2368 * Since we have a scheduler attached to the top device,
2369 * bypass a potential scheduler on the bottom device for
2370 * insert.
2372 blk_mq_request_bypass_insert(rq);
2373 return BLK_STS_OK;
2376 spin_lock_irqsave(q->queue_lock, flags);
2377 if (unlikely(blk_queue_dying(q))) {
2378 spin_unlock_irqrestore(q->queue_lock, flags);
2379 return BLK_STS_IOERR;
2383 * The submitting request must be dequeued before calling this function
2384 * because it will be linked to another request_queue.
2386 BUG_ON(blk_queued_rq(rq));
2388 if (op_is_flush(rq->cmd_flags))
2389 where = ELEVATOR_INSERT_FLUSH;
2391 add_acct_request(q, rq, where);
2392 if (where == ELEVATOR_INSERT_FLUSH)
2393 __blk_run_queue(q);
2394 spin_unlock_irqrestore(q->queue_lock, flags);
2396 return BLK_STS_OK;
2398 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2401 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2402 * @rq: request to examine
2404 * Description:
2405 * A request could be a merge of IOs which require different failure
2406 * handling. This function determines the number of bytes which
2407 * can be failed from the beginning of the request without
2408 * crossing into an area which needs to be retried further.
2410 * Return:
2411 * The number of bytes to fail.
2413 unsigned int blk_rq_err_bytes(const struct request *rq)
2415 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2416 unsigned int bytes = 0;
2417 struct bio *bio;
2419 if (!(rq->rq_flags & RQF_MIXED_MERGE))
2420 return blk_rq_bytes(rq);
2423 * Currently the only 'mixing' which can happen is between
2424 * different failfast types. We can safely fail portions
2425 * which have all the failfast bits that the first one has -
2426 * the ones which are at least as eager to fail as the first
2427 * one.
2429 for (bio = rq->bio; bio; bio = bio->bi_next) {
2430 if ((bio->bi_opf & ff) != ff)
2431 break;
2432 bytes += bio->bi_iter.bi_size;
2435 /* this could lead to an infinite loop */
2436 BUG_ON(blk_rq_bytes(rq) && !bytes);
2437 return bytes;
2439 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2441 void blk_account_io_completion(struct request *req, unsigned int bytes)
2443 if (blk_do_io_stat(req)) {
2444 const int rw = rq_data_dir(req);
2445 struct hd_struct *part;
2446 int cpu;
2448 cpu = part_stat_lock();
2449 part = req->part;
2450 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2451 part_stat_unlock();
2455 void blk_account_io_done(struct request *req)
2458 * Account IO completion. flush_rq isn't accounted as a
2459 * normal IO on queueing or completion. Accounting the
2460 * containing request is enough.
2462 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2463 unsigned long duration = jiffies - req->start_time;
2464 const int rw = rq_data_dir(req);
2465 struct hd_struct *part;
2466 int cpu;
2468 cpu = part_stat_lock();
2469 part = req->part;
2471 part_stat_inc(cpu, part, ios[rw]);
2472 part_stat_add(cpu, part, ticks[rw], duration);
2473 part_round_stats(req->q, cpu, part);
2474 part_dec_in_flight(req->q, part, rw);
2476 hd_struct_put(part);
2477 part_stat_unlock();
2481 #ifdef CONFIG_PM
2483 * Don't process normal requests when queue is suspended
2484 * or in the process of suspending/resuming
2486 static struct request *blk_pm_peek_request(struct request_queue *q,
2487 struct request *rq)
2489 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2490 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
2491 return NULL;
2492 else
2493 return rq;
2495 #else
2496 static inline struct request *blk_pm_peek_request(struct request_queue *q,
2497 struct request *rq)
2499 return rq;
2501 #endif
2503 void blk_account_io_start(struct request *rq, bool new_io)
2505 struct hd_struct *part;
2506 int rw = rq_data_dir(rq);
2507 int cpu;
2509 if (!blk_do_io_stat(rq))
2510 return;
2512 cpu = part_stat_lock();
2514 if (!new_io) {
2515 part = rq->part;
2516 part_stat_inc(cpu, part, merges[rw]);
2517 } else {
2518 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2519 if (!hd_struct_try_get(part)) {
2521 * The partition is already being removed, so
2522 * the request will be accounted on the disk only.
2524 * We take a reference on disk->part0 although that
2525 * partition will never be deleted, so we can treat
2526 * it as any other partition.
2528 part = &rq->rq_disk->part0;
2529 hd_struct_get(part);
2531 part_round_stats(rq->q, cpu, part);
2532 part_inc_in_flight(rq->q, part, rw);
2533 rq->part = part;
2536 part_stat_unlock();
2540 * blk_peek_request - peek at the top of a request queue
2541 * @q: request queue to peek at
2543 * Description:
2544 * Return the request at the top of @q. The returned request
2545 * should be started using blk_start_request() before LLD starts
2546 * processing it.
2548 * Return:
2549 * Pointer to the request at the top of @q if available. Null
2550 * otherwise.
2552 struct request *blk_peek_request(struct request_queue *q)
2554 struct request *rq;
2555 int ret;
2557 lockdep_assert_held(q->queue_lock);
2558 WARN_ON_ONCE(q->mq_ops);
2560 while ((rq = __elv_next_request(q)) != NULL) {
2562 rq = blk_pm_peek_request(q, rq);
2563 if (!rq)
2564 break;
2566 if (!(rq->rq_flags & RQF_STARTED)) {
2568 * This is the first time the device driver
2569 * sees this request (possibly after
2570 * requeueing). Notify IO scheduler.
2572 if (rq->rq_flags & RQF_SORTED)
2573 elv_activate_rq(q, rq);
2576 * Just mark it as started even if we don't start
2577 * it: a request that has been delayed should
2578 * not be passed by new incoming requests.
2580 rq->rq_flags |= RQF_STARTED;
2581 trace_block_rq_issue(q, rq);
2584 if (!q->boundary_rq || q->boundary_rq == rq) {
2585 q->end_sector = rq_end_sector(rq);
2586 q->boundary_rq = NULL;
2589 if (rq->rq_flags & RQF_DONTPREP)
2590 break;
2592 if (q->dma_drain_size && blk_rq_bytes(rq)) {
2594 * Make sure space for the drain appears. We
2595 * know we can do this because max_hw_segments
2596 * has been adjusted to be one fewer than the
2597 * device can handle.
2599 rq->nr_phys_segments++;
2602 if (!q->prep_rq_fn)
2603 break;
2605 ret = q->prep_rq_fn(q, rq);
2606 if (ret == BLKPREP_OK) {
2607 break;
2608 } else if (ret == BLKPREP_DEFER) {
2610 * The request may have been (partially) prepped.
2611 * We need to keep this request at the front to
2612 * avoid resource deadlock. RQF_STARTED will
2613 * prevent other fs requests from passing this one.
2615 if (q->dma_drain_size && blk_rq_bytes(rq) &&
2616 !(rq->rq_flags & RQF_DONTPREP)) {
2618 * remove the space for the drain we added
2619 * so that we don't add it again
2621 --rq->nr_phys_segments;
2624 rq = NULL;
2625 break;
2626 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2627 rq->rq_flags |= RQF_QUIET;
2629 * Mark this request as started so we don't trigger
2630 * any debug logic in the end I/O path.
2632 blk_start_request(rq);
2633 __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
2634 BLK_STS_TARGET : BLK_STS_IOERR);
2635 } else {
2636 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2637 break;
2641 return rq;
2643 EXPORT_SYMBOL(blk_peek_request);
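/*
 * Sketch of the peek/start split (illustrative; my_hw_ready() is a made-up
 * driver helper): a driver may peek without dequeueing and only start the
 * request once the hardware can actually take it.  The queue lock must be
 * held, as asserted above.
 *
 *	rq = blk_peek_request(q);
 *	if (rq && my_hw_ready()) {
 *		blk_start_request(rq);	// dequeue and start the timeout timer
 *		...
 *	}				// otherwise leave rq queued
 */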
2645 static void blk_dequeue_request(struct request *rq)
2647 struct request_queue *q = rq->q;
2649 BUG_ON(list_empty(&rq->queuelist));
2650 BUG_ON(ELV_ON_HASH(rq));
2652 list_del_init(&rq->queuelist);
2655 * The time frame between a request being removed from the lists
2656 * and when it is freed is accounted as I/O that is in progress on
2657 * the driver side.
2659 if (blk_account_rq(rq)) {
2660 q->in_flight[rq_is_sync(rq)]++;
2661 set_io_start_time_ns(rq);
2666 * blk_start_request - start request processing on the driver
2667 * @req: request to dequeue
2669 * Description:
2670 * Dequeue @req and start timeout timer on it. This hands off the
2671 * request to the driver.
2673 void blk_start_request(struct request *req)
2675 lockdep_assert_held(req->q->queue_lock);
2676 WARN_ON_ONCE(req->q->mq_ops);
2678 blk_dequeue_request(req);
2680 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2681 blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
2682 req->rq_flags |= RQF_STATS;
2683 wbt_issue(req->q->rq_wb, &req->issue_stat);
2686 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
2687 blk_add_timer(req);
2689 EXPORT_SYMBOL(blk_start_request);
2692 * blk_fetch_request - fetch a request from a request queue
2693 * @q: request queue to fetch a request from
2695 * Description:
2696 * Return the request at the top of @q. The request is started on
2697 * return and LLD can start processing it immediately.
2699 * Return:
2700 * Pointer to the request at the top of @q if available. Null
2701 * otherwise.
2703 struct request *blk_fetch_request(struct request_queue *q)
2705 struct request *rq;
2707 lockdep_assert_held(q->queue_lock);
2708 WARN_ON_ONCE(q->mq_ops);
2710 rq = blk_peek_request(q);
2711 if (rq)
2712 blk_start_request(rq);
2713 return rq;
2715 EXPORT_SYMBOL(blk_fetch_request);
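/*
 * Typical legacy ->request_fn loop built on blk_fetch_request() (a sketch;
 * my_request_fn() and my_dispatch() are hypothetical).  The queue lock is
 * already held when ->request_fn is invoked.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			my_dispatch(rq);	// complete later, e.g. blk_end_request()
 *	}
 */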
2718 * blk_update_request - Special helper function for request stacking drivers
2719 * @req: the request being processed
2720 * @error: block status code
2721 * @nr_bytes: number of bytes to complete @req
2723 * Description:
2724 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2725 * the request structure even if @req doesn't have leftover.
2726 * If @req has leftover, sets it up for the next range of segments.
2728 * This special helper function is only for request stacking drivers
2729 * (e.g. request-based dm) so that they can handle partial completion.
2730 * Actual device drivers should use blk_end_request instead.
2732 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees a
2733 * %false return from this function.
2735 * Return:
2736 * %false - this request doesn't have any more data
2737 * %true - this request has more data
2739 bool blk_update_request(struct request *req, blk_status_t error,
2740 unsigned int nr_bytes)
2742 int total_bytes;
2744 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
2746 if (!req->bio)
2747 return false;
2749 if (unlikely(error && !blk_rq_is_passthrough(req) &&
2750 !(req->rq_flags & RQF_QUIET)))
2751 print_req_error(req, error);
2753 blk_account_io_completion(req, nr_bytes);
2755 total_bytes = 0;
2756 while (req->bio) {
2757 struct bio *bio = req->bio;
2758 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2760 if (bio_bytes == bio->bi_iter.bi_size)
2761 req->bio = bio->bi_next;
2763 /* Completion has already been traced */
2764 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
2765 req_bio_endio(req, bio, bio_bytes, error);
2767 total_bytes += bio_bytes;
2768 nr_bytes -= bio_bytes;
2770 if (!nr_bytes)
2771 break;
2775 * completely done
2777 if (!req->bio) {
2779 * Reset counters so that the request stacking driver
2780 * can find how many bytes remain in the request
2781 * later.
2783 req->__data_len = 0;
2784 return false;
2787 req->__data_len -= total_bytes;
2789 /* update sector only for requests with clear definition of sector */
2790 if (!blk_rq_is_passthrough(req))
2791 req->__sector += total_bytes >> 9;
2793 /* mixed attributes always follow the first bio */
2794 if (req->rq_flags & RQF_MIXED_MERGE) {
2795 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2796 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
2799 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
2801 * If the total number of sectors is less than the first segment
2802 * size, something has gone terribly wrong.
2804 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2805 blk_dump_rq_flags(req, "request botched");
2806 req->__data_len = blk_rq_cur_bytes(req);
2809 /* recalculate the number of segments */
2810 blk_recalc_rq_segments(req);
2813 return true;
2815 EXPORT_SYMBOL_GPL(blk_update_request);
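/*
 * Partial-completion sketch for stacking drivers (illustrative; "done" is
 * however many bytes the lower device actually finished):
 *
 *	if (blk_update_request(rq, BLK_STS_OK, done)) {
 *		// bios covering the first "done" bytes are ended; rq now
 *		// describes the remainder and can be requeued or retried
 *	} else {
 *		// no leftover: the caller still finishes rq itself, e.g.
 *		// blk_finish_request() with the queue lock held (legacy path)
 *	}
 */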
2817 static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
2818 unsigned int nr_bytes,
2819 unsigned int bidi_bytes)
2821 if (blk_update_request(rq, error, nr_bytes))
2822 return true;
2824 /* Bidi request must be completed as a whole */
2825 if (unlikely(blk_bidi_rq(rq)) &&
2826 blk_update_request(rq->next_rq, error, bidi_bytes))
2827 return true;
2829 if (blk_queue_add_random(rq->q))
2830 add_disk_randomness(rq->rq_disk);
2832 return false;
2836 * blk_unprep_request - unprepare a request
2837 * @req: the request
2839 * This function makes a request ready for complete resubmission (or
2840 * completion). It happens only after all error handling is complete,
2841 * so represents the appropriate moment to deallocate any resources
2842 * that were allocated to the request in the prep_rq_fn. The queue
2843 * lock is held when calling this.
2845 void blk_unprep_request(struct request *req)
2847 struct request_queue *q = req->q;
2849 req->rq_flags &= ~RQF_DONTPREP;
2850 if (q->unprep_rq_fn)
2851 q->unprep_rq_fn(q, req);
2853 EXPORT_SYMBOL_GPL(blk_unprep_request);
2855 void blk_finish_request(struct request *req, blk_status_t error)
2857 struct request_queue *q = req->q;
2859 lockdep_assert_held(req->q->queue_lock);
2860 WARN_ON_ONCE(q->mq_ops);
2862 if (req->rq_flags & RQF_STATS)
2863 blk_stat_add(req);
2865 if (req->rq_flags & RQF_QUEUED)
2866 blk_queue_end_tag(q, req);
2868 BUG_ON(blk_queued_rq(req));
2870 if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
2871 laptop_io_completion(req->q->backing_dev_info);
2873 blk_delete_timer(req);
2875 if (req->rq_flags & RQF_DONTPREP)
2876 blk_unprep_request(req);
2878 blk_account_io_done(req);
2880 if (req->end_io) {
2881 wbt_done(req->q->rq_wb, &req->issue_stat);
2882 req->end_io(req, error);
2883 } else {
2884 if (blk_bidi_rq(req))
2885 __blk_put_request(req->next_rq->q, req->next_rq);
2887 __blk_put_request(q, req);
2890 EXPORT_SYMBOL(blk_finish_request);
2893 * blk_end_bidi_request - Complete a bidi request
2894 * @rq: the request to complete
2895 * @error: block status code
2896 * @nr_bytes: number of bytes to complete @rq
2897 * @bidi_bytes: number of bytes to complete @rq->next_rq
2899 * Description:
2900 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2901 * Drivers that support bidi can safely call this function for any
2902 * type of request, bidi or uni. In the latter case @bidi_bytes is
2903 * just ignored.
2905 * Return:
2906 * %false - we are done with this request
2907 * %true - still buffers pending for this request
2909 static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
2910 unsigned int nr_bytes, unsigned int bidi_bytes)
2912 struct request_queue *q = rq->q;
2913 unsigned long flags;
2915 WARN_ON_ONCE(q->mq_ops);
2917 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2918 return true;
2920 spin_lock_irqsave(q->queue_lock, flags);
2921 blk_finish_request(rq, error);
2922 spin_unlock_irqrestore(q->queue_lock, flags);
2924 return false;
2928 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2929 * @rq: the request to complete
2930 * @error: block status code
2931 * @nr_bytes: number of bytes to complete @rq
2932 * @bidi_bytes: number of bytes to complete @rq->next_rq
2934 * Description:
2935 * Identical to blk_end_bidi_request() except that queue lock is
2936 * assumed to be locked on entry and remains so on return.
2938 * Return:
2939 * %false - we are done with this request
2940 * %true - still buffers pending for this request
2942 static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
2943 unsigned int nr_bytes, unsigned int bidi_bytes)
2945 lockdep_assert_held(rq->q->queue_lock);
2946 WARN_ON_ONCE(rq->q->mq_ops);
2948 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2949 return true;
2951 blk_finish_request(rq, error);
2953 return false;
2957 * blk_end_request - Helper function for drivers to complete the request.
2958 * @rq: the request being processed
2959 * @error: block status code
2960 * @nr_bytes: number of bytes to complete
2962 * Description:
2963 * Ends I/O on a number of bytes attached to @rq.
2964 * If @rq has leftover, sets it up for the next range of segments.
2966 * Return:
2967 * %false - we are done with this request
2968 * %true - still buffers pending for this request
2970 bool blk_end_request(struct request *rq, blk_status_t error,
2971 unsigned int nr_bytes)
2973 WARN_ON_ONCE(rq->q->mq_ops);
2974 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2976 EXPORT_SYMBOL(blk_end_request);
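/*
 * Example completion from a driver's interrupt handler (a sketch; "bytes"
 * is the amount of data the hardware just finished):
 *
 *	if (!blk_end_request(rq, BLK_STS_OK, bytes))
 *		my_clear_current_rq();	// hypothetical: rq fully completed
 *	// a %true return means rq still has bytes pending and was set up
 *	// for the next range of segments
 */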
2979 * blk_end_request_all - Helper function for drivers to finish the request.
2980 * @rq: the request to finish
2981 * @error: block status code
2983 * Description:
2984 * Completely finish @rq.
2986 void blk_end_request_all(struct request *rq, blk_status_t error)
2988 bool pending;
2989 unsigned int bidi_bytes = 0;
2991 if (unlikely(blk_bidi_rq(rq)))
2992 bidi_bytes = blk_rq_bytes(rq->next_rq);
2994 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2995 BUG_ON(pending);
2997 EXPORT_SYMBOL(blk_end_request_all);
3000 * __blk_end_request - Helper function for drivers to complete the request.
3001 * @rq: the request being processed
3002 * @error: block status code
3003 * @nr_bytes: number of bytes to complete
3005 * Description:
3006 * Must be called with queue lock held unlike blk_end_request().
3008 * Return:
3009 * %false - we are done with this request
3010 * %true - still buffers pending for this request
3012 bool __blk_end_request(struct request *rq, blk_status_t error,
3013 unsigned int nr_bytes)
3015 lockdep_assert_held(rq->q->queue_lock);
3016 WARN_ON_ONCE(rq->q->mq_ops);
3018 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
3020 EXPORT_SYMBOL(__blk_end_request);
3023 * __blk_end_request_all - Helper function for drivers to finish the request.
3024 * @rq: the request to finish
3025 * @error: block status code
3027 * Description:
3028 * Completely finish @rq. Must be called with queue lock held.
3030 void __blk_end_request_all(struct request *rq, blk_status_t error)
3032 bool pending;
3033 unsigned int bidi_bytes = 0;
3035 lockdep_assert_held(rq->q->queue_lock);
3036 WARN_ON_ONCE(rq->q->mq_ops);
3038 if (unlikely(blk_bidi_rq(rq)))
3039 bidi_bytes = blk_rq_bytes(rq->next_rq);
3041 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
3042 BUG_ON(pending);
3044 EXPORT_SYMBOL(__blk_end_request_all);
3047 * __blk_end_request_cur - Helper function to finish the current request chunk.
3048 * @rq: the request to finish the current chunk for
3049 * @error: block status code
3051 * Description:
3052 * Complete the current consecutively mapped chunk from @rq. Must
3053 * be called with queue lock held.
3055 * Return:
3056 * %false - we are done with this request
3057 * %true - still buffers pending for this request
3059 bool __blk_end_request_cur(struct request *rq, blk_status_t error)
3061 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
3063 EXPORT_SYMBOL(__blk_end_request_cur);
3065 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3066 struct bio *bio)
3068 if (bio_has_data(bio))
3069 rq->nr_phys_segments = bio_phys_segments(q, bio);
3070 else if (bio_op(bio) == REQ_OP_DISCARD)
3071 rq->nr_phys_segments = 1;
3073 rq->__data_len = bio->bi_iter.bi_size;
3074 rq->bio = rq->biotail = bio;
3076 if (bio->bi_disk)
3077 rq->rq_disk = bio->bi_disk;
3080 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
3082 * rq_flush_dcache_pages - Helper function to flush all pages in a request
3083 * @rq: the request to be flushed
3085 * Description:
3086 * Flush all pages in @rq.
3088 void rq_flush_dcache_pages(struct request *rq)
3090 struct req_iterator iter;
3091 struct bio_vec bvec;
3093 rq_for_each_segment(bvec, rq, iter)
3094 flush_dcache_page(bvec.bv_page);
3096 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
3097 #endif
3100 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
3101 * @q : the queue of the device being checked
3103 * Description:
3104 * Check if underlying low-level drivers of a device are busy.
3105 * If the drivers want to export their busy state, they must set their
3106 * own exporting function using blk_queue_lld_busy() first.
3108 * Basically, this function is used only by request stacking drivers
3109 * to stop dispatching requests to underlying devices when underlying
3110 * devices are busy. This behavior helps merge more I/O on the queue
3111 * of the request stacking driver and prevents I/O throughput regressions
3112 * under bursty I/O load.
3114 * Return:
3115 * 0 - Not busy (The request stacking driver should dispatch request)
3116 * 1 - Busy (The request stacking driver should stop dispatching request)
3118 int blk_lld_busy(struct request_queue *q)
3120 if (q->lld_busy_fn)
3121 return q->lld_busy_fn(q);
3123 return 0;
3125 EXPORT_SYMBOL_GPL(blk_lld_busy);
3128 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3129 * @rq: the clone request to be cleaned up
3131 * Description:
3132 * Free all bios in @rq for a cloned request.
3134 void blk_rq_unprep_clone(struct request *rq)
3136 struct bio *bio;
3138 while ((bio = rq->bio) != NULL) {
3139 rq->bio = bio->bi_next;
3141 bio_put(bio);
3144 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3147 * Copy attributes of the original request to the clone request.
3148 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3150 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3152 dst->cpu = src->cpu;
3153 dst->__sector = blk_rq_pos(src);
3154 dst->__data_len = blk_rq_bytes(src);
3155 if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3156 dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
3157 dst->special_vec = src->special_vec;
3159 dst->nr_phys_segments = src->nr_phys_segments;
3160 dst->ioprio = src->ioprio;
3161 dst->extra_len = src->extra_len;
3165 * blk_rq_prep_clone - Helper function to setup clone request
3166 * @rq: the request to be setup
3167 * @rq_src: original request to be cloned
3168 * @bs: bio_set that bios for clone are allocated from
3169 * @gfp_mask: memory allocation mask for bio
3170 * @bio_ctr: setup function to be called for each clone bio.
3171 * Returns %0 for success, non-zero for failure.
3172 * @data: private data to be passed to @bio_ctr
3174 * Description:
3175 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3176 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3177 * are not copied, and copying such parts is the caller's responsibility.
3178 * Also, the pages which the original bios point to are not copied;
3179 * the cloned bios just point to the same pages.
3180 * So cloned bios must be completed before original bios, which means
3181 * the caller must complete @rq before @rq_src.
3183 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3184 struct bio_set *bs, gfp_t gfp_mask,
3185 int (*bio_ctr)(struct bio *, struct bio *, void *),
3186 void *data)
3188 struct bio *bio, *bio_src;
3190 if (!bs)
3191 bs = fs_bio_set;
3193 __rq_for_each_bio(bio_src, rq_src) {
3194 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3195 if (!bio)
3196 goto free_and_out;
3198 if (bio_ctr && bio_ctr(bio, bio_src, data))
3199 goto free_and_out;
3201 if (rq->bio) {
3202 rq->biotail->bi_next = bio;
3203 rq->biotail = bio;
3204 } else
3205 rq->bio = rq->biotail = bio;
3208 __blk_rq_prep_clone(rq, rq_src);
3210 return 0;
3212 free_and_out:
3213 if (bio)
3214 bio_put(bio);
3215 blk_rq_unprep_clone(rq);
3217 return -ENOMEM;
3219 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
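/*
 * Clone lifecycle sketch (illustrative; clone, tio and my_bio_ctr() are
 * made-up names, in the style of request-based dm):
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, my_bio_ctr, tio))
 *		return -ENOMEM;		// clone is left unprepped on failure
 *	...
 *	blk_rq_unprep_clone(clone);	// frees the cloned bios when done
 */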
3221 int kblockd_schedule_work(struct work_struct *work)
3223 return queue_work(kblockd_workqueue, work);
3225 EXPORT_SYMBOL(kblockd_schedule_work);
3227 int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3229 return queue_work_on(cpu, kblockd_workqueue, work);
3231 EXPORT_SYMBOL(kblockd_schedule_work_on);
3233 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
3234 unsigned long delay)
3236 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3238 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
3240 int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3241 unsigned long delay)
3243 return queue_delayed_work(kblockd_workqueue, dwork, delay);
3245 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3247 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3248 unsigned long delay)
3250 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3252 EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
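/*
 * Deferring work to kblockd (a sketch; my_work and my_work_fn are
 * hypothetical).  kblockd is created with WQ_MEM_RECLAIM | WQ_HIGHPRI, so
 * it is suitable for unplug-style work that must make forward progress
 * even under memory reclaim.
 *
 *	INIT_WORK(&my_work, my_work_fn);
 *	kblockd_schedule_work(&my_work);
 */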
3255 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3256 * @plug: The &struct blk_plug that needs to be initialized
3258 * Description:
3259 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3260 * pending I/O should the task end up blocking between blk_start_plug() and
3261 * blk_finish_plug(). This is important from a performance perspective, but
3262 * also ensures that we don't deadlock. For instance, if the task is blocking
3263 * for a memory allocation, memory reclaim could end up wanting to free a
3264 * page belonging to that request that is currently residing in our private
3265 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3266 * this kind of deadlock.
3268 void blk_start_plug(struct blk_plug *plug)
3270 struct task_struct *tsk = current;
3273 * If this is a nested plug, don't actually assign it.
3275 if (tsk->plug)
3276 return;
3278 INIT_LIST_HEAD(&plug->list);
3279 INIT_LIST_HEAD(&plug->mq_list);
3280 INIT_LIST_HEAD(&plug->cb_list);
3282 * Store ordering should not be needed here, since a potential
3283 * preempt will imply a full memory barrier.
3285 tsk->plug = plug;
3287 EXPORT_SYMBOL(blk_start_plug);
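/*
 * Canonical plugging pattern (illustrative; submit_my_bios() stands in for
 * any code issuing several bios back to back):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_my_bios();	// requests batch up in current->plug
 *	blk_finish_plug(&plug);	// flushed here, or earlier if the task sleeps
 */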
3289 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3291 struct request *rqa = container_of(a, struct request, queuelist);
3292 struct request *rqb = container_of(b, struct request, queuelist);
3294 return !(rqa->q < rqb->q ||
3295 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
3299 * If 'from_schedule' is true, then postpone the dispatch of requests
3300 * until a safe kblockd context. We do this to avoid accidental big
3301 * additional stack usage in driver dispatch, in places where the original
3302 * plugger did not intend it.
3304 static void queue_unplugged(struct request_queue *q, unsigned int depth,
3305 bool from_schedule)
3306 __releases(q->queue_lock)
3308 lockdep_assert_held(q->queue_lock);
3310 trace_block_unplug(q, depth, !from_schedule);
3312 if (from_schedule)
3313 blk_run_queue_async(q);
3314 else
3315 __blk_run_queue(q);
3316 spin_unlock(q->queue_lock);
3319 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3321 LIST_HEAD(callbacks);
3323 while (!list_empty(&plug->cb_list)) {
3324 list_splice_init(&plug->cb_list, &callbacks);
3326 while (!list_empty(&callbacks)) {
3327 struct blk_plug_cb *cb = list_first_entry(&callbacks,
3328 struct blk_plug_cb,
3329 list);
3330 list_del(&cb->list);
3331 cb->callback(cb, from_schedule);
3336 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3337 int size)
3339 struct blk_plug *plug = current->plug;
3340 struct blk_plug_cb *cb;
3342 if (!plug)
3343 return NULL;
3345 list_for_each_entry(cb, &plug->cb_list, list)
3346 if (cb->callback == unplug && cb->data == data)
3347 return cb;
3349 /* Not currently on the callback list */
3350 BUG_ON(size < sizeof(*cb));
3351 cb = kzalloc(size, GFP_ATOMIC);
3352 if (cb) {
3353 cb->data = data;
3354 cb->callback = unplug;
3355 list_add(&cb->list, &plug->cb_list);
3357 return cb;
3359 EXPORT_SYMBOL(blk_check_plugged);
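/*
 * Usage sketch for blk_check_plugged() (illustrative; my_unplug() and
 * "data" are hypothetical, in the style of md).  The callback owns the cb
 * it is handed and must free it.
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		...			// issue the batched work
 *		kfree(cb);
 *	}
 *
 *	cb = blk_check_plugged(my_unplug, data, sizeof(*cb));
 *	if (!cb)
 *		...			// no plug active: act immediately
 */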
3361 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3363 struct request_queue *q;
3364 unsigned long flags;
3365 struct request *rq;
3366 LIST_HEAD(list);
3367 unsigned int depth;
3369 flush_plug_callbacks(plug, from_schedule);
3371 if (!list_empty(&plug->mq_list))
3372 blk_mq_flush_plug_list(plug, from_schedule);
3374 if (list_empty(&plug->list))
3375 return;
3377 list_splice_init(&plug->list, &list);
3379 list_sort(NULL, &list, plug_rq_cmp);
3381 q = NULL;
3382 depth = 0;
3385 * Save and disable interrupts here, to avoid doing it for every
3386 * queue lock we have to take.
3388 local_irq_save(flags);
3389 while (!list_empty(&list)) {
3390 rq = list_entry_rq(list.next);
3391 list_del_init(&rq->queuelist);
3392 BUG_ON(!rq->q);
3393 if (rq->q != q) {
3395 * This drops the queue lock
3397 if (q)
3398 queue_unplugged(q, depth, from_schedule);
3399 q = rq->q;
3400 depth = 0;
3401 spin_lock(q->queue_lock);
3405 * Short-circuit if @q is dead
3407 if (unlikely(blk_queue_dying(q))) {
3408 __blk_end_request_all(rq, BLK_STS_IOERR);
3409 continue;
3413 * rq is already accounted, so use raw insert
3415 if (op_is_flush(rq->cmd_flags))
3416 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3417 else
3418 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3420 depth++;
3424 * This drops the queue lock
3426 if (q)
3427 queue_unplugged(q, depth, from_schedule);
3429 local_irq_restore(flags);
3432 void blk_finish_plug(struct blk_plug *plug)
3434 if (plug != current->plug)
3435 return;
3436 blk_flush_plug_list(plug, false);
3438 current->plug = NULL;
3440 EXPORT_SYMBOL(blk_finish_plug);
3442 #ifdef CONFIG_PM
3444 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3445 * @q: the queue of the device
3446 * @dev: the device the queue belongs to
3448 * Description:
3449 * Initialize runtime-PM-related fields for @q and start auto suspend for
3450 * @dev. Drivers that want to take advantage of request-based runtime PM
3451 * should call this function after @dev has been initialized, and its
3452 * request queue @q has been allocated, and runtime PM for it cannot happen
3453 * yet (either because it is disabled/forbidden or because its usage_count > 0).
3454 * In most cases, the driver should call this function before any I/O has taken place.
3456 * This function takes care of setting up autosuspend for the device;
3457 * the autosuspend delay is set to -1 to make runtime suspend impossible
3458 * until an updated value is set either by the user or by the driver. Drivers do
3459 * not need to touch other autosuspend settings.
3461 * The block layer runtime PM is request based, so it only works for drivers
3462 * that use requests as their IO unit instead of those that directly use bios.
3464 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3466 /* Don't enable runtime PM for blk-mq until it is ready */
3467 if (q->mq_ops) {
3468 pm_runtime_disable(dev);
3469 return;
3472 q->dev = dev;
3473 q->rpm_status = RPM_ACTIVE;
3474 pm_runtime_set_autosuspend_delay(q->dev, -1);
3475 pm_runtime_use_autosuspend(q->dev);
3477 EXPORT_SYMBOL(blk_pm_runtime_init);
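/*
 * Runtime-suspend hook sketch using the helpers below (illustrative;
 * my_dev, my_hw_suspend() and the drvdata layout are assumptions):
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(md->queue);
 *		if (err)
 *			return err;		// queue busy: -EBUSY
 *		err = my_hw_suspend(md);
 *		blk_post_runtime_suspend(md->queue, err);
 *		return err;
 *	}
 */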
3480 * blk_pre_runtime_suspend - Pre runtime suspend check
3481 * @q: the queue of the device
3483 * Description:
3484 * This function will check if runtime suspend is allowed for the device
3485 * by examining if there are any requests pending in the queue. If there
3486 * are requests pending, the device cannot be runtime suspended; otherwise,
3487 * the queue's status will be updated to SUSPENDING and the driver can
3488 * proceed to suspend the device.
3490 * In the not-allowed case, we mark the device as last busy so that
3491 * runtime PM core will try to autosuspend it some time later.
3493 * This function should be called near the start of the device's
3494 * runtime_suspend callback.
3496 * Return:
3497 * 0 - OK to runtime suspend the device
3498 * -EBUSY - Device should not be runtime suspended
3500 int blk_pre_runtime_suspend(struct request_queue *q)
3502 int ret = 0;
3504 if (!q->dev)
3505 return ret;
3507 spin_lock_irq(q->queue_lock);
3508 if (q->nr_pending) {
3509 ret = -EBUSY;
3510 pm_runtime_mark_last_busy(q->dev);
3511 } else {
3512 q->rpm_status = RPM_SUSPENDING;
3514 spin_unlock_irq(q->queue_lock);
3515 return ret;
3517 EXPORT_SYMBOL(blk_pre_runtime_suspend);
3520 * blk_post_runtime_suspend - Post runtime suspend processing
3521 * @q: the queue of the device
3522 * @err: return value of the device's runtime_suspend function
3524 * Description:
3525 * Update the queue's runtime status according to the return value of the
3526 * device's runtime suspend function and mark last busy for the device so
3527 * that PM core will try to auto suspend the device at a later time.
3529 * This function should be called near the end of the device's
3530 * runtime_suspend callback.
3532 void blk_post_runtime_suspend(struct request_queue *q, int err)
3534 if (!q->dev)
3535 return;
3537 spin_lock_irq(q->queue_lock);
3538 if (!err) {
3539 q->rpm_status = RPM_SUSPENDED;
3540 } else {
3541 q->rpm_status = RPM_ACTIVE;
3542 pm_runtime_mark_last_busy(q->dev);
3544 spin_unlock_irq(q->queue_lock);
3546 EXPORT_SYMBOL(blk_post_runtime_suspend);
3549 * blk_pre_runtime_resume - Pre runtime resume processing
3550 * @q: the queue of the device
3552 * Description:
3553 * Update the queue's runtime status to RESUMING in preparation for the
3554 * runtime resume of the device.
3556 * This function should be called near the start of the device's
3557 * runtime_resume callback.
3559 void blk_pre_runtime_resume(struct request_queue *q)
3561 if (!q->dev)
3562 return;
3564 spin_lock_irq(q->queue_lock);
3565 q->rpm_status = RPM_RESUMING;
3566 spin_unlock_irq(q->queue_lock);
3568 EXPORT_SYMBOL(blk_pre_runtime_resume);
3571 * blk_post_runtime_resume - Post runtime resume processing
3572 * @q: the queue of the device
3573 * @err: return value of the device's runtime_resume function
3575 * Description:
3576 * Update the queue's runtime status according to the return value of the
3577 * device's runtime_resume function. If it is successfully resumed, process
3578 * the requests that were queued onto the device's queue while it was resuming,
3579 * and then mark last busy and initiate autosuspend for it.
3581 * This function should be called near the end of the device's
3582 * runtime_resume callback.
3584 void blk_post_runtime_resume(struct request_queue *q, int err)
3586 if (!q->dev)
3587 return;
3589 spin_lock_irq(q->queue_lock);
3590 if (!err) {
3591 q->rpm_status = RPM_ACTIVE;
3592 __blk_run_queue(q);
3593 pm_runtime_mark_last_busy(q->dev);
3594 pm_request_autosuspend(q->dev);
3595 } else {
3596 q->rpm_status = RPM_SUSPENDED;
3598 spin_unlock_irq(q->queue_lock);
3600 EXPORT_SYMBOL(blk_post_runtime_resume);
3603 * blk_set_runtime_active - Force runtime status of the queue to be active
3604 * @q: the queue of the device
3606 * If the device is left runtime suspended during system suspend, the resume
3607 * hook typically resumes the device and corrects runtime status
3608 * accordingly. However, that does not affect the queue runtime PM status
3609 * which is still "suspended". This prevents processing requests from the
3610 * queue.
3612 * This function can be used in driver's resume hook to correct queue
3613 * runtime PM status and re-enable peeking requests from the queue. It
3614 * should be called before the first request is added to the queue.
3616 void blk_set_runtime_active(struct request_queue *q)
3618 spin_lock_irq(q->queue_lock);
3619 q->rpm_status = RPM_ACTIVE;
3620 pm_runtime_mark_last_busy(q->dev);
3621 pm_request_autosuspend(q->dev);
3622 spin_unlock_irq(q->queue_lock);
3624 EXPORT_SYMBOL(blk_set_runtime_active);
3625 #endif
3627 int __init blk_dev_init(void)
3629 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3630 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3631 FIELD_SIZEOF(struct request, cmd_flags));
3632 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3633 FIELD_SIZEOF(struct bio, bi_opf));
3635 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3636 kblockd_workqueue = alloc_workqueue("kblockd",
3637 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3638 if (!kblockd_workqueue)
3639 panic("Failed to create kblockd\n");
3641 request_cachep = kmem_cache_create("blkdev_requests",
3642 sizeof(struct request), 0, SLAB_PANIC, NULL);
3644 blk_requestq_cachep = kmem_cache_create("request_queue",
3645 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3647 #ifdef CONFIG_DEBUG_FS
3648 blk_debugfs_root = debugfs_create_dir("block", NULL);
3649 #endif
3651 return 0;