/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
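
/*
 * Merge hash key: the sector just past the end of a request (start + size),
 * i.e. where a back-mergeable bio would have to begin. elv_rqhash_find()
 * looks requests up by this key when probing for back merges.
 */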
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
                return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

        return 1;
}
/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);
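
/*
 * Note that a merge has to pass two independent gates: blk_rq_merge_ok()
 * checks the mechanical constraints enforced by the block core, while
 * elv_iosched_allow_bio_merge() gives the active scheduler a chance to
 * veto the merge for policy reasons.
 */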
static bool elevator_match(const struct elevator_type *e, const char *name)
{
        if (!strcmp(e->elevator_name, name))
                return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
                return true;

        return false;
}
/*
 * Return scheduler with name 'name' and with matching 'mq capability'.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (elevator_match(e, name) && (mq == e->uses_mq))
                        return e;
        }

        return NULL;
}
static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}
static struct elevator_type *elevator_get(struct request_queue *q,
                                          const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name, q->mq_ops != NULL);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name, q->mq_ops != NULL);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);
        return e;
}
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        /*
         * Boot parameter is deprecated, we haven't supported that for MQ.
         * Only look for non-mq schedulers from here.
         */
        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator, false);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}
static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);
        eq->uses_mq = e->uses_mq;

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}
/*
 * Use the default elevator specified by config boot param for non-mq devices,
 * or by config option. Don't try to load modules as we could be running off
 * async and request_module() isn't allowed from async.
 */
int elevator_init(struct request_queue *q)
{
        struct elevator_type *e = NULL;
        int err = 0;

        /*
         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
         */
        mutex_lock(&q->sysfs_lock);
        if (unlikely(q->elevator))
                goto out_unlock;

        if (*chosen_elevator) {
                e = elevator_get(q, chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e)
                e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
        if (!e) {
                printk(KERN_ERR
                        "Default I/O scheduler not found. Using noop.\n");
                e = elevator_get(q, "noop", false);
        }

        err = e->ops.sq.elevator_init_fn(q, e);
        if (err)
                elevator_put(e);
out_unlock:
        mutex_unlock(&q->sysfs_lock);
        return err;
}
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
                blk_mq_exit_sched(q, e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);
void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
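
/*
 * Note on the insertion above: the key is the request's start sector, and
 * requests with an equal key are deliberately pushed into the right subtree,
 * so later insertions at the same sector end up after earlier ones in tree
 * order.
 */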
void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
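
/*
 * The scan above walks the dispatch list backwards and stops at the first
 * position that keeps the list ordered as a single elevator sweep starting
 * at q->end_sector: requests at or beyond that boundary come first in sector
 * order, followed by requests below it, and already started or barrier
 * requests are never jumped over.
 */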
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->uses_mq && e->type->ops.mq.request_merge)
                return e->type->ops.mq.request_merge(q, req, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
                return e->type->ops.sq.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
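
/*
 * elv_merge() thus tries three things in order of increasing cost: the
 * one-hit last_merge cache, the request hash (back merges only), and
 * finally the scheduler's own merge hook, which is typically where front
 * merges are found via its sorted structures.
 */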
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
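
/*
 * The loop above keeps chaining back merges: once 'rq' has been appended to
 * '__rq', the combined request may in turn be appendable to yet another
 * queued request, so the lookup is repeated with the surviving request until
 * no further merge is possible.
 */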
void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.request_merged)
                e->type->ops.mq.request_merged(q, rq, type);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
                e->type->ops.sq.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;

        if (e->uses_mq && e->type->ops.mq.requests_merged)
                e->type->ops.mq.requests_merged(q, rq, next);
        else if (e->type->ops.sq.elevator_merge_req_fn) {
                next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
                if (next_sorted)
                        e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
        }

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}
void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_bio_merged_fn)
                e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->rq_flags & RQF_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->rq_flags &= ~RQF_STARTED;

        blk_pm_requeue_request(rq);

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
void elv_drain_elevator(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        static int printed;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        lockdep_assert_held(q->queue_lock);

        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        blk_pm_add_request(q, rq);

        rq->q = q;

        if (rq->rq_flags & RQF_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (!blk_rq_is_passthrough(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->rq_flags |= RQF_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
                /* fall through */
        case ELEVATOR_INSERT_SORT:
                BUG_ON(blk_rq_is_passthrough(rq));
                rq->rq_flags |= RQF_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->rq_flags |= RQF_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.next_request)
                return e->type->ops.mq.next_request(q, rq);
        else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
                return e->type->ops.sq.elevator_latter_req_fn(q, rq);

        return NULL;
}
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.former_request)
                return e->type->ops.mq.former_request(q, rq);
        if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
                return e->type->ops.sq.elevator_former_req_fn(q, rq);

        return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_set_req_fn)
                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}
void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_put_req_fn)
                e->type->ops.sq.elevator_put_req_fn(rq);
}
int elv_may_queue(struct request_queue *q, unsigned int op)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_may_queue_fn)
                return e->type->ops.sq.elevator_may_queue_fn(q, op);

        return ELV_MQUEUE_MAY;
}
void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->rq_flags & RQF_SORTED) &&
                    e->type->ops.sq.elevator_completed_req_fn)
                        e->type->ops.sq.elevator_completed_req_fn(q, rq);
        }
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};
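
/*
 * elv_ktype ties the per-queue elevator kobject to elevator_release(), so the
 * elevator_type module reference and the elevator_queue memory are dropped
 * only when the last kobject reference (e.g. an open sysfs file) goes away.
 */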
int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
                if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
                        e->type->ops.sq.elevator_registered_fn(q);
        }
        return error;
}
void elv_unregister_queue(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
                /* Re-enable throttling in case elevator disabled it */
                wbt_enable_default(q);
        }
}
int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, e->uses_mq)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (elevator_match(e, chosen_elevator) ||
                        (!*chosen_elevator &&
                         elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
                                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
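
/*
 * A scheduler module typically declares a static struct elevator_type (at
 * minimum elevator_name, elevator_owner and the sq/mq ops it implements,
 * optionally icq_size/icq_align for per-io-context state) and calls
 * elv_register() from its module init and elv_unregister() from module exit.
 */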
void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);
int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out;

        if (new_e) {
                ret = elv_register_queue(q);
                if (ret) {
                        elevator_exit(q, q->elevator);
                        goto out;
                }
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

out:
        return ret;
}
/*
 * For blk-mq devices, we default to using mq-deadline, if available, for single
 * queue devices.  If deadline isn't available OR we have multiple queues,
 * default to "none".
 */
int elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err = 0;

        if (q->nr_hw_queues != 1)
                return 0;

        /*
         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
         */
        mutex_lock(&q->sysfs_lock);
        if (unlikely(q->elevator))
                goto out_unlock;

        e = elevator_get(q, "mq-deadline", false);
        if (!e)
                goto out_unlock;

        err = blk_mq_init_sched(q, e);
        if (err)
                elevator_put(e);
out_unlock:
        mutex_unlock(&q->sysfs_lock);
        return err;
}
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool old_registered = false;
        int err;

        lockdep_assert_held(&q->sysfs_lock);

        if (q->mq_ops) {
                blk_mq_freeze_queue(q);
                blk_mq_quiesce_queue(q);

                err = elevator_switch_mq(q, new_e);

                blk_mq_unquiesce_queue(q);
                blk_mq_unfreeze_queue(q);

                return err;
        }

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        if (old) {
                old_registered = old->registered;

                blk_queue_bypass_start(q);

                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
                        elv_unregister_queue(q);

                ioc_clear_queue(q);
        }

        /* allocate, init and register new elevator */
        err = new_e->ops.sq.elevator_init_fn(q, new_e);
        if (err)
                goto fail_init;

        err = elv_register_queue(q);
        if (err)
                goto fail_register;

        /* done, kill the old one and finish */
        if (old) {
                elevator_exit(q, old);
                blk_queue_bypass_end(q);
        }

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q, q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
                blk_queue_bypass_end(q);
        }

        return err;
}
/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /* Make sure queue is not in the middle of being removed */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (q->mq_ops && !strncmp(name, "none", 4))
                return elevator_switch(q, NULL);

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(q, strstrip(elevator_name), true);
        if (!e)
                return -EINVAL;

        if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}
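
/*
 * This is the path taken when user space writes a scheduler name to the
 * queue's sysfs "scheduler" attribute; elv_iosched_store() below feeds the
 * written string to __elevator_change().
 */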
static inline bool elv_support_iosched(struct request_queue *q)
{
        if (q->mq_ops && q->tag_set && (q->tag_set->flags &
                                BLK_MQ_F_NO_SCHED))
                return false;
        return true;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        return ret;
}
elv_iosched_show(struct request_queue
*q
, char *name
)
1117 struct elevator_queue
*e
= q
->elevator
;
1118 struct elevator_type
*elv
= NULL
;
1119 struct elevator_type
*__e
;
1120 bool uses_mq
= q
->mq_ops
!= NULL
;
1123 if (!queue_is_rq_based(q
))
1124 return sprintf(name
, "none\n");
1127 len
+= sprintf(name
+len
, "[none] ");
1131 spin_lock(&elv_list_lock
);
1132 list_for_each_entry(__e
, &elv_list
, list
) {
1133 if (elv
&& elevator_match(elv
, __e
->elevator_name
) &&
1134 (__e
->uses_mq
== uses_mq
)) {
1135 len
+= sprintf(name
+len
, "[%s] ", elv
->elevator_name
);
1138 if (__e
->uses_mq
&& q
->mq_ops
&& elv_support_iosched(q
))
1139 len
+= sprintf(name
+len
, "%s ", __e
->elevator_name
);
1140 else if (!__e
->uses_mq
&& !q
->mq_ops
)
1141 len
+= sprintf(name
+len
, "%s ", __e
->elevator_name
);
1143 spin_unlock(&elv_list_lock
);
1145 if (q
->mq_ops
&& q
->elevator
)
1146 len
+= sprintf(name
+len
, "none");
1148 len
+= sprintf(len
+name
, "\n");
struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);
struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);