/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
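/*
 * Editor's note (illustration, not part of the original header): in the
 * current code the "pieces" listed above correspond to the elevator_ops
 * callbacks an I/O scheduler fills in, e.g. elevator_add_req_fn
 * (insertion), elevator_merge_fn / elevator_allow_merge_fn (merge
 * decisions) and elevator_dispatch_fn (moving requests to the dispatch
 * queue), as used throughout this file.
 */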
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        \
                (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
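/*
 * Editor's illustration (not from the original source): a request is
 * hashed on the sector it *ends* at, so a bio that would append to it can
 * be found by looking up the bio's own starting sector.  For example, a
 * request starting at sector 2048 with 8 sectors has
 * rq_hash_key() == 2056; a bio with bi_sector == 2056 probes bucket
 * ELV_HASH_FN(2056) and finds that request as a back-merge candidate.
 */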
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_allow_merge_fn)
                return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

        return 1;
}
/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}
static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;
        int i;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                        GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}
static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e->hash);
        kfree(e);
}
int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name);
                if (!e)
                        return -EINVAL;
        }

        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                        "Using noop.\n");
                        e = elevator_get("noop");
                }
        }

        q->elevator = elevator_alloc(q, e);
        if (!q->elevator)
                return -ENOMEM;

        err = e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(elevator_init);
void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->type->ops.elevator_exit_fn)
                e->type->ops.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
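/*
 * Editor's illustrative sketch (not part of this file): a scheduler that
 * keeps a per-queue sort list typically pairs these helpers like so, with
 * "sort_list" being a hypothetical rb_root in its private data:
 *
 *      elv_rb_add(&ed->sort_list, rq);           // when a request is added
 *      elv_rb_del(&ed->sort_list, rq);           // on dispatch or merge
 *      next = elv_rb_find(&ed->sort_list, pos);  // front-merge lookup
 *
 * deadline-iosched, for example, maintains its per-direction sort lists
 * in essentially this pattern.
 */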
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if ((rq->cmd_flags & REQ_DISCARD) !=
                    (pos->cmd_flags & REQ_DISCARD))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
                ret = blk_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.elevator_merge_fn)
                return e->type->ops.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
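/*
 * Editor's note (illustration, not from the original source): the three
 * merge levels above are what the per-queue sysfs knob
 * /sys/block/<dev>/queue/nomerges selects between, where 0 enables all
 * merging, 1 keeps only the simple one-hit tries (noxmerges) and 2
 * disables merge attempts entirely, e.g.:
 *
 *      echo 2 > /sys/block/sda/queue/nomerges
 */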
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
                                     struct request *rq)
{
        struct request *__rq;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, blk_rq_pos(rq));
        if (__rq && blk_attempt_req_merge(q, __rq, rq))
                return true;

        return false;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_merged_fn)
                e->type->ops.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        const int next_sorted = next->cmd_flags & REQ_SORTED;

        if (next_sorted && e->type->ops.elevator_merge_req_fn)
                e->type->ops.elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}
void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_bio_merged_fn)
                e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->cmd_flags & REQ_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
void elv_drain_elevator(struct request_queue *q)
{
        static int printed;

        lockdep_assert_held(q->queue_lock);

        while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS ||
                    (rq->cmd_flags & REQ_DISCARD)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
        case ELEVATOR_INSERT_SORT:
                BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
                       !(rq->cmd_flags & REQ_DISCARD));
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
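/*
 * Editor's illustrative note (not from the original source): callers that
 * already hold q->queue_lock use __elv_add_request() directly, while
 * elv_add_request() is the unlocked convenience wrapper.  A hypothetical
 * caller doing its own locking would look like:
 *
 *      spin_lock_irqsave(q->queue_lock, flags);
 *      __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 *      spin_unlock_irqrestore(q->queue_lock, flags);
 */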
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_latter_req_fn)
                return e->type->ops.elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_former_req_fn)
                return e->type->ops.elevator_former_req_fn(q, rq);
        return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_set_req_fn)
                return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}
void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_put_req_fn)
                e->type->ops.elevator_put_req_fn(rq);
}
int elv_may_queue(struct request_queue *q, int rw)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_may_queue_fn)
                return e->type->ops.elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}
void elv_abort_queue(struct request_queue *q)
{
        struct request *rq;

        blk_abort_flushes(q);

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
                /*
                 * Mark this request as started so we don't trigger
                 * any debug logic in the end I/O path.
                 */
                blk_start_request(rq);
                __blk_end_request_all(rq, -EIO);
        }
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->cmd_flags & REQ_SORTED) &&
                    e->type->ops.elevator_completed_req_fn)
                        e->type->ops.elevator_completed_req_fn(q, rq);
        }
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};
int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
        }
}
EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
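/*
 * Editor's illustrative sketch (not part of this file; all "example_*"
 * names are hypothetical): an I/O scheduler module registers itself with
 * elv_register() from its init hook and removes itself with
 * elv_unregister() on exit, in the same shape as noop-iosched.c:
 *
 *      static struct elevator_type elevator_example = {
 *              .ops = {
 *                      .elevator_merge_req_fn  = example_merged_requests,
 *                      .elevator_dispatch_fn   = example_dispatch,
 *                      .elevator_add_req_fn    = example_add_request,
 *                      .elevator_init_fn       = example_init_queue,
 *                      .elevator_exit_fn       = example_exit_queue,
 *              },
 *              .elevator_name = "example",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return elv_register(&elevator_example);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              elv_unregister(&elevator_example);
 *      }
 */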
void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool registered = old->registered;
        int err;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        blk_queue_bypass_start(q);

        /* unregister and clear all auxiliary data of the old elevator */
        if (registered)
                elv_unregister_queue(q);

        spin_lock_irq(q->queue_lock);
        ioc_clear_queue(q);
        spin_unlock_irq(q->queue_lock);

        /* allocate, init and register new elevator */
        err = -ENOMEM;
        q->elevator = elevator_alloc(q, new_e);
        if (!q->elevator)
                goto fail_init;

        err = new_e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                goto fail_init;
        }

        if (registered) {
                err = elv_register_queue(q);
                if (err)
                        goto fail_register;
        }

        /* done, kill the old one and finish */
        elevator_exit(old);
        blk_queue_bypass_end(q);

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        q->elevator = old;
        elv_register_queue(q);
        blk_queue_bypass_end(q);

        return err;
}
/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        if (!q->elevator)
                return -ENXIO;

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
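/*
 * Editor's illustrative note (not from the original source): from user
 * space the switch is normally driven through the per-queue sysfs
 * attribute, e.g.
 *
 *      cat /sys/block/sda/queue/scheduler        # -> noop deadline [cfq]
 *      echo deadline > /sys/block/sda/queue/scheduler
 *
 * which lands in elv_iosched_store() below and then elevator_change().
 */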
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!q->elevator)
                return count;

        ret = elevator_change(q, name);
        if (!ret)
                return count;

        printk(KERN_ERR "elevator: switch to %s failed\n", name);
        return ret;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv;
        struct elevator_type *__e;
        int len = 0;

        if (!q->elevator || !blk_queue_stackable(q))
                return sprintf(name, "none\n");

        elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(len+name, "\n");
        return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);