/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
EXPORT_SYMBOL(elv_try_merge);
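/*
 * Worked example of the merge geometry above: if __rq covers sectors
 * 100..107 (->sector == 100, ->nr_sectors == 8), a bio starting at
 * bi_sector == 108 lines up with the tail of the request
 * (100 + 8 == 108) and is a back merge candidate.  A bio of
 * bio_sectors() == 8 starting at bi_sector == 92 ends exactly where
 * the request begins (100 - 8 == 92) and is a front merge candidate.
 */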
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}
static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}
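/*
 * Note: a successful elevator_get() takes a reference on the scheduler
 * module via try_module_get(); every such reference must eventually be
 * dropped again with elevator_put(), or the module can never be
 * unloaded.
 */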
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
			   struct elevator_queue *eq)
{
	int ret = 0;

	memset(eq, 0, sizeof(*eq));
	eq->ops = &e->ops;
	eq->elevator_type = e;

	q->elevator = eq;

	if (eq->ops->elevator_init_fn)
		ret = eq->ops->elevator_init_fn(q, eq);

	return ret;
}
static char chosen_elevator[16];
static void elevator_setup_default(void)
{
	struct elevator_type *e;

	/*
	 * If default has not been set, use the compiled-in selection.
	 */
	if (!chosen_elevator[0])
		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);

	/*
	 * If the given scheduler is not available, fall back to no-op.
	 */
	if ((e = elevator_find(chosen_elevator)))
		elevator_put(e);
	else
		strcpy(chosen_elevator, "noop");
}
static int __init elevator_setup(char *str)
{
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
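/*
 * Usage (illustrative): booting with "elevator=deadline" on the kernel
 * command line selects the default io scheduler for all queues.  The
 * scheduler of a running queue can also be changed per device through
 * sysfs, e.g.:
 *
 *	# cat /sys/block/hda/queue/scheduler
 *	noop anticipatory deadline [cfq]
 *	# echo deadline > /sys/block/hda/queue/scheduler
 *
 * which ends up in elv_iosched_show()/elv_iosched_store() below.
 */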
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	elevator_setup_default();

	if (!name)
		name = chosen_elevator;

	e = elevator_get(name);
	if (!e)
		return -EINVAL;

	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
	if (!eq) {
		elevator_put(e);
		return -ENOMEM;
	}

	ret = elevator_attach(q, e, eq);
	if (ret) {
		kfree(eq);
		elevator_put(e);
	}

	return ret;
}
void elevator_exit(elevator_t *e)
{
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);

	elevator_put(e->elevator_type);
	e->elevator_type = NULL;
	kfree(e);
}
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sort-inserted based on its start sector; requests that
 * must not be passed (barriers, already-started requests) are never
 * crossed.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;
	q->nr_sorted--;

	boundary = q->end_sector;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
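/*
 * Illustration of the boundary handling above: with
 * q->end_sector == 1000, the dispatch list is kept in one-way elevator
 * order starting at the boundary, e.g.
 *
 *	1000, 1200, 5000, 8, 200, 600
 *
 * A new rq at sector 2000 (>= boundary) is sorted among the
 * >= boundary requests; an rq at sector 100 (< boundary) belongs to
 * the wrapped requests at the tail, so the backwards scan stops there.
 */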
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	int ret;

	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
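/*
 * q->last_merge acts as a one-hit merge cache: the request most
 * recently involved in a merge is probed first, so a stream of
 * contiguous bios keeps hitting the same request without consulting
 * the io scheduler's (potentially more expensive) merge lookup.
 */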
void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);

	q->last_merge = rq;
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);
	q->nr_sorted--;

	q->last_merge = rq;
}
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	/*
	 * if this is the flush, requeue the original instead and drop the flush
	 */
	if (rq->flags & REQ_BAR_FLUSH) {
		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
		rq = rq->end_io_data;
	}

	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
static void elv_drain_elevator(request_queue_t *q)
{
	static int printed;

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->nr_sorted++;
		if (q->last_merge == NULL && rq_mergeable(rq))
			q->last_merge = rq;
		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			  - q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	if (unlikely(list_empty(&q->queue_head) &&
		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
		return NULL;

	rq = list_entry_rq(q->queue_head.next);

	/*
	 * if this is a barrier write and the device has to issue a
	 * flush sequence to support it, check how far we are
	 */
	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

		if (q->ordered == QUEUE_ORDERED_FLUSH &&
		    !blk_barrier_preflush(rq))
			rq = blk_start_pre_flush(q, rq);
	}

	return rq;
}
struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}
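/*
 * For context, a sketch of how a low-level driver of this era consumes
 * requests (hypothetical driver; the function name is invented for
 * illustration):
 *
 *	static void example_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			blkdev_dequeue_request(rq);
 *			... submit rq to the hardware; completion later
 *			    runs end_that_request_chunk()/_last() ...
 *		}
 *	}
 *
 * Note that elv_next_request() returns the request at the head of the
 * dispatch queue without removing it; dequeueing is a separate step.
 */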
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and it being freed is accounted as io in progress on the
	 * driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}
int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}
struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}
void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}
int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}
void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}
int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	e->kobj.parent = kobject_get(&q->kobj);
	if (!e->kobj.parent)
		return -EBUSY;

	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	e->kobj.ktype = e->elevator_type->elevator_ktype;

	return kobject_register(&e->kobj);
}
void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		elevator_t *e = q->elevator;
		kobject_unregister(&e->kobj);
		kobject_put(&q->kobj);
	}
}
int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	if (elevator_find(e->elevator_name))
		BUG();
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator))
		printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
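/*
 * A minimal sketch of what a registering io scheduler looks like
 * (illustrative only, with invented names; modelled loosely on the
 * no-op scheduler in drivers/block/noop-iosched.c):
 *
 *	static struct elevator_type elevator_example = {
 *		.ops = {
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *
 * A modular scheduler must call elv_unregister() on exit, after which
 * no queue may still reference the type.
 */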
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and remove their
	 * io contexts.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		struct io_context *ioc = p->io_context;
		if (ioc && ioc->cic) {
			ioc->cic->exit(ioc->cic);
			ioc->cic->dtor(ioc->cic);
			ioc->cic = NULL;
		}
		if (ioc && ioc->aic) {
			ioc->aic->exit(ioc->aic);
			ioc->aic->dtor(ioc->aic);
			ioc->aic = NULL;
		}
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;

	/*
	 * Allocate new elevator
	 */
	e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	if (!e)
		goto error;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	spin_unlock_irq(q->queue_lock);

	/*
	 * unregister old elevator data
	 */
	elv_unregister_queue(q);
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	if (elevator_attach(q, new_e, e))
		goto fail;

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	e = NULL;
fail:
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	kfree(e);
error:
	elevator_put(new_e);
	printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	elevator_switch(q, e);
	return count;
}
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	spin_lock_irq(q->queue_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(q->queue_lock);

	len += sprintf(name+len, "\n");
	return len;
}
EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);