/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is now right
 * - completely modularize elevator setup and teardown
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq))
                return 0;

        /*
         * different data direction or already started, don't merge
         */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return 0;

        /*
         * same device and no special stuff set, merge is ok
         */
        if (rq->rq_disk == bio->bi_bdev->bd_disk &&
            !rq->waiting && !rq->special)
                return 1;

        return 0;
}

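/*
 * check whether bio lines up directly behind or in front of __rq,
 * making it a back or front merge candidate
 */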
inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
        int ret = ELEVATOR_NO_MERGE;

        /*
         * we can merge and sequence is ok, check if it's possible
         */
        if (elv_rq_merge_ok(__rq, bio)) {
                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
                        ret = ELEVATOR_BACK_MERGE;
                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
                        ret = ELEVATOR_FRONT_MERGE;
        }

        return ret;
}

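/*
 * fast path: retry the merge against the cached ->last_merge request,
 * if the queue has one
 */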
inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
        if (q->last_merge)
                return elv_try_merge(q->last_merge, bio);

        return ELEVATOR_NO_MERGE;
}

/*
 * general block -> elevator interface starts here
 */

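/*
 * attach an elevator to a queue: copy the chosen elevator type into the
 * queue and let the io scheduler initialise its private data
 */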
int elevator_init(request_queue_t *q, elevator_t *type)
{
        elevator_t *e = &q->elevator;

        memcpy(e, type, sizeof(*e));

        INIT_LIST_HEAD(&q->queue_head);

        if (e->elevator_init_fn)
                return e->elevator_init_fn(q, e);

        return 0;
}

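/*
 * detach the io scheduler from the queue, letting it free its private
 * data
 */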
void elevator_exit(request_queue_t *q)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_exit_fn)
                e->elevator_exit_fn(q, e);
}

int elevator_global_init(void)
{
        return 0;
}

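/*
 * ask the io scheduler whether, and where, bio can be merged into an
 * existing request on the queue
 */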
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_merge_fn)
                return e->elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

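/*
 * notify the io scheduler that rq has grown because a bio was merged
 * into it
 */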
void elv_merged_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_merged_fn)
                e->elevator_merged_fn(q, rq);
}

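/*
 * rq and next have been merged into one request; drop a stale
 * ->last_merge hint and let the io scheduler release its state for next
 */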
void elv_merge_requests(request_queue_t *q, struct request *rq,
                        struct request *next)
{
        elevator_t *e = &q->elevator;

        if (q->last_merge == next)
                q->last_merge = NULL;

        if (e->elevator_merge_req_fn)
                e->elevator_merge_req_fn(q, rq, next);
}

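/*
 * put a request the driver has given back onto the queue so it gets
 * dispatched again
 */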
void elv_requeue_request(request_queue_t *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq))
                q->in_flight--;

        /*
         * if iosched has an explicit requeue hook, then use that. otherwise
         * just put the request at the front of the queue
         */
        if (q->elevator.elevator_requeue_req_fn)
                q->elevator.elevator_requeue_req_fn(q, rq);
        else
                __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

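/*
 * core insertion path: barrier requests are forced to back insertion,
 * the request is handed to the io scheduler's add_req hook, and a
 * plugged queue is kicked once enough requests have built up
 */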
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
{
        /*
         * barriers implicitly indicate back insertion
         */
        if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
            where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;

        if (plug)
                blk_plug_device(q);

        q->elevator.elevator_add_req_fn(q, rq, where);

        if (blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE] - q->in_flight;

                if (nrq == q->unplug_thresh)
                        __generic_unplug_device(q);
        }
}

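/*
 * same as __elv_add_request(), but grabs the queue lock around the
 * insertion
 */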
void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where, plug);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
        return q->elevator.elevator_next_req_fn(q);
}

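/*
 * return the next request for the driver to process, running the
 * queue's prep_rq_fn on it first unless it is already prepped
 */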
struct request *elv_next_request(request_queue_t *q)
{
        struct request *rq;
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
                /*
                 * just mark as started even if we don't start it, a request
                 * that has been delayed should not be passed by new incoming
                 * requests
                 */
                rq->flags |= REQ_STARTED;

                if (rq == q->last_merge)
                        q->last_merge = NULL;

                if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
                        break;

                ret = q->prep_rq_fn(q, rq);
                if (ret == BLKPREP_OK) {
                        break;
                } else if (ret == BLKPREP_DEFER) {
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL) {
                        int nr_bytes = rq->hard_nr_sectors << 9;

                        if (!nr_bytes)
                                nr_bytes = rq->data_len;

                        blkdev_dequeue_request(rq);
                        rq->flags |= REQ_QUIET;
                        end_that_request_chunk(rq, 0, nr_bytes);
                        end_that_request_last(rq);
                } else {
                        printk("%s: bad return=%d\n", __FUNCTION__, ret);
                        break;
                }
        }

        return rq;
}

void elv_remove_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = &q->elevator;

        /*
         * the time frame between a request being removed from the lists
         * and it being freed is accounted as io that is in progress at
         * the driver side. note that we only account requests that the
         * driver has seen (REQ_STARTED set), to avoid false accounting
         * for request-request merges
         */
        if (blk_account_rq(rq))
                q->in_flight++;

        /*
         * the main clearing point for q->last_merge is on retrieval of
         * request by driver (it calls elv_next_request()), but it _can_
         * also happen here if a request is added to the queue but later
         * deleted without ever being given to driver (merged with another
         * request)
         */
        if (rq == q->last_merge)
                q->last_merge = NULL;

        if (e->elevator_remove_req_fn)
                e->elevator_remove_req_fn(q, rq);
}

int elv_queue_empty(request_queue_t *q)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_queue_empty_fn)
                return e->elevator_queue_empty_fn(q);

        return list_empty(&q->queue_head);
}

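/*
 * elv_latter_request()/elv_former_request() below return the request
 * that follows/precedes rq in the scheduler's ordering, falling back to
 * plain queue list neighbours when the io scheduler has no hook
 */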
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
        struct list_head *next;
        elevator_t *e = &q->elevator;

        if (e->elevator_latter_req_fn)
                return e->elevator_latter_req_fn(q, rq);

        next = rq->queuelist.next;
        if (next != &q->queue_head && next != &rq->queuelist)
                return list_entry_rq(next);

        return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
        struct list_head *prev;
        elevator_t *e = &q->elevator;

        if (e->elevator_former_req_fn)
                return e->elevator_former_req_fn(q, rq);

        prev = rq->queuelist.prev;
        if (prev != &q->queue_head && prev != &rq->queuelist)
                return list_entry_rq(prev);

        return NULL;
}

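/*
 * give the io scheduler a chance to attach private data to a new
 * request
 */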
int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_set_req_fn)
                return e->elevator_set_req_fn(q, rq, gfp_mask);

        rq->elevator_private = NULL;
        return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_put_req_fn)
                e->elevator_put_req_fn(q, rq);
}

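/*
 * ask the io scheduler whether a new request may be queued for this
 * data direction
 */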
int elv_may_queue(request_queue_t *q, int rw)
{
        elevator_t *e = &q->elevator;

        if (e->elevator_may_queue_fn)
                return e->elevator_may_queue_fn(q, rw);

        return 0;
}

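/*
 * the driver has finished rq; update in-flight accounting and notify
 * the io scheduler
 */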
void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = &q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq))
                q->in_flight--;

        if (e->elevator_completed_req_fn)
                e->elevator_completed_req_fn(q, rq);
}

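/*
 * register the "iosched" kobject under the queue's kobject in sysfs
 */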
int elv_register_queue(struct request_queue *q)
{
        elevator_t *e = &q->elevator;

        e->kobj.parent = kobject_get(&q->kobj);
        if (!e->kobj.parent)
                return -EBUSY;

        snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
        e->kobj.ktype = e->elevator_ktype;

        return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                elevator_t *e = &q->elevator;
                kobject_unregister(&e->kobj);
                kobject_put(&q->kobj);
        }
}

module_init(elevator_global_init);

EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_remove_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);