/*
 *  linux/drivers/block/deadline-iosched.c
 *
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2;  /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2;    /* max times reads can starve a write */
static int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */
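
/*
 * Example: with the defaults above, a read queued at jiffies t is due at
 * t + HZ/2 (500ms regardless of the HZ setting) and a write at t + 5*HZ
 * (5s). Both are soft deadlines: they bias dispatch order, they never
 * abort or fail i/o.
 */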
static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)	((sec) >> 3)
#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)		(drq)->on_hash
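
/*
 * Note: the hash is keyed on a request's *end* sector (rq_hash_key), so
 * a bio that starts exactly where a request ends finds its back merge
 * candidate in one lookup. DL_HASH_BLOCK folds 8 adjacent sectors into
 * one block index before hashing into the 2^deadline_hash_shift chains.
 */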
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct deadline_rq *next_drq[2];
	struct list_head *dispatch;	/* driver dispatch queue */
	struct list_head *hash;		/* request hash */
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};
/*
 * pre-request data.
 */
struct deadline_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct list_head hash;
	char on_hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
	drq->on_hash = 0;
	list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
	if (ON_HASH(drq))
		__deadline_del_drq_hash(drq);
}

static void
deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
{
	deadline_del_drq_hash(drq);

	if (q->last_merge == drq->request)
		q->last_merge = NULL;
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;

	BUG_ON(ON_HASH(drq));

	drq->on_hash = 1;
	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

	if (ON_HASH(drq) && drq->hash.prev != head) {
		list_del(&drq->hash);
		list_add(&drq->hash, head);
	}
}
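
/*
 * Back merge lookup: return the hashed request ending exactly at
 * @offset. Entries that are no longer mergeable are pruned lazily here,
 * in passing, rather than at the moment they became unmergeable.
 */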
static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct deadline_rq *drq = list_entry_hash(entry);
		struct request *__rq = drq->request;

		next = entry->next;

		BUG_ON(!ON_HASH(drq));

		if (!rq_mergeable(__rq)) {
			__deadline_del_drq_hash(drq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}
/*
 * rb tree support functions
 */
#define RB_NONE			(2)
#define RB_EMPTY(root)		((root)->rb_node == NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)		((node)->rb_color = RB_NONE)
#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)		(rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
	struct rb_node *parent = NULL;
	struct deadline_rq *__drq;

	while (*p) {
		parent = *p;
		__drq = rb_entry_drq(parent);

		if (drq->rb_key < __drq->rb_key)
			p = &(*p)->rb_left;
		else if (drq->rb_key > __drq->rb_key)
			p = &(*p)->rb_right;
		else
			return __drq;
	}

	rb_link_node(&drq->rb_node, parent, p);
	return NULL;
}
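
/*
 * Insert into the sort tree. If another request already holds this
 * rb_key (same start sector), the incumbent is pushed straight to the
 * dispatch queue to free the slot and the insert is retried.
 */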
static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct deadline_rq *__alias;

	drq->rb_key = rq_rb_key(drq->request);

retry:
	__alias = __deadline_add_drq_rb(dd, drq);
	if (!__alias) {
		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		return;
	}

	deadline_move_request(dd, __alias);
	goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&drq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
	}

	if (ON_RB(&drq->rb_node)) {
		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		RB_CLEAR(&drq->rb_node);
	}
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;
	struct deadline_rq *drq;

	while (n) {
		drq = rb_entry_drq(n);

		if (sector < drq->rb_key)
			n = n->rb_left;
		else if (sector > drq->rb_key)
			n = n->rb_right;
		else
			return drq->request;
	}

	return NULL;
}
/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_drq(n);

		n = n->rb_left;
	}
}
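
/*
 * Note: deadline_find_first_drq() assumes a non-empty tree; callers in
 * the dispatch path assert this first (see the BUG_ON(RB_EMPTY(...))
 * checks below).
 */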
/*
 * add drq to rbtree and fifo
 */
static inline void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, drq);

	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

	if (rq_mergeable(rq)) {
		deadline_add_drq_hash(dd, drq);

		if (!q->last_merge)
			q->last_merge = rq;
	}
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);

	if (drq) {
		struct deadline_data *dd = q->elevator->elevator_data;

		list_del_init(&drq->fifo);
		deadline_remove_merge_hints(q, drq);
		deadline_del_drq_rb(dd, drq);
	}
}
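
/*
 * Merging tries the cheapest candidate first: the cached last_merge
 * hint, then a hash probe for a back merge (bio appended to an existing
 * request), then, only if front_merges is enabled, an rbtree probe for
 * a front merge (bio prepended to an existing request).
 */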
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
static void deadline_merged_request(request_queue_t *q, struct request *req)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	q->last_merge = req;
}
static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * reposition drq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
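
/*
 * Note on the two handlers above: a merged request inherits the earlier
 * of the two expire times (the time_before() check), so merging can
 * only pull a deadline forward, never delay it.
 */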
/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	list_add_tail(&drq->request->queuelist, dd->dispatch);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = rb_entry_drq(rbnext);

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}
#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo for @ddir, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * drq is expired!
	 */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct deadline_data *dd)
{
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir, other_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	drq = NULL;

	if (dd->next_drq[READ])
		drq = dd->next_drq[READ];

	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;
		other_dir = WRITE;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;
		other_dir = READ;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;
		drq = deadline_find_first_drq(dd, data_dir);
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}
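
/*
 * Illustrative trace: with fifo_batch=16, a stream of sequential
 * requests dispatches up to 16 back to back through next_drq[] before
 * the fifo deadlines are consulted again. A request that does not start
 * at last_sector charges a full batch and so ends the run; an expired
 * fifo head always resets the batch and is served first.
 */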
static struct request *deadline_next_request(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *rq;

	/*
	 * if there are still requests on the dispatch queue, grab the first one
	 */
	if (!list_empty(dd->dispatch)) {
dispatch:
		rq = list_entry_rq(dd->dispatch->next);
		return rq;
	}

	if (deadline_dispatch_requests(dd))
		goto dispatch;

	return NULL;
}
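
/*
 * The scheduler is pull driven: the block layer calls
 * deadline_next_request() when the driver wants work, and only when the
 * dispatch list is empty does deadline_dispatch_requests() run to
 * refill it.
 */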
static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/* barriers must flush the reorder queue */
	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
		     && where == ELEVATOR_INSERT_SORT))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (deadline_dispatch_requests(dd))
				;
			list_add_tail(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			deadline_add_request(q, rq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__, where);
			return;
	}
}
static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	if (!list_empty(&dd->fifo_list[WRITE])
	    || !list_empty(&dd->fifo_list[READ])
	    || !list_empty(dd->dispatch))
		return 0;

	return 1;
}
static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&drq->rb_node);

	if (rbprev)
		return rb_entry_drq(rbprev)->request;

	return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	if (rbnext)
		return rb_entry_drq(rbnext)->request;

	return NULL;
}
static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd->hash);
	kfree(dd);
}
/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;
	int i;

	if (!drq_pool)
		return -ENOMEM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;
	memset(dd, 0, sizeof(*dd));

	dd->hash = kmalloc(sizeof(struct list_head) * DL_HASH_ENTRIES, GFP_KERNEL);
	if (!dd->hash) {
		kfree(dd);
		return -ENOMEM;
	}

	dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab,
				      mempool_free_slab, drq_pool);
	if (!dd->drq_pool) {
		kfree(dd->hash);
		kfree(dd);
		return -ENOMEM;
	}

	for (i = 0; i < DL_HASH_ENTRIES; i++)
		INIT_LIST_HEAD(&dd->hash[i]);

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->dispatch = &q->queue_head;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	e->elevator_data = dd;
	return 0;
}
static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	if (drq) {
		mempool_free(drq, dd->drq_pool);
		rq->elevator_private = NULL;
	}
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		RB_CLEAR(&drq->rb_node);
		drq->request = rq;

		INIT_LIST_HEAD(&drq->hash);
		drq->on_hash = 0;

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}
/*
 * sysfs parts below
 */
struct deadline_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct deadline_data *, char *);
	ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
{									\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{									\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
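
/*
 * Example usage (the exact path is kernel-version dependent, typically
 * /sys/block/<dev>/queue/iosched/ on 2.6 kernels):
 *
 *	echo 100 > /sys/block/hda/queue/iosched/read_expire
 *
 * Attributes generated with __CONV set are shown and stored in
 * milliseconds and converted to jiffies internally; stores are clamped
 * to [MIN, MAX].
 */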
static struct deadline_fs_entry deadline_readexpire_entry = {
	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_readexpire_show,
	.store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writeexpire_show,
	.store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writesstarved_show,
	.store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_frontmerges_show,
	.store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_fifobatch_show,
	.store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
	&deadline_readexpire_entry.attr,
	&deadline_writeexpire_entry.attr,
	&deadline_writesstarved_entry.attr,
	&deadline_frontmerges_entry.attr,
	&deadline_fifobatch_entry.attr,
	NULL,
};
#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->show)
		return 0;

	return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->store)
		return -EINVAL;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
	.show	= deadline_attr_show,
	.store	= deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
	.sysfs_ops	= &deadline_sysfs_ops,
	.default_attrs	= default_attrs,
};
static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_next_req_fn =		deadline_next_request,
		.elevator_add_req_fn =		deadline_insert_request,
		.elevator_remove_req_fn =	deadline_remove_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	deadline_former_request,
		.elevator_latter_req_fn =	deadline_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_ktype = &deadline_ktype,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};
static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);

	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}
static void __exit deadline_exit(void)
{
	kmem_cache_destroy(drq_pool);
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");