/*
 * linux/drivers/block/cfq-iosched.c
 *
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
static unsigned long max_elapsed_crq;
static unsigned long max_elapsed_dispatch;

static int cfq_quantum = 4;		/* max queue in one round of service */
static int cfq_queued = 8;		/* minimum rq allocate limit per-queue */
static int cfq_service = HZ;		/* period over which service is avg */
static int cfq_fifo_expire_r = HZ / 2;	/* fifo timeout for sync requests */
static int cfq_fifo_expire_w = 5 * HZ;	/* fifo timeout for async requests */
static int cfq_fifo_rate = HZ / 8;	/* fifo expiry rate */
static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
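/*
 * Note: these module-level values only seed the per-device copies made in
 * cfq_init_queue(); the live values sit in struct cfq_data and can be
 * changed at runtime through the sysfs attributes defined at the bottom
 * of this file.
 */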
/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

/*
 * for the hash of crq inside the cfqq
 */
#define CFQ_MHASH_SHIFT		6
#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)	hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	hlist_entry((ptr), struct cfq_rq, hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

#define RQ_DATA(rq)		(rq)->elevator_private
#define RB_EMPTY(node)		((node)->rb_node == NULL)
#define RB_CLEAR_COLOR(node)	(node)->rb_color = RB_NONE
#define RB_CLEAR(node)		do {		\
	(node)->rb_parent = NULL;	\
	RB_CLEAR_COLOR((node));		\
	(node)->rb_right = NULL;	\
	(node)->rb_left = NULL;		\
} while (0)
#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)		(rq)->sector

/*
 * threshold for switching off non-tag accounting
 */
#define CFQ_MAX_TAG		(4)
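/*
 * Note: cfq_account_dispatch() below flips cfqd->cfq_tagged once this many
 * requests are outstanding in the driver at the same time; from then on a
 * queue's service is accounted by number of requests rather than elapsed
 * time, since per-request turn-around time means little on TCQ drives.
 */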
/*
 * sort key types and names
 */
static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
	struct list_head rr_list;
	struct list_head empty_list;

	struct hlist_head *cfq_hash;
	struct hlist_head *crq_hash;

	/* queues on rr_list (ie they have pending requests) */
	unsigned int busy_queues;

	unsigned int max_queued;

	request_queue_t *queue;

	sector_t last_sector;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_queued;
	unsigned int cfq_fifo_expire_r;
	unsigned int cfq_fifo_expire_w;
	unsigned int cfq_fifo_batch_expire;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int find_best_crq;

	unsigned int cfq_tagged;
	/* reference count */
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* hash of mergeable requests */
	struct hlist_node cfq_hash;
	/* whether queue is on rr (or empty) list */
	/* on either rr or empty list of cfqd */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct cfq_rq *next_crq;
	/* requests queued in sort_list */
	/* currently allocated requests */
	/* fifo list of requests in sort_list */
	struct list_head fifo[2];
	/* last time fifo expired */
	unsigned long last_fifo_expire;

	unsigned long service_start;
	unsigned long service_used;

	unsigned int max_rate;

	/* number of requests that have been handed to the driver */
	/* number of currently allocated requests */

	struct rb_node rb_node;
	struct request *request;
	struct hlist_node hash;

	struct cfq_queue *cfq_queue;
	struct cfq_io_context *io_context;

	unsigned long service_start;
	unsigned long queue_start;

	unsigned int in_flight : 1;
	unsigned int accounted : 1;
	unsigned int is_sync   : 1;
	unsigned int is_write  : 1;
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
static void cfq_update_next_crq(struct cfq_rq *);
static void cfq_put_cfqd(struct cfq_data *cfqd);
/*
 * what the fairness is based on (ie how processes are grouped and
 * differentiated)
 */
static inline unsigned long
cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
{
	/*
	 * optimize this so that ->key_type is the offset into the struct
	 */
	switch (cfqd->key_type) {
	case CFQ_KEY_PGID:
		return process_group(tsk);
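/*
 * Example (added for illustration): with the default CFQ_KEY_TGID key all
 * threads of a process map to the same cfq_queue, since they share a tgid;
 * switching key_type to "uid" via sysfs instead groups all IO issued by one
 * user into a single queue.
 */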
/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
	hlist_del_init(&crq->hash);
}

static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
	cfq_del_crq_hash(crq);

	if (q->last_merge == crq->request)
		q->last_merge = NULL;

	cfq_update_next_crq(crq);
}
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));

	BUG_ON(!hlist_unhashed(&crq->hash));

	hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
	struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_rq *crq = list_entry_hash(entry);
		struct request *__rq = crq->request;

		BUG_ON(hlist_unhashed(&crq->hash));

		if (!rq_mergeable(__rq)) {
			cfq_del_crq_hash(crq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}
/*
 * Lifted from AS - choose which of crq1 and crq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	int r1_wrap = 0, r2_wrap = 0;	/* requests are behind the disk head */
	unsigned long back_max;

	if (crq1 == NULL || crq1 == crq2)
		return crq2;

	s1 = crq1->request->sector;
	s2 = crq2->request->sector;

	last = cfqd->last_sector;

	if (!list_empty(&cfqd->queue->queue_head)) {
		struct list_head *entry = &cfqd->queue->queue_head;
		unsigned long distance = ~0UL;
		struct request *rq;

		while ((entry = entry->prev) != &cfqd->queue->queue_head) {
			rq = list_entry_rq(entry);

			if (blk_barrier_rq(rq))
				break;

			if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
				distance = abs(s1 - rq->sector + rq->nr_sectors);
				last = rq->sector + rq->nr_sectors;
			}
			if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
				distance = abs(s2 - rq->sector + rq->nr_sectors);
				last = rq->sector + rq->nr_sectors;
			}
		}
	}

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		r1_wrap = 1;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		r2_wrap = 1;

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return crq1;
	else if (!r2_wrap && r1_wrap)
		return crq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */

	/* Both requests in front of the head */
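/*
 * Worked example (added): with the defaults cfq_back_max = 16384 KiB and
 * cfq_back_penalty = 2, back_max is 32768 sectors. A request starting 1000
 * sectors behind the last dispatched sector, but within back_max, gets
 * d = 1000 * 2 = 2000, so it is only preferred over a forward candidate
 * that lies more than 2000 sectors ahead of the head.
 */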
/*
 * would be nice to take fifo expire time into account as well
 */
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct cfq_rq *last)
{
	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
	struct rb_node *rbnext, *rbprev;

	if (!ON_RB(&last->rb_node))
		return NULL;

	if ((rbnext = rb_next(&last->rb_node)) == NULL)
		rbnext = rb_first(&cfqq->sort_list);

	rbprev = rb_prev(&last->rb_node);

	if (rbprev)
		crq_prev = rb_entry_crq(rbprev);
	if (rbnext)
		crq_next = rb_entry_crq(rbnext);

	return cfq_choose_req(cfqd, crq_next, crq_prev);
}
static void cfq_update_next_crq(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;

	if (cfqq->next_crq == crq)
		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}
static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
{
	struct list_head *head = &cfqq->cfqd->rr_list;
	struct list_head *next, *prev;

	/*
	 * list might still be ordered
	 */
	next = cfqq->cfq_list.next;
	if (next != head) {
		struct cfq_queue *cnext = list_entry_cfqq(next);

		if (cfqq->service_used > cnext->service_used)
			return 1;
	}

	prev = cfqq->cfq_list.prev;
	if (prev != head) {
		struct cfq_queue *cprev = list_entry_cfqq(prev);

		if (cfqq->service_used < cprev->service_used)
			return 1;
	}

	return 0;
}
static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
{
	struct list_head *entry = &cfqq->cfqd->rr_list;

	if (!new_queue && !cfq_check_sort_rr_list(cfqq))
		return;

	list_del(&cfqq->cfq_list);

	/*
	 * sort by our mean service_used, sub-sort by in-flight requests
	 */
	while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
		struct cfq_queue *__cfqq = list_entry_cfqq(entry);

		if (cfqq->service_used > __cfqq->service_used)
			break;
		else if (cfqq->service_used == __cfqq->service_used) {
			struct list_head *prv;

			while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
				__cfqq = list_entry_cfqq(prv);

				WARN_ON(__cfqq->service_used > cfqq->service_used);
				if (cfqq->service_used != __cfqq->service_used)
					break;
				if (cfqq->in_flight > __cfqq->in_flight)
					break;

				entry = prv;
			}

			break;
		}
	}

	list_add(&cfqq->cfq_list, entry);
}
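/*
 * Added note: the net effect is that rr_list stays sorted by ascending
 * service_used from head to tail, so cfq_dispatch_requests() visits the
 * least-serviced queue first; queues that tie on service_used are ordered
 * so that the one with fewer requests in flight comes earlier.
 */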
/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to requests serviced
 */
static void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * it's currently on the empty list
	 */

	if (time_after(jiffies, cfqq->service_start + cfq_service))
		cfqq->service_used >>= 3;

	cfq_sort_rr_list(cfqq, 1);
}
static void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	list_move(&cfqq->cfq_list, &cfqd->empty_list);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}
/*
 * rb tree support functions
 */
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;

	if (ON_RB(&crq->rb_node)) {
		struct cfq_data *cfqd = cfqq->cfqd;

		BUG_ON(!cfqq->queued[crq->is_sync]);

		cfq_update_next_crq(crq);

		cfqq->queued[crq->is_sync]--;
		rb_erase(&crq->rb_node, &cfqq->sort_list);
		RB_CLEAR_COLOR(&crq->rb_node);

		if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
			cfq_del_cfqq_rr(cfqd, cfqq);
	}
}
static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_rq *__crq;

	while (*p) {
		parent = *p;
		__crq = rb_entry_crq(parent);

		if (crq->rb_key < __crq->rb_key)
			p = &(*p)->rb_left;
		else if (crq->rb_key > __crq->rb_key)
			p = &(*p)->rb_right;
		else
			return __crq;
	}

	rb_link_node(&crq->rb_node, parent, p);
	return NULL;
}
static void cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq = crq->request;
	struct cfq_rq *__alias;

	crq->rb_key = rq_rb_key(rq);
	cfqq->queued[crq->is_sync]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
		cfq_dispatch_sort(cfqd->queue, __alias);

	rb_insert_color(&crq->rb_node, &cfqq->sort_list);

	if (!cfqq->on_rr)
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}
static void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	if (ON_RB(&crq->rb_node)) {
		rb_erase(&crq->rb_node, &cfqq->sort_list);
		cfqq->queued[crq->is_sync]--;
	}

	cfq_add_crq_rb(crq);
}
static struct request *
cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
	const unsigned long key = cfq_hash_key(cfqd, current);
	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
	struct rb_node *n;

	if (!cfqq)
		return NULL;

	n = cfqq->sort_list.rb_node;
	while (n) {
		struct cfq_rq *crq = rb_entry_crq(n);

		if (sector < crq->rb_key)
			n = n->rb_left;
		else if (sector > crq->rb_key)
			n = n->rb_right;
		else
			return crq->request;
	}

	return NULL;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		if (cfqq->cfqd->cfq_tagged) {
			cfqq->service_used--;
			cfq_sort_rr_list(cfqq, 0);
		}

		if (crq->accounted) {
			crq->accounted = 0;
			cfqq->cfqd->rq_in_driver--;
		}
	}
}
632 static void cfq_requeue_request(request_queue_t
*q
, struct request
*rq
)
634 cfq_deactivate_request(q
, rq
);
635 list_add(&rq
->queuelist
, &q
->queue_head
);
static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);

	cfq_remove_merge_hints(q, crq);
	list_del_init(&rq->queuelist);
static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
	if (__rq) {
		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_FRONT_MERGE;
			goto out;
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	*req = __rq;
	return ret;
}
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(req);

	cfq_del_crq_hash(crq);
	cfq_add_crq_hash(cfqd, crq);

	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		cfq_update_next_crq(crq);
		cfq_reposition_crq_rb(cfqq, crq);
	}
}
static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_rq *cnext = RQ_DATA(next);

	cfq_merged_request(q, rq);

	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(cnext->queue_start, crq->queue_start)) {
			list_move(&rq->queuelist, &next->queuelist);
			crq->queue_start = cnext->queue_start;
		}
	}

	cfq_update_next_crq(cnext);
	cfq_remove_request(q, next);
}
/*
 * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
 * this function sector sorts the selected request to minimize seeks. we start
 * at cfqd->last_sector, not 0.
 */
static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct list_head *head = &q->queue_head, *entry = head;
	struct request *__rq;
	sector_t last;

	cfq_del_crq_rb(crq);
	cfq_remove_merge_hints(q, crq);
	list_del(&crq->request->queuelist);

	last = cfqd->last_sector;
	while ((entry = entry->prev) != head) {
		__rq = list_entry_rq(entry);

		if (blk_barrier_rq(crq->request))
			break;
		if (!blk_fs_request(crq->request))
			break;

		if (crq->request->sector > __rq->sector)
			break;
		if (__rq->sector > last && crq->request->sector < last) {
			last = crq->request->sector;
			break;
		}
	}

	cfqd->last_sector = last;
	list_add(&crq->request->queuelist, entry);
}
/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	const int reads = !list_empty(&cfqq->fifo[0]);
	const int writes = !list_empty(&cfqq->fifo[1]);
	unsigned long now = jiffies;
	struct cfq_rq *crq;

	if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
		return NULL;

	crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
	if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
		cfqq->last_fifo_expire = now;
		return crq;
	}

	crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
	if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
		cfqq->last_fifo_expire = now;
		return crq;
	}

	return NULL;
}
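/*
 * Added note: with the default tunables a queue re-checks its fifo at most
 * once per cfq_fifo_batch_expire = HZ/8 jiffies (~125ms); a sync request
 * counts as expired after HZ/2 (~500ms) in the fifo and an async one after
 * 5*HZ (~5s), in which case it is dispatched ahead of the sector-sorted
 * candidate in cfq_dispatch_request().
 */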
/*
 * dispatch a single request from given queue
 */
static void
cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
		     struct cfq_queue *cfqq)
{
	struct cfq_rq *crq;

	/*
	 * follow expired path, else get first next available
	 */
	if ((crq = cfq_check_fifo(cfqq)) == NULL) {
		if (cfqd->find_best_crq)
			crq = cfqq->next_crq;
		else
			crq = rb_entry_crq(rb_first(&cfqq->sort_list));
	}

	cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;

	/*
	 * finally, insert request into driver list
	 */
	cfq_dispatch_sort(q, crq);
}
static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	struct list_head *entry, *tmp;
	int queued, busy_queues, first_round;

	if (list_empty(&cfqd->rr_list))
		return 0;

	queued = 0;
	first_round = 1;
restart:
	busy_queues = 0;
	list_for_each_safe(entry, tmp, &cfqd->rr_list) {
		cfqq = list_entry_cfqq(entry);

		BUG_ON(RB_EMPTY(&cfqq->sort_list));

		/*
		 * first round of queueing, only select from queues that
		 * don't already have io in-flight
		 */
		if (first_round && cfqq->in_flight)
			continue;

		cfq_dispatch_request(q, cfqd, cfqq);

		if (!RB_EMPTY(&cfqq->sort_list))
			busy_queues++;

		queued++;
	}

	if ((queued < max_dispatch) && (busy_queues || first_round)) {
		first_round = 0;
		goto restart;
	}

	return queued;
}
static inline void cfq_account_dispatch(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	unsigned long now, elapsed;

	if (!blk_fs_request(crq->request))
		return;

	/*
	 * accounted bit is necessary since some drivers will call
	 * elv_next_request() many times for the same request (eg ide)
	 */
	if (crq->accounted)
		return;

	now = jiffies;
	if (cfqq->service_start == ~0UL)
		cfqq->service_start = now;

	/*
	 * on drives with tagged command queueing, command turn-around time
	 * doesn't necessarily reflect the time spent processing this very
	 * command inside the drive. so do the accounting differently there,
	 * by just sorting on the number of requests
	 */
	if (cfqd->cfq_tagged) {
		if (time_after(now, cfqq->service_start + cfq_service)) {
			cfqq->service_start = now;
			cfqq->service_used /= 10;
		}

		cfqq->service_used++;
		cfq_sort_rr_list(cfqq, 0);
	}

	elapsed = now - crq->queue_start;
	if (elapsed > max_elapsed_dispatch)
		max_elapsed_dispatch = elapsed;

	crq->accounted = 1;
	crq->service_start = now;

	if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
		cfqq->cfqd->cfq_tagged = 1;
		printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
	}
}
static void
cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	if (!crq->accounted)
		return;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;

	if (!cfqd->cfq_tagged) {
		unsigned long now = jiffies;
		unsigned long duration = now - crq->service_start;

		if (time_after(now, cfqq->service_start + cfq_service)) {
			cfqq->service_start = now;
			cfqq->service_used >>= 3;
		}

		cfqq->service_used += duration;
		cfq_sort_rr_list(cfqq, 0);

		if (duration > max_elapsed_crq)
			max_elapsed_crq = duration;
	}
}
static struct request *cfq_next_request(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *rq;

	if (!list_empty(&q->queue_head)) {
		struct cfq_rq *crq;
dispatch:
		rq = list_entry_rq(q->queue_head.next);

		if ((crq = RQ_DATA(rq)) != NULL) {
			cfq_remove_merge_hints(q, crq);
			cfq_account_dispatch(crq);
		}

		return rq;
	}

	if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
		goto dispatch;

	return NULL;
}
/*
 * task holds one reference to the queue, dropped when task exits. each crq
 * in-flight on this queue also holds a reference, dropped when crq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	BUG_ON(!atomic_read(&cfqq->ref));

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));

	cfq_put_cfqd(cfqq->cfqd);

	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}
static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_queue *__cfqq = list_entry_qhash(entry);

		if (__cfqq->key == key)
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
{
	return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
}
static void
cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
		struct cfq_io_context *cic)
{
	unsigned long hashkey = cfq_hash_key(cfqd, current);
	unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
	struct cfq_queue *__cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	hlist_del(&(*cfqq)->cfq_hash);

	__cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
	if (!__cfqq || __cfqq == *cfqq) {
		__cfqq = *cfqq;
		hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		__cfqq->key_type = cfqd->key_type;
	} else {
		atomic_inc(&__cfqq->ref);

		cfq_put_queue(*cfqq);
		*cfqq = __cfqq;
	}

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
static void cfq_free_io_context(struct cfq_io_context *cic)
{
	kmem_cache_free(cfq_ioc_pool, cic);
}
/*
 * locking hierarchy is: io_context lock -> queue locks
 */
static void cfq_exit_io_context(struct cfq_io_context *cic)
{
	struct cfq_queue *cfqq = cic->cfqq;
	struct list_head *entry = &cic->list;
	request_queue_t *q;
	unsigned long flags;

	/*
	 * put the reference this task is holding to the various queues
	 */
	spin_lock_irqsave(&cic->ioc->lock, flags);
	while ((entry = cic->list.next) != &cic->list) {
		struct cfq_io_context *__cic;

		__cic = list_entry(entry, struct cfq_io_context, list);
		list_del(entry);

		q = __cic->cfqq->cfqd->queue;
		spin_lock(q->queue_lock);
		cfq_put_queue(__cic->cfqq);
		spin_unlock(q->queue_lock);
	}

	q = cfqq->cfqd->queue;
	spin_lock(q->queue_lock);
	cfq_put_queue(cfqq);
	spin_unlock(q->queue_lock);

	spin_unlock_irqrestore(&cic->ioc->lock, flags);
}
static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
{
	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);

	if (cic) {
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		INIT_LIST_HEAD(&cic->list);
	}

	return cic;
}
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq. Note that caller is holding a reference to
 * cfqq, so we don't need to worry about it disappearing
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
{
	struct cfq_data *cfqd = (*cfqq)->cfqd;
	struct cfq_queue *__cfqq = *cfqq;
	struct cfq_io_context *cic;
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	ioc = get_io_context(gfp_flags);
	if (!ioc)
		return NULL;

	if ((cic = ioc->cic) == NULL) {
		cic = cfq_alloc_io_context(gfp_flags);
		if (!cic)
			goto err;

		ioc->cic = cic;
		cic->ioc = ioc;
		cic->cfqq = __cfqq;
		atomic_inc(&__cfqq->ref);
	} else {
		struct cfq_io_context *__cic;
		unsigned long flags;

		/*
		 * since the first cic on the list is actually the head
		 * itself, need to check this here or we'll duplicate a
		 * cic per ioc for no reason
		 */
		if (cic->cfqq == __cfqq)
			goto out;

		/*
		 * cic exists, check if we already are there. linear search
		 * should be ok here, the list will usually not be more than
		 * 1 or a few entries long
		 */
		spin_lock_irqsave(&ioc->lock, flags);
		list_for_each_entry(__cic, &cic->list, list) {
			/*
			 * this process is already holding a reference to
			 * this queue, so no need to get one more
			 */
			if (__cic->cfqq == __cfqq) {
				cic = __cic;
				spin_unlock_irqrestore(&ioc->lock, flags);
				goto out;
			}
		}
		spin_unlock_irqrestore(&ioc->lock, flags);

		/*
		 * nope, process doesn't have a cic associated with this
		 * cfqq yet. get a new one and add to list
		 */
		__cic = cfq_alloc_io_context(gfp_flags);
		if (!__cic)
			goto err;

		__cic->cfqq = __cfqq;
		atomic_inc(&__cfqq->ref);
		spin_lock_irqsave(&ioc->lock, flags);
		list_add(&__cic->list, &cic->list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		cic = __cic;
	}

out:
	/*
	 * if key_type has been changed on the fly, we lazily rehash
	 * each queue at lookup time
	 */
	if ((*cfqq)->key_type != cfqd->key_type)
		cfq_rehash_cfqq(cfqd, cfqq, cic);

	return cic;
err:
	put_io_context(ioc);
	return NULL;
}
static struct cfq_queue *
__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;

retry:
	cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else
			goto out;

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);
		INIT_LIST_HEAD(&cfqq->fifo[0]);
		INIT_LIST_HEAD(&cfqq->fifo[1]);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		atomic_inc(&cfqd->ref);
		cfqq->key_type = cfqd->key_type;
		cfqq->service_start = ~0UL;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}
static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	crq->is_sync = 0;
	if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
		crq->is_sync = 1;

	cfq_add_crq_rb(crq);
	crq->queue_start = jiffies;

	list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
}
static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	switch (where) {
	case ELEVATOR_INSERT_BACK:
		while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
			;
		list_add_tail(&rq->queuelist, &q->queue_head);
		break;
	case ELEVATOR_INSERT_FRONT:
		list_add(&rq->queuelist, &q->queue_head);
		break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		cfq_enqueue(cfqd, crq);
		break;
	default:
		printk("%s: bad insert point %d\n", __FUNCTION__, where);
		return;
	}

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);
	}
}
*q
)
1285 struct cfq_data
*cfqd
= q
->elevator
->elevator_data
;
1287 return list_empty(&q
->queue_head
) && list_empty(&cfqd
->rr_list
);
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq;

	if (unlikely(!blk_fs_request(rq)))
		return;

	cfqq = crq->cfq_queue;

	if (crq->in_flight) {
		WARN_ON(!cfqq->in_flight);
		crq->in_flight = 0;
		cfqq->in_flight--;
	}

	cfq_account_completion(cfqq, crq);
}
static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}
static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int ret = ELV_MQUEUE_MAY;

	if (current->flags & PF_MEMALLOC)
		return ELV_MQUEUE_MAY;

	cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
	if (cfqq) {
		int limit = cfqd->max_queued;

		if (cfqq->allocated[rw] < cfqd->cfq_queued)
			return ELV_MQUEUE_MUST;

		if (cfqd->busy_queues)
			limit = q->nr_requests / cfqd->busy_queues;

		if (limit < cfqd->cfq_queued)
			limit = cfqd->cfq_queued;
		else if (limit > cfqd->max_queued)
			limit = cfqd->max_queued;

		if (cfqq->allocated[rw] >= limit) {
			if (limit > cfqq->alloc_limit[rw])
				cfqq->alloc_limit[rw] = limit;

			ret = ELV_MQUEUE_NO;
		}
	}

	return ret;
}
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
	struct request_list *rl = &q->rq;
	const int write = waitqueue_active(&rl->wait[WRITE]);
	const int read = waitqueue_active(&rl->wait[READ]);

	if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
		wake_up(&rl->wait[READ]);
	if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
		wake_up(&rl->wait[WRITE]);
}
/*
 * queue lock held here
 */
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		BUG_ON(q->last_merge == rq);
		BUG_ON(!hlist_unhashed(&crq->hash));

		if (crq->io_context)
			put_io_context(crq->io_context->ioc);

		BUG_ON(!cfqq->allocated[crq->is_write]);
		cfqq->allocated[crq->is_write]--;

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;

		cfq_check_waiters(q, cfqq);
		cfq_put_queue(cfqq);
	}
}
/*
 * Allocate cfq data structures associated with this request. A queue and
 */
static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	struct cfq_queue *cfqq, *saved_cfqq;
	struct cfq_rq *crq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irqsave(q->queue_lock, flags);

	cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
	if (!cfqq)
		goto out_lock;

repeat:
	if (cfqq->allocated[rw] >= cfqd->max_queued)
		goto out_lock;

	cfqq->allocated[rw]++;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * if hashing type has changed, the cfq_queue might change here.
	 */
	saved_cfqq = cfqq;
	cic = cfq_get_io_context(&cfqq, gfp_mask);
	if (!cic)
		goto err;

	/*
	 * repeat allocation checks on queue change
	 */
	if (unlikely(saved_cfqq != cfqq)) {
		spin_lock_irqsave(q->queue_lock, flags);
		saved_cfqq->allocated[rw]--;
		goto repeat;
	}

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		RB_CLEAR(&crq->rb_node);
		crq->request = rq;

		INIT_HLIST_NODE(&crq->hash);
		crq->cfq_queue = cfqq;
		crq->io_context = cic;
		crq->service_start = crq->queue_start = 0;
		crq->in_flight = crq->accounted = crq->is_sync = 0;

		rq->elevator_private = crq;
		cfqq->alloc_limit[rw] = 0;
		return 0;
	}

	put_io_context(cic->ioc);
err:
	spin_lock_irqsave(q->queue_lock, flags);
	cfqq->allocated[rw]--;
	cfq_put_queue(cfqq);
out_lock:
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}
static void cfq_put_cfqd(struct cfq_data *cfqd)
{
	request_queue_t *q = cfqd->queue;

	if (!atomic_dec_and_test(&cfqd->ref))
		return;

	blk_put_queue(q);

	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}
static void cfq_exit_queue(elevator_t *e)
{
	cfq_put_cfqd(e->elevator_data);
}
static int cfq_init_queue(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return -ENOMEM;

	memset(cfqd, 0, sizeof(*cfqd));
	INIT_LIST_HEAD(&cfqd->rr_list);
	INIT_LIST_HEAD(&cfqd->empty_list);

	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	e->elevator_data = cfqd;

	cfqd->queue = q;
	atomic_inc(&q->refcnt);

	/*
	 * just set it to some high value, we want anyone to be able to queue
	 * some requests. fairness is handled differently
	 */
	q->nr_requests = 1024;
	cfqd->max_queued = q->nr_requests / 16;
	q->nr_batching = cfq_queued;
	cfqd->key_type = CFQ_KEY_TGID;
	cfqd->find_best_crq = 1;
	atomic_set(&cfqd->ref, 1);

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
	cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
	cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;

	return 0;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return -ENOMEM;
}
static void cfq_slab_kill(void)
{
	if (crq_pool)
		kmem_cache_destroy(crq_pool);
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}
static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		goto fail;

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}
/*
 * sysfs parts below -->
 */
struct cfq_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct cfq_data *, char *);
	ssize_t (*store)(struct cfq_data *, const char *, size_t);
};
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
static ssize_t
cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
{
	max_elapsed_dispatch = max_elapsed_crq = 0;
	return count;
}
static ssize_t
cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
{
	spin_lock_irq(cfqd->queue->queue_lock);
	if (!strncmp(page, "pgid", 4))
		cfqd->key_type = CFQ_KEY_PGID;
	else if (!strncmp(page, "tgid", 4))
		cfqd->key_type = CFQ_KEY_TGID;
	else if (!strncmp(page, "uid", 3))
		cfqd->key_type = CFQ_KEY_UID;
	else if (!strncmp(page, "gid", 3))
		cfqd->key_type = CFQ_KEY_GID;
	spin_unlock_irq(cfqd->queue->queue_lock);
	return count;
}
static ssize_t
cfq_read_key_type(struct cfq_data *cfqd, char *page)
{
	ssize_t len = 0;
	int i;

	for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
		if (cfqd->key_type == i)
			len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
		else
			len += sprintf(page+len, "%s ", cfq_key_types[i]);
	}

	len += sprintf(page+len, "\n");
	return len;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
{									\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
{									\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
#undef STORE_FUNCTION
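/*
 * Usage sketch (added; the exact path is an assumption based on the usual
 * elevator sysfs layout, here for a disk called hda):
 *
 *	# cat /sys/block/hda/queue/iosched/fifo_expire_sync
 *	# echo 250 > /sys/block/hda/queue/iosched/fifo_expire_sync
 *
 * values for the *_expire attributes are exchanged in milliseconds and
 * converted to/from jiffies with msecs_to_jiffies()/jiffies_to_msecs().
 */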
static struct cfq_fs_entry cfq_quantum_entry = {
	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_quantum_show,
	.store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_queued_show,
	.store = cfq_queued_store,
};
static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_r_show,
	.store = cfq_fifo_expire_r_store,
};
static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_w_show,
	.store = cfq_fifo_expire_w_store,
};
static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
	.attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_batch_expire_show,
	.store = cfq_fifo_batch_expire_store,
};
static struct cfq_fs_entry cfq_find_best_entry = {
	.attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_find_best_show,
	.store = cfq_find_best_store,
};
static struct cfq_fs_entry cfq_back_max_entry = {
	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_max_show,
	.store = cfq_back_max_store,
};
static struct cfq_fs_entry cfq_back_penalty_entry = {
	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_penalty_show,
	.store = cfq_back_penalty_store,
};
static struct cfq_fs_entry cfq_clear_elapsed_entry = {
	.attr = {.name = "clear_elapsed", .mode = S_IWUSR },
	.store = cfq_clear_elapsed,
};
static struct cfq_fs_entry cfq_key_type_entry = {
	.attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_read_key_type,
	.store = cfq_set_key_type,
};

static struct attribute *default_attrs[] = {
	&cfq_quantum_entry.attr,
	&cfq_queued_entry.attr,
	&cfq_fifo_expire_r_entry.attr,
	&cfq_fifo_expire_w_entry.attr,
	&cfq_fifo_batch_expire_entry.attr,
	&cfq_key_type_entry.attr,
	&cfq_find_best_entry.attr,
	&cfq_back_max_entry.attr,
	&cfq_back_penalty_entry.attr,
	&cfq_clear_elapsed_entry.attr,
	NULL,
};
1769 cfq_attr_show(struct kobject
*kobj
, struct attribute
*attr
, char *page
)
1771 elevator_t
*e
= container_of(kobj
, elevator_t
, kobj
);
1772 struct cfq_fs_entry
*entry
= to_cfq(attr
);
1777 return entry
->show(e
->elevator_data
, page
);
1781 cfq_attr_store(struct kobject
*kobj
, struct attribute
*attr
,
1782 const char *page
, size_t length
)
1784 elevator_t
*e
= container_of(kobj
, elevator_t
, kobj
);
1785 struct cfq_fs_entry
*entry
= to_cfq(attr
);
1790 return entry
->store(e
->elevator_data
, page
, length
);
static struct sysfs_ops cfq_sysfs_ops = {
	.show	= cfq_attr_show,
	.store	= cfq_attr_store,
};

static struct kobj_type cfq_ktype = {
	.sysfs_ops	= &cfq_sysfs_ops,
	.default_attrs	= default_attrs,
};
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn		= cfq_merge,
		.elevator_merged_fn		= cfq_merged_request,
		.elevator_merge_req_fn		= cfq_merged_requests,
		.elevator_next_req_fn		= cfq_next_request,
		.elevator_add_req_fn		= cfq_insert_request,
		.elevator_remove_req_fn		= cfq_remove_request,
		.elevator_requeue_req_fn	= cfq_requeue_request,
		.elevator_deactivate_req_fn	= cfq_deactivate_request,
		.elevator_queue_empty_fn	= cfq_queue_empty,
		.elevator_completed_req_fn	= cfq_completed_request,
		.elevator_former_req_fn		= cfq_former_request,
		.elevator_latter_req_fn		= cfq_latter_request,
		.elevator_set_req_fn		= cfq_set_request,
		.elevator_put_req_fn		= cfq_put_request,
		.elevator_may_queue_fn		= cfq_may_queue,
		.elevator_init_fn		= cfq_init_queue,
		.elevator_exit_fn		= cfq_exit_queue,
	},
	.elevator_ktype	= &cfq_ktype,
	.elevator_name	= "cfq",
	.elevator_owner	= THIS_MODULE,
};
static int __init cfq_init(void)
{
	int ret;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (!ret)
		__module_get(THIS_MODULE);

	return ret;
}

static void __exit cfq_exit(void)
{
	elv_unregister(&iosched_cfq);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");