// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>
/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
        KYBER_READ,
        KYBER_WRITE,
        KYBER_DISCARD,
        KYBER_OTHER,
        KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
        [KYBER_READ] = "READ",
        [KYBER_WRITE] = "WRITE",
        [KYBER_DISCARD] = "DISCARD",
        [KYBER_OTHER] = "OTHER",
};
enum {
        /*
         * In order to prevent starvation of synchronous requests by a flood of
         * asynchronous requests, we reserve 25% of requests for synchronous
         * operations.
         */
        KYBER_ASYNC_PERCENT = 75,
};
/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
        [KYBER_READ] = 256,
        [KYBER_WRITE] = 128,
        [KYBER_DISCARD] = 64,
        [KYBER_OTHER] = 16,
};
/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
        [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
        [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
        [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};
/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
        [KYBER_READ] = 16,
        [KYBER_WRITE] = 8,
        [KYBER_DISCARD] = 1,
        [KYBER_OTHER] = 1,
};
/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
        /*
         * The width of the latency histogram buckets is
         * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
         */
        KYBER_LATENCY_SHIFT = 2,
        /*
         * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
         * i.e., they're "good".
         */
        KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
        /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
        KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
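
/*
 * For example, with the default 2 ms read target, the eight buckets cover
 * latencies of <= 0.5, <= 1.0, <= 1.5, and <= 2.0 ms (the "good" buckets) and
 * <= 2.5, <= 3.0, <= 3.5, and > 3.5 ms (the "bad" buckets).
 */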
/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
        KYBER_TOTAL_LATENCY,
        KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
        [KYBER_TOTAL_LATENCY] = "total",
        [KYBER_IO_LATENCY] = "I/O",
};
/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
        atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};
/*
 * The mapping between kcq and khd mirrors the mapping between ctx and hctx;
 * we use request->mq_ctx->index_hw to index the kcq within the khd.
 */
struct kyber_ctx_queue {
        /*
         * Used to ensure that operations on rq_list and kcq_map are atomic.
         * Also protects the requests on rq_list during merges.
         */
        spinlock_t lock;
        struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;
struct kyber_queue_data {
        struct request_queue *q;

        /*
         * Each scheduling domain has a limited number of in-flight requests
         * device-wide, limited by these tokens.
         */
        struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

        /*
         * Async request percentage, converted to per-word depth for
         * sbitmap_get_shallow().
         */
        unsigned int async_depth;

        struct kyber_cpu_latency __percpu *cpu_latency;

        /* Timer for stats aggregation and adjusting domain tokens. */
        struct timer_list timer;

        unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

        unsigned long latency_timeout[KYBER_OTHER];

        int domain_p99[KYBER_OTHER];

        /* Target latencies in nanoseconds. */
        u64 latency_targets[KYBER_OTHER];
};
struct kyber_hctx_data {
        spinlock_t lock;
        struct list_head rqs[KYBER_NUM_DOMAINS];
        unsigned int cur_domain;
        unsigned int batching;
        struct kyber_ctx_queue *kcqs;
        struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
        struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
        struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
};
static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                             void *key);
static unsigned int kyber_sched_domain(unsigned int op)
{
        switch (op & REQ_OP_MASK) {
        case REQ_OP_READ:
                return KYBER_READ;
        case REQ_OP_WRITE:
                return KYBER_WRITE;
        case REQ_OP_DISCARD:
                return KYBER_DISCARD;
        default:
                return KYBER_OTHER;
        }
}
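
/*
 * Everything other than reads, writes, and discards (e.g., flushes) falls into
 * KYBER_OTHER. That domain still has its own tokens, but it has no latency
 * target, so its depth is never adjusted by the latency heuristics below.
 */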
static void flush_latency_buckets(struct kyber_queue_data *kqd,
                                  struct kyber_cpu_latency *cpu_latency,
                                  unsigned int sched_domain, unsigned int type)
{
        unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
        atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
        unsigned int bucket;

        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
                buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}
/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
                                unsigned int sched_domain, unsigned int type,
                                unsigned int percentile)
{
        unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
        unsigned int bucket, samples = 0, percentile_samples;

        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
                samples += buckets[bucket];

        if (!samples)
                return -1;

        /*
         * We do the calculation once we have 500 samples or one second passes
         * since the first sample was recorded, whichever comes first.
         */
        if (!kqd->latency_timeout[sched_domain])
                kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
        if (samples < 500 &&
            time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
                return -1;
        }
        kqd->latency_timeout[sched_domain] = 0;

        percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
                if (buckets[bucket] >= percentile_samples)
                        break;
                percentile_samples -= buckets[bucket];
        }
        memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

        trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
                            kyber_latency_type_names[type], percentile,
                            bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

        return bucket;
}
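
/*
 * For example, if a domain has accumulated 600 samples and we ask for the 90th
 * percentile, percentile_samples starts at 540 and the loop subtracts each
 * bucket's count until it reaches the bucket containing the 540th sample; that
 * bucket's index is returned.
 */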
static void kyber_resize_domain(struct kyber_queue_data *kqd,
                                unsigned int sched_domain, unsigned int depth)
{
        depth = clamp(depth, 1U, kyber_depth[sched_domain]);
        if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
                trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
                                   depth);
        }
}
static void kyber_timer_fn(struct timer_list *t)
{
        struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
        unsigned int sched_domain;
        int cpu;
        bool bad = false;

        /* Sum all of the per-cpu latency histograms. */
        for_each_online_cpu(cpu) {
                struct kyber_cpu_latency *cpu_latency;

                cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
                for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                        flush_latency_buckets(kqd, cpu_latency, sched_domain,
                                              KYBER_TOTAL_LATENCY);
                        flush_latency_buckets(kqd, cpu_latency, sched_domain,
                                              KYBER_IO_LATENCY);
                }
        }

        /*
         * Check if any domains have a high I/O latency, which might indicate
         * congestion in the device. Note that we use the p90; we don't want to
         * be too sensitive to outliers here.
         */
        for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                int p90;

                p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
                                           90);
                if (p90 >= KYBER_GOOD_BUCKETS)
                        bad = true;
        }

        /*
         * Adjust the scheduling domain depths. If we determined that there was
         * congestion, we throttle all domains with good latencies. Either way,
         * we ease up on throttling domains with bad latencies.
         */
        for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                unsigned int orig_depth, depth;
                int p99;

                p99 = calculate_percentile(kqd, sched_domain,
                                           KYBER_TOTAL_LATENCY, 99);
                /*
                 * This is kind of subtle: different domains will not
                 * necessarily have enough samples to calculate the latency
                 * percentiles during the same window, so we have to remember
                 * the p99 for the next time we observe congestion; once we do,
                 * we don't want to throttle again until we get more data, so we
                 * reset it to -1.
                 */
                if (bad) {
                        if (p99 < 0)
                                p99 = kqd->domain_p99[sched_domain];
                        kqd->domain_p99[sched_domain] = -1;
                } else if (p99 >= 0) {
                        kqd->domain_p99[sched_domain] = p99;
                }
                if (p99 < 0)
                        continue;

                /*
                 * If this domain has bad latency, throttle less. Otherwise,
                 * throttle more iff we determined that there is congestion.
                 *
                 * The new depth is scaled linearly with the p99 latency vs the
                 * latency target. E.g., if the p99 is 3/4 of the target, then
                 * we throttle down to 3/4 of the current depth, and if the p99
                 * is 2x the target, then we double the depth.
                 */
                if (bad || p99 >= KYBER_GOOD_BUCKETS) {
                        orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
                        depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
                        kyber_resize_domain(kqd, sched_domain, depth);
                }
        }
}
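
/*
 * For example, a p99 in bucket 5 (between 1 1/4 and 1 1/2 times the target)
 * scales the depth to orig_depth * 6 / 4, easing up on that domain, while a
 * p99 in bucket 2 (<= 3/4 of the target) throttles it to orig_depth * 3 / 4.
 */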
static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
        /*
         * All of the hardware queues have the same depth, so we can just grab
         * the shift of the first one.
         */
        return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
}
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
        struct kyber_queue_data *kqd;
        unsigned int shift;
        int ret = -ENOMEM;
        int i;

        kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
        if (!kqd)
                goto err;

        kqd->q = q;

        kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                            GFP_KERNEL | __GFP_ZERO);
        if (!kqd->cpu_latency)
                goto err_kqd;

        timer_setup(&kqd->timer, kyber_timer_fn, 0);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                WARN_ON(!kyber_depth[i]);
                WARN_ON(!kyber_batch_size[i]);
                ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
                                              kyber_depth[i], -1, false,
                                              GFP_KERNEL, q->node);
                if (ret) {
                        while (--i >= 0)
                                sbitmap_queue_free(&kqd->domain_tokens[i]);
                        goto err_buckets;
                }
        }

        for (i = 0; i < KYBER_OTHER; i++) {
                kqd->domain_p99[i] = -1;
                kqd->latency_targets[i] = kyber_latency_targets[i];
        }

        shift = kyber_sched_tags_shift(q);
        kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

        return kqd;

err_buckets:
        free_percpu(kqd->cpu_latency);
err_kqd:
        kfree(kqd);
err:
        return ERR_PTR(ret);
}
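
/*
 * For example, with the common 64-bit sbitmap word (shift == 6), async_depth
 * is 64 * 75 / 100 = 48, so asynchronous requests may consume at most 48 of
 * every 64 scheduler tags in each sbitmap word; see kyber_limit_depth().
 */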
static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct kyber_queue_data *kqd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        kqd = kyber_queue_data_alloc(q);
        if (IS_ERR(kqd)) {
                kobject_put(&eq->kobj);
                return PTR_ERR(kqd);
        }

        blk_stat_enable_accounting(q);

        eq->elevator_data = kqd;
        q->elevator = eq;

        return 0;
}
static void kyber_exit_sched(struct elevator_queue *e)
{
        struct kyber_queue_data *kqd = e->elevator_data;
        int i;

        del_timer_sync(&kqd->timer);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                sbitmap_queue_free(&kqd->domain_tokens[i]);
        free_percpu(kqd->cpu_latency);
        kfree(kqd);
}
static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
        unsigned int i;

        spin_lock_init(&kcq->lock);
        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                INIT_LIST_HEAD(&kcq->rq_list[i]);
}
static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct kyber_hctx_data *khd;
        int i;

        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
                return -ENOMEM;

        khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
                                       sizeof(struct kyber_ctx_queue),
                                       GFP_KERNEL, hctx->numa_node);
        if (!khd->kcqs)
                goto err_khd;

        for (i = 0; i < hctx->nr_ctx; i++)
                kyber_ctx_queue_init(&khd->kcqs[i]);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
                                      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
                        while (--i >= 0)
                                sbitmap_free(&khd->kcq_map[i]);
                        goto err_kcqs;
                }
        }

        spin_lock_init(&khd->lock);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
                khd->domain_wait[i].sbq = NULL;
                init_waitqueue_func_entry(&khd->domain_wait[i].wait,
                                          kyber_domain_wake);
                khd->domain_wait[i].wait.private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
                atomic_set(&khd->wait_index[i], 0);
        }

        khd->cur_domain = 0;
        khd->batching = 0;

        hctx->sched_data = khd;
        sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
                                        kqd->async_depth);

        return 0;

err_kcqs:
        kfree(khd->kcqs);
err_khd:
        kfree(khd);
        return -ENOMEM;
}
static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                sbitmap_free(&khd->kcq_map[i]);
        kfree(khd->kcqs);
        kfree(hctx->sched_data);
}
static int rq_get_domain_token(struct request *rq)
{
        return (long)rq->elv.priv[0];
}
static void rq_set_domain_token(struct request *rq, int token)
{
        rq->elv.priv[0] = (void *)(long)token;
}
static void rq_clear_domain_token(struct kyber_queue_data *kqd,
                                  struct request *rq)
{
        unsigned int sched_domain;
        int nr;

        nr = rq_get_domain_token(rq);
        if (nr != -1) {
                sched_domain = kyber_sched_domain(rq->cmd_flags);
                sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
                                    rq->mq_ctx->cpu);
        }
}
static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        /*
         * We use the scheduler tags as per-hardware queue queueing tokens.
         * Async requests can be limited at this stage.
         */
        if (!op_is_sync(op)) {
                struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

                data->shallow_depth = kqd->async_depth;
        }
}
static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
        struct list_head *rq_list = &kcq->rq_list[sched_domain];
        bool merged;

        spin_lock(&kcq->lock);
        merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
        spin_unlock(&kcq->lock);

        return merged;
}
static void kyber_prepare_request(struct request *rq, struct bio *bio)
{
        rq_set_domain_token(rq, -1);
}
static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct list_head *rq_list, bool at_head)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, rq_list, queuelist) {
                unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
                struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
                struct list_head *head = &kcq->rq_list[sched_domain];

                spin_lock(&kcq->lock);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
                blk_mq_sched_request_inserted(rq);
                spin_unlock(&kcq->lock);
        }
}
static void kyber_finish_request(struct request *rq)
{
        struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

        rq_clear_domain_token(kqd, rq);
}
static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
                               unsigned int sched_domain, unsigned int type,
                               u64 target, u64 latency)
{
        unsigned int bucket;
        u64 divisor;

        if (latency > 0) {
                divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
                bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
                               KYBER_LATENCY_BUCKETS - 1);
        } else {
                bucket = 0;
        }

        atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}
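
/*
 * For example, with a 2 ms target the divisor is 0.5 ms, so a 1.2 ms sample
 * lands in bucket 2 (the "<= 3/4 * target latency" bucket) and anything above
 * 3.5 ms is clamped into the last bucket.
 */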
static void kyber_completed_request(struct request *rq, u64 now)
{
        struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
        struct kyber_cpu_latency *cpu_latency;
        unsigned int sched_domain;
        u64 target;

        sched_domain = kyber_sched_domain(rq->cmd_flags);
        if (sched_domain == KYBER_OTHER)
                return;

        cpu_latency = get_cpu_ptr(kqd->cpu_latency);
        target = kqd->latency_targets[sched_domain];
        add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
                           target, now - rq->start_time_ns);
        add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
                           now - rq->io_start_time_ns);
        put_cpu_ptr(kqd->cpu_latency);

        timer_reduce(&kqd->timer, jiffies + HZ / 10);
}
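
/*
 * timer_reduce() only pulls the expiry in (or arms an inactive timer), so the
 * per-cpu histograms are aggregated and depths adjusted within 100 ms of the
 * first completion after an idle period.
 */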
struct flush_kcq_data {
        struct kyber_hctx_data *khd;
        unsigned int sched_domain;
        struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_kcq_data *flush_data = data;
        struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

        spin_lock(&kcq->lock);
        list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
                              flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&kcq->lock);

        return true;
}
static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
                                  unsigned int sched_domain,
                                  struct list_head *list)
{
        struct flush_kcq_data data = {
                .khd = khd,
                .sched_domain = sched_domain,
                .list = list,
        };

        sbitmap_for_each_set(&khd->kcq_map[sched_domain],
                             flush_busy_kcq, &data);
}
static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
                             void *key)
{
        struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
        struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

        sbitmap_del_wait_queue(wait);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
}
static int kyber_get_domain_token(struct kyber_queue_data *kqd,
                                  struct kyber_hctx_data *khd,
                                  struct blk_mq_hw_ctx *hctx)
{
        unsigned int sched_domain = khd->cur_domain;
        struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
        struct sbq_wait *wait = &khd->domain_wait[sched_domain];
        struct sbq_wait_state *ws;
        int nr;

        nr = __sbitmap_queue_get(domain_tokens);

        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
        if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
                khd->domain_ws[sched_domain] = ws;
                sbitmap_add_wait_queue(domain_tokens, ws, wait);

                /*
                 * Try again in case a token was freed before we got on the
                 * wait queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
        }

        /*
         * If we got a token while we were on the wait queue, remove ourselves
         * from the wait queue to ensure that all wake ups make forward
         * progress. It's possible that the waker already deleted the entry
         * between the !list_empty_careful() check and us grabbing the lock, but
         * list_del_init() is okay with that.
         */
        if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
                ws = khd->domain_ws[sched_domain];
                spin_lock_irq(&ws->wait.lock);
                sbitmap_del_wait_queue(wait);
                spin_unlock_irq(&ws->wait.lock);
        }

        return nr;
}
static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                          struct kyber_hctx_data *khd,
                          struct blk_mq_hw_ctx *hctx)
{
        struct list_head *rqs;
        struct request *rq;
        int nr;

        rqs = &khd->rqs[khd->cur_domain];

        /*
         * If we already have a flushed request, then we just need to get a
         * token for it. Otherwise, if there are pending requests in the kcqs,
         * flush the kcqs, but only if we can get a token. If not, we should
         * leave the requests in the kcqs so that they can be merged. Note that
         * khd->lock serializes the flushes, so if we observed any bit set in
         * the kcq_map, we will always get a request.
         */
        rq = list_first_entry_or_null(rqs, struct request, queuelist);
        if (rq) {
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        khd->batching++;
                        rq_set_domain_token(rq, nr);
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
                        trace_kyber_throttled(kqd->q,
                                              kyber_domain_names[khd->cur_domain]);
                }
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
                        rq = list_first_entry(rqs, struct request, queuelist);
                        khd->batching++;
                        rq_set_domain_token(rq, nr);
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
                        trace_kyber_throttled(kqd->q,
                                              kyber_domain_names[khd->cur_domain]);
                }
        }

        /* There were either no pending requests or no tokens. */
        return NULL;
}
static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq;
        int i;

        spin_lock(&khd->lock);

        /*
         * First, if we are still entitled to batch, try to dispatch a request
         * from the batch.
         */
        if (khd->batching < kyber_batch_size[khd->cur_domain]) {
                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        /*
         * Either,
         * 1. We were no longer entitled to a batch.
         * 2. The domain we were batching didn't have any requests.
         * 3. The domain we were batching was out of tokens.
         *
         * Start another batch. Note that this wraps back around to the original
         * domain if no other domains have requests or tokens.
         */
        khd->batching = 0;
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
                        khd->cur_domain = 0;
                else
                        khd->cur_domain++;

                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        rq = NULL;
out:
        spin_unlock(&khd->lock);
        return rq;
}
static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (!list_empty_careful(&khd->rqs[i]) ||
                    sbitmap_any_bit_set(&khd->kcq_map[i]))
                        return true;
        }

        return false;
}
#define KYBER_LAT_SHOW_STORE(domain, name) \
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
				       char *page) \
{ \
	struct kyber_queue_data *kqd = e->elevator_data; \
 \
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
} \
 \
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
					const char *page, size_t count) \
{ \
	struct kyber_queue_data *kqd = e->elevator_data; \
	unsigned long long nsec; \
	int ret; \
 \
	ret = kstrtoull(page, 10, &nsec); \
	if (ret) \
		return ret; \
 \
	kqd->latency_targets[domain] = nsec; \
 \
	return count; \
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE
#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR
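
/*
 * For example, writing 10000000 to /sys/block/<dev>/queue/iosched/write_lat_nsec
 * sets a 10 ms write latency target; the read_lat_nsec and write_lat_nsec
 * attributes are both in nanoseconds.
 */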
#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
{ \
	struct request_queue *q = data; \
	struct kyber_queue_data *kqd = q->elevator->elevator_data; \
 \
	sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
	return 0; \
} \
 \
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
	__acquires(&khd->lock) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	spin_lock(&khd->lock); \
	return seq_list_start(&khd->rqs[domain], *pos); \
} \
 \
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
				     loff_t *pos) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	return seq_list_next(v, &khd->rqs[domain], pos); \
} \
 \
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
	__releases(&khd->lock) \
{ \
	struct blk_mq_hw_ctx *hctx = m->private; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
 \
	spin_unlock(&khd->lock); \
} \
 \
static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
	.start	= kyber_##name##_rqs_start, \
	.next	= kyber_##name##_rqs_next, \
	.stop	= kyber_##name##_rqs_stop, \
	.show	= blk_mq_debugfs_rq_show, \
}; \
 \
static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{ \
	struct blk_mq_hw_ctx *hctx = data; \
	struct kyber_hctx_data *khd = hctx->sched_data; \
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
 \
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
	return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
static int kyber_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct kyber_queue_data *kqd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", kqd->async_depth);
        return 0;
}
static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
        return 0;
}
static int kyber_batching_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%u\n", khd->batching);
        return 0;
}
#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS
#define KYBER_HCTX_DOMAIN_ATTRS(name) \
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif
static struct elevator_type kyber_sched = {
        .ops = {
                .init_sched = kyber_init_sched,
                .exit_sched = kyber_exit_sched,
                .init_hctx = kyber_init_hctx,
                .exit_hctx = kyber_exit_hctx,
                .limit_depth = kyber_limit_depth,
                .bio_merge = kyber_bio_merge,
                .prepare_request = kyber_prepare_request,
                .insert_requests = kyber_insert_requests,
                .finish_request = kyber_finish_request,
                .requeue_request = kyber_finish_request,
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
        },
#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
        .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
        .elevator_attrs = kyber_sched_attrs,
        .elevator_name = "kyber",
        .elevator_owner = THIS_MODULE,
};
static int __init kyber_init(void)
{
        return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
        elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);
MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");