// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                          root blkg
 *                         /         \
 *        fast (target=5ms)           slow (target=10ms)
 *         /       \                   /            \
 *        a         b           normal (15ms)     unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at (u64)-1 down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion.  So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return.  We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
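/*
 * Illustrative numbers for the induced-delay accounting above (not taken from
 * a real trace): with min_lat_nsec = 5ms, a root-issued bio that completes in
 * 2ms adds 5ms - 2ms = 3ms to total_time.  After a thousand such completions
 * total_time would be roughly 3s, but the delay actually applied at throttle
 * time is clamped to min(total_time, NSEC_PER_SEC) = 1s per trip to userspace.
 */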
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}
struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};
struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};
struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC

/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};
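/*
 * A rough worked example of how the factor is picked (numbers illustrative):
 * BLKIOLATENCY_EXP_BUCKET_SIZE is 1s / 4 = 250ms, so a 300ms sampling window
 * gives exp_idx = min(4, 300ms / 250ms) = 1 and a decay factor of 2039, i.e.
 * roughly a 240-window horizon (about a minute when 250ms windows elapse back
 * to back).  Windows at or above 1s all use the last factor, 2014.
 */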
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}
static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}
static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}
static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);

	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);

		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}
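/*
 * In other words, on ssd-style accounting a window is "ok" when fewer than 10%
 * of its completions missed the target: e.g. 1000 completions allow up to 99
 * misses, while a window with under ten completions must have none (thresh is
 * clamped to at least 1).  On rotational accounting the window is "ok" when
 * the mean completion time is within the target.
 */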
static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
}
static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct iolatency_grp *iolat = private_data;

	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       bool issue_as_root,
				       bool use_memdelay)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root.  If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
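/*
 * Illustrative values: with nr_requests = 128 a scale-up step is 128 >> 4 = 8
 * and a scale-down step is 128 >> 2 = 32, so we back off roughly four times as
 * fast as we recover.  For very shallow queues (qd < 16 scaling up, qd < 4
 * scaling down) the shift would yield 0, so the step is clamped to 1.
 */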
/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it.  Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}
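/*
 * A sketch of how the cookie moves, assuming nr_requests = 128: every scale
 * down subtracts 32 from the cookie while it is within qd of
 * DEFAULT_SCALE_COOKIE, then only ticks down by 1 per event once the deficit
 * exceeds qd (and stops entirely once the deficit reaches 2 * qd).  Scaling
 * up adds 8 per event, or only 1 while the deficit is still larger than qd,
 * and snaps back to DEFAULT_SCALE_COOKIE rather than overshooting it.
 */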
/*
 * Change the queue depth of the iolatency_grp.  We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}
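/*
 * Continuing the nr_requests = 128 example: a group at max_depth = 64 that
 * keeps getting scaled up moves to 72, 80, ... until it reaches qd, while a
 * scale down halves it to 32, 16, ... with a floor of 1.  Only once it is
 * pinned at 1 does the induced-delay mechanism described at the top of the
 * file kick in.
 */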
/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);

		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}
	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}
static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so we are truncated to the correct time that our
	 * issue is truncated to.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;

		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;

		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	    now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;
	int inflight = 0;

	blkg = bio->bi_blkg;
	if (!blkg || !bio_flagged(bio, BIO_TRACKED))
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	if (!enabled)
		return;

	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		inflight = atomic_dec_return(&rqw->inflight);
		WARN_ON_ONCE(inflight < 0);
		/*
		 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
		 * submitted, so do not account for it.
		 */
		if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
			iolatency_record_time(iolat, &bio->bi_issue, now,
					      issue_as_root);
			window_start = atomic64_read(&iolat->window_start);
			if (now > window_start &&
			    (now - window_start) >= iolat->cur_win_nsec) {
				if (atomic64_cmpxchg(&iolat->window_start,
					     window_start, now) == window_start)
					iolatency_check_latencies(iolat, now);
			}
		}
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}
static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};
static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_tryget(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}
int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_LATENCY;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

	return 0;
}
/*
 * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
 * return 0.
 */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		return 1;
	if (oldval && !val) {
		blkcg_clear_delay(blkg);
		return -1;
	}
	return 0;
}
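/*
 * Window sizing examples for the clamping above (targets are illustrative):
 * a 2ms target gives val << 4 = 32ms, which is raised to the 100ms minimum; a
 * 10ms target gives a 160ms window; and anything above ~62.5ms is capped at
 * the 1s maximum window.
 */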
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;

		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;
	int enable = 0;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
	if (enable) {
		WARN_ON_ONCE(!blk_get_queue(blkg->q));
		blkg_get(blkg);
	}

	if (oldval != iolat->min_lat_nsec)
		iolatency_clear_scaling(blkg);

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	if (ret == 0 && enable) {
		struct iolatency_grp *tmp = blkg_to_lat(blkg);
		struct blk_iolatency *blkiolat = tmp->blkiolat;

		blk_mq_freeze_queue(blkg->q);

		if (enable == 1)
			atomic_inc(&blkiolat->enabled);
		else if (enable == -1)
			atomic_dec(&blkiolat->enabled);
		else
			WARN_ON_ONCE(1);

		blk_mq_unfreeze_queue(blkg->q);

		blkg_put(blkg);
		blk_put_queue(blkg->q);
	}
	return ret ?: nbytes;
}
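/*
 * For reference, this parses writes to the cgroup v2 io.latency file, e.g.
 * (device numbers purely illustrative):
 *
 *	echo "8:16 target=2000" > io.latency	# 2ms target
 *	echo "8:16 target=max" > io.latency	# clear the target
 *
 * Targets are supplied in microseconds and stored internally in nanoseconds;
 * the "MAJ:MIN" prefix is consumed by blkg_conf_prep() before we get here.
 */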
static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}
static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;

		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (!blkcg_debug_stats)
		return 0;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);
	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}
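/*
 * The strings above are appended to the per-device lines of io.stat when
 * blkcg_debug_stats is set, so a line might end in something like
 * " depth=max avg_lat=320 win=100" (rotational accounting, usec/msec units)
 * or " missed=12 total=8094 depth=64" (ssd accounting); the numbers here are
 * only illustrative.
 */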
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
						   struct request_queue *q,
						   struct blkcg *blkcg)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
				       __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;

		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);

		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}
static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	int ret;

	ret = iolatency_set_min_lat_nsec(blkg, 0);
	if (ret == 1)
		atomic_inc(&blkiolat->enabled);
	if (ret == -1)
		atomic_dec(&blkiolat->enabled);
	iolatency_clear_scaling(blkg);
}
static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);

	free_percpu(iolat->stats);
	kfree(iolat);
}
static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};
static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes	= iolatency_files,
	.pd_alloc_fn	= iolatency_pd_alloc,
	.pd_init_fn	= iolatency_pd_init,
	.pd_offline_fn	= iolatency_pd_offline,
	.pd_free_fn	= iolatency_pd_free,
	.pd_stat_fn	= iolatency_pd_stat,
};
static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);